Diffstat (limited to 'include/net')
-rw-r--r-- include/net/6lowpan.h | 330
-rw-r--r-- include/net/9p/9p.h | 557
-rw-r--r-- include/net/9p/client.h | 300
-rw-r--r-- include/net/9p/transport.h | 69
-rw-r--r-- include/net/Space.h | 20
-rw-r--r-- include/net/act_api.h | 296
-rw-r--r-- include/net/addrconf.h | 517
-rw-r--r-- include/net/af_ieee802154.h | 59
-rw-r--r-- include/net/af_rxrpc.h | 75
-rw-r--r-- include/net/af_unix.h | 110
-rw-r--r-- include/net/af_vsock.h | 229
-rw-r--r-- include/net/ah.h | 23
-rw-r--r-- include/net/amt.h | 408
-rw-r--r-- include/net/arp.h | 77
-rw-r--r-- include/net/atmclip.h | 53
-rw-r--r-- include/net/ax25.h | 489
-rw-r--r-- include/net/ax88796.h | 46
-rw-r--r-- include/net/bareudp.h | 16
-rw-r--r-- include/net/bluetooth/bluetooth.h | 634
-rw-r--r-- include/net/bluetooth/hci.h | 2967
-rw-r--r-- include/net/bluetooth/hci_core.h | 2124
-rw-r--r-- include/net/bluetooth/hci_mon.h | 69
-rw-r--r-- include/net/bluetooth/hci_sock.h | 176
-rw-r--r-- include/net/bluetooth/hci_sync.h | 131
-rw-r--r-- include/net/bluetooth/iso.h | 32
-rw-r--r-- include/net/bluetooth/l2cap.h | 1009
-rw-r--r-- include/net/bluetooth/mgmt.h | 1177
-rw-r--r-- include/net/bluetooth/rfcomm.h | 375
-rw-r--r-- include/net/bluetooth/sco.h | 51
-rw-r--r-- include/net/bond_3ad.h | 310
-rw-r--r-- include/net/bond_alb.h | 169
-rw-r--r-- include/net/bond_options.h | 164
-rw-r--r-- include/net/bonding.h | 795
-rw-r--r-- include/net/bpf_sk_storage.h | 63
-rw-r--r-- include/net/busy_poll.h | 177
-rw-r--r-- include/net/caif/caif_dev.h | 128
-rw-r--r-- include/net/caif/caif_device.h | 55
-rw-r--r-- include/net/caif/caif_layer.h | 279
-rw-r--r-- include/net/caif/cfcnfg.h | 90
-rw-r--r-- include/net/caif/cfctrl.h | 130
-rw-r--r-- include/net/caif/cffrml.h | 21
-rw-r--r-- include/net/caif/cfmuxl.h | 20
-rw-r--r-- include/net/caif/cfpkt.h | 232
-rw-r--r-- include/net/caif/cfserl.h | 13
-rw-r--r-- include/net/caif/cfsrvl.h | 65
-rw-r--r-- include/net/calipso.h | 77
-rw-r--r-- include/net/cfg80211-wext.h | 52
-rw-r--r-- include/net/cfg80211.h | 9019
-rw-r--r-- include/net/cfg802154.h | 422
-rw-r--r-- include/net/checksum.h | 195
-rw-r--r-- include/net/cipso_ipv4.h | 306
-rw-r--r-- include/net/cls_cgroup.h | 88
-rw-r--r-- include/net/codel.h | 167
-rw-r--r-- include/net/codel_impl.h | 269
-rw-r--r-- include/net/codel_qdisc.h | 77
-rw-r--r-- include/net/compat.h | 95
-rw-r--r-- include/net/datalink.h | 28
-rw-r--r-- include/net/dcbevent.h | 39
-rw-r--r-- include/net/dcbnl.h | 114
-rw-r--r-- include/net/devlink.h | 1906
-rw-r--r-- include/net/dropreason.h | 323
-rw-r--r-- include/net/dsa.h | 1472
-rw-r--r-- include/net/dsfield.h | 53
-rw-r--r-- include/net/dst.h | 560
-rw-r--r-- include/net/dst_cache.h | 109
-rw-r--r-- include/net/dst_metadata.h | 272
-rw-r--r-- include/net/dst_ops.h | 73
-rw-r--r-- include/net/erspan.h | 321
-rw-r--r-- include/net/esp.h | 50
-rw-r--r-- include/net/espintcp.h | 40
-rw-r--r-- include/net/ethoc.h | 23
-rw-r--r-- include/net/failover.h | 37
-rw-r--r-- include/net/fib_notifier.h | 51
-rw-r--r-- include/net/fib_rules.h | 205
-rw-r--r-- include/net/firewire.h | 27
-rw-r--r-- include/net/flow.h | 195
-rw-r--r-- include/net/flow_dissector.h | 438
-rw-r--r-- include/net/flow_offload.h | 636
-rw-r--r-- include/net/fou.h | 20
-rw-r--r-- include/net/fq.h | 106
-rw-r--r-- include/net/fq_impl.h | 389
-rw-r--r-- include/net/garp.h | 132
-rw-r--r-- include/net/gen_stats.h | 84
-rw-r--r-- include/net/genetlink.h | 465
-rw-r--r-- include/net/geneve.h | 76
-rw-r--r-- include/net/gre.h | 149
-rw-r--r-- include/net/gro.h | 450
-rw-r--r-- include/net/gro_cells.h | 19
-rw-r--r-- include/net/gtp.h | 81
-rw-r--r-- include/net/gue.h | 120
-rw-r--r-- include/net/hwbm.h | 35
-rw-r--r-- include/net/icmp.h | 62
-rw-r--r-- include/net/ieee80211_radiotap.h | 373
-rw-r--r-- include/net/ieee802154_netdev.h | 384
-rw-r--r-- include/net/if_inet6.h | 276
-rw-r--r-- include/net/ife.h | 52
-rw-r--r-- include/net/ila.h | 16
-rw-r--r-- include/net/inet6_connection_sock.h | 29
-rw-r--r-- include/net/inet6_hashtables.h | 126
-rw-r--r-- include/net/inet_common.h | 82
-rw-r--r-- include/net/inet_connection_sock.h | 357
-rw-r--r-- include/net/inet_dscp.h | 57
-rw-r--r-- include/net/inet_ecn.h | 323
-rw-r--r-- include/net/inet_frag.h | 190
-rw-r--r-- include/net/inet_hashtables.h | 479
-rw-r--r-- include/net/inet_sock.h | 405
-rw-r--r-- include/net/inet_timewait_sock.h | 130
-rw-r--r-- include/net/inetpeer.h | 152
-rw-r--r-- include/net/ioam6.h | 68
-rw-r--r-- include/net/ip.h | 806
-rw-r--r-- include/net/ip6_checksum.h | 87
-rw-r--r-- include/net/ip6_fib.h | 654
-rw-r--r-- include/net/ip6_route.h | 349
-rw-r--r-- include/net/ip6_tunnel.h | 170
-rw-r--r-- include/net/ip_fib.h | 608
-rw-r--r-- include/net/ip_tunnels.h | 556
-rw-r--r-- include/net/ip_vs.h | 1739
-rw-r--r-- include/net/ipcomp.h | 32
-rw-r--r-- include/net/ipconfig.h | 28
-rw-r--r-- include/net/ipv6.h | 1347
-rw-r--r-- include/net/ipv6_frag.h | 144
-rw-r--r-- include/net/ipv6_stubs.h | 91
-rw-r--r-- include/net/iucv/af_iucv.h | 164
-rw-r--r-- include/net/iucv/iucv.h | 496
-rw-r--r-- include/net/iw_handler.h | 552
-rw-r--r-- include/net/kcm.h | 198
-rw-r--r-- include/net/l3mdev.h | 334
-rw-r--r-- include/net/lag.h | 17
-rw-r--r-- include/net/lapb.h | 162
-rw-r--r-- include/net/lib80211.h | 122
-rw-r--r-- include/net/llc.h | 165
-rw-r--r-- include/net/llc_c_ac.h | 187
-rw-r--r-- include/net/llc_c_ev.h | 224
-rw-r--r-- include/net/llc_c_st.h | 52
-rw-r--r-- include/net/llc_conn.h | 120
-rw-r--r-- include/net/llc_if.h | 69
-rw-r--r-- include/net/llc_pdu.h | 442
-rw-r--r-- include/net/llc_s_ac.h | 41
-rw-r--r-- include/net/llc_s_ev.h | 67
-rw-r--r-- include/net/llc_s_st.h | 38
-rw-r--r-- include/net/llc_sap.h | 32
-rw-r--r-- include/net/lwtunnel.h | 269
-rw-r--r-- include/net/mac80211.h | 7227
-rw-r--r-- include/net/mac802154.h | 520
-rw-r--r-- include/net/macsec.h | 316
-rw-r--r-- include/net/mctp.h | 298
-rw-r--r-- include/net/mctpdevice.h | 56
-rw-r--r-- include/net/mip6.h | 41
-rw-r--r-- include/net/mld.h | 117
-rw-r--r-- include/net/mpls.h | 45
-rw-r--r-- include/net/mpls_iptunnel.h | 25
-rw-r--r-- include/net/mptcp.h | 306
-rw-r--r-- include/net/mrp.h | 148
-rw-r--r-- include/net/ncsi.h | 72
-rw-r--r-- include/net/ndisc.h | 498
-rw-r--r-- include/net/neighbour.h | 605
-rw-r--r-- include/net/net_debug.h | 157
-rw-r--r-- include/net/net_failover.h | 40
-rw-r--r-- include/net/net_namespace.h | 530
-rw-r--r-- include/net/net_ratelimit.h | 9
-rw-r--r-- include/net/net_trackers.h | 18
-rw-r--r-- include/net/netevent.h | 39
-rw-r--r-- include/net/netfilter/br_netfilter.h | 77
-rw-r--r-- include/net/netfilter/ipv4/nf_conntrack_ipv4.h | 29
-rw-r--r-- include/net/netfilter/ipv4/nf_defrag_ipv4.h | 9
-rw-r--r-- include/net/netfilter/ipv4/nf_dup_ipv4.h | 11
-rw-r--r-- include/net/netfilter/ipv4/nf_reject.h | 31
-rw-r--r-- include/net/netfilter/ipv6/nf_conntrack_ipv6.h | 7
-rw-r--r-- include/net/netfilter/ipv6/nf_defrag_ipv6.h | 22
-rw-r--r-- include/net/netfilter/ipv6/nf_dup_ipv6.h | 10
-rw-r--r-- include/net/netfilter/ipv6/nf_reject.h | 31
-rw-r--r-- include/net/netfilter/nf_conntrack.h | 372
-rw-r--r-- include/net/netfilter/nf_conntrack_acct.h | 83
-rw-r--r-- include/net/netfilter/nf_conntrack_act_ct.h | 54
-rw-r--r-- include/net/netfilter/nf_conntrack_bpf.h | 46
-rw-r--r-- include/net/netfilter/nf_conntrack_bridge.h | 19
-rw-r--r-- include/net/netfilter/nf_conntrack_core.h | 104
-rw-r--r-- include/net/netfilter/nf_conntrack_count.h | 40
-rw-r--r-- include/net/netfilter/nf_conntrack_ecache.h | 175
-rw-r--r-- include/net/netfilter/nf_conntrack_expect.h | 138
-rw-r--r-- include/net/netfilter/nf_conntrack_extend.h | 79
-rw-r--r-- include/net/netfilter/nf_conntrack_helper.h | 181
-rw-r--r-- include/net/netfilter/nf_conntrack_l4proto.h | 247
-rw-r--r-- include/net/netfilter/nf_conntrack_labels.h | 63
-rw-r--r-- include/net/netfilter/nf_conntrack_seqadj.h | 45
-rw-r--r-- include/net/netfilter/nf_conntrack_synproxy.h | 48
-rw-r--r-- include/net/netfilter/nf_conntrack_timeout.h | 111
-rw-r--r-- include/net/netfilter/nf_conntrack_timestamp.h | 47
-rw-r--r-- include/net/netfilter/nf_conntrack_tuple.h | 187
-rw-r--r-- include/net/netfilter/nf_conntrack_zones.h | 89
-rw-r--r-- include/net/netfilter/nf_dup_netdev.h | 16
-rw-r--r-- include/net/netfilter/nf_flow_table.h | 375
-rw-r--r-- include/net/netfilter/nf_hooks_lwtunnel.h | 7
-rw-r--r-- include/net/netfilter/nf_log.h | 101
-rw-r--r-- include/net/netfilter/nf_nat.h | 115
-rw-r--r-- include/net/netfilter/nf_nat_helper.h | 42
-rw-r--r-- include/net/netfilter/nf_nat_masquerade.h | 20
-rw-r--r-- include/net/netfilter/nf_nat_redirect.h | 15
-rw-r--r-- include/net/netfilter/nf_queue.h | 130
-rw-r--r-- include/net/netfilter/nf_reject.h | 43
-rw-r--r-- include/net/netfilter/nf_socket.h | 13
-rw-r--r-- include/net/netfilter/nf_synproxy.h | 89
-rw-r--r-- include/net/netfilter/nf_tables.h | 1745
-rw-r--r-- include/net/netfilter/nf_tables_core.h | 151
-rw-r--r-- include/net/netfilter/nf_tables_ipv4.h | 86
-rw-r--r-- include/net/netfilter/nf_tables_ipv6.h | 115
-rw-r--r-- include/net/netfilter/nf_tables_offload.h | 100
-rw-r--r-- include/net/netfilter/nf_tproxy.h | 128
-rw-r--r-- include/net/netfilter/nft_fib.h | 43
-rw-r--r-- include/net/netfilter/nft_meta.h | 49
-rw-r--r-- include/net/netfilter/nft_reject.h | 30
-rw-r--r-- include/net/netfilter/xt_rateest.h | 27
-rw-r--r-- include/net/netlabel.h | 682
-rw-r--r-- include/net/netlink.h | 2004
-rw-r--r-- include/net/netns/bpf.h | 28
-rw-r--r-- include/net/netns/can.h | 41
-rw-r--r-- include/net/netns/conntrack.h | 113
-rw-r--r-- include/net/netns/core.h | 22
-rw-r--r-- include/net/netns/flow_table.h | 14
-rw-r--r-- include/net/netns/generic.h | 52
-rw-r--r-- include/net/netns/hash.h | 11
-rw-r--r-- include/net/netns/ieee802154_6lowpan.h | 22
-rw-r--r-- include/net/netns/ipv4.h | 234
-rw-r--r-- include/net/netns/ipv6.h | 129
-rw-r--r-- include/net/netns/mctp.h | 37
-rw-r--r-- include/net/netns/mib.h | 45
-rw-r--r-- include/net/netns/mpls.h | 23
-rw-r--r-- include/net/netns/netfilter.h | 34
-rw-r--r-- include/net/netns/nexthop.h | 20
-rw-r--r-- include/net/netns/nftables.h | 11
-rw-r--r-- include/net/netns/packet.h | 16
-rw-r--r-- include/net/netns/sctp.h | 180
-rw-r--r-- include/net/netns/smc.h | 26
-rw-r--r-- include/net/netns/unix.h | 22
-rw-r--r-- include/net/netns/xdp.h | 13
-rw-r--r-- include/net/netns/xfrm.h | 88
-rw-r--r-- include/net/netprio_cgroup.h | 54
-rw-r--r-- include/net/netrom.h | 273
-rw-r--r-- include/net/nexthop.h | 556
-rw-r--r-- include/net/nfc/digital.h | 265
-rw-r--r-- include/net/nfc/hci.h | 275
-rw-r--r-- include/net/nfc/llc.h | 37
-rw-r--r-- include/net/nfc/nci.h | 561
-rw-r--r-- include/net/nfc/nci_core.h | 465
-rw-r--r-- include/net/nfc/nfc.h | 352
-rw-r--r-- include/net/nl802154.h | 451
-rw-r--r-- include/net/nsh.h | 310
-rw-r--r-- include/net/p8022.h | 19
-rw-r--r-- include/net/page_pool.h | 386
-rw-r--r-- include/net/phonet/gprs.h | 25
-rw-r--r-- include/net/phonet/pep.h | 159
-rw-r--r-- include/net/phonet/phonet.h | 112
-rw-r--r-- include/net/phonet/pn_dev.h | 54
-rw-r--r-- include/net/pie.h | 135
-rw-r--r-- include/net/ping.h | 87
-rw-r--r-- include/net/pkt_cls.h | 1069
-rw-r--r-- include/net/pkt_sched.h | 250
-rw-r--r-- include/net/pptp.h | 27
-rw-r--r-- include/net/protocol.h | 116
-rw-r--r-- include/net/psample.h | 51
-rw-r--r-- include/net/psnap.h | 17
-rw-r--r-- include/net/raw.h | 102
-rw-r--r-- include/net/rawv6.h | 28
-rw-r--r-- include/net/red.h | 465
-rw-r--r-- include/net/regulatory.h | 240
-rw-r--r-- include/net/request_sock.h | 241
-rw-r--r-- include/net/rose.h | 250
-rw-r--r-- include/net/route.h | 402
-rw-r--r-- include/net/rpl.h | 37
-rw-r--r-- include/net/rsi_91x.h | 56
-rw-r--r-- include/net/rtnetlink.h | 198
-rw-r--r-- include/net/rtnh.h | 34
-rw-r--r-- include/net/sch_generic.h | 1308
-rw-r--r-- include/net/scm.h | 156
-rw-r--r-- include/net/sctp/auth.h | 113
-rw-r--r-- include/net/sctp/checksum.h | 66
-rw-r--r-- include/net/sctp/command.h | 236
-rw-r--r-- include/net/sctp/constants.h | 445
-rw-r--r-- include/net/sctp/sctp.h | 679
-rw-r--r-- include/net/sctp/sm.h | 432
-rw-r--r-- include/net/sctp/stream_interleave.h | 46
-rw-r--r-- include/net/sctp/stream_sched.h | 64
-rw-r--r-- include/net/sctp/structs.h | 2205
-rw-r--r-- include/net/sctp/tsnmap.h | 157
-rw-r--r-- include/net/sctp/ulpevent.h | 189
-rw-r--r-- include/net/sctp/ulpqueue.h | 69
-rw-r--r-- include/net/secure_seq.h | 24
-rw-r--r-- include/net/seg6.h | 87
-rw-r--r-- include/net/seg6_hmac.h | 57
-rw-r--r-- include/net/seg6_local.h | 29
-rw-r--r-- include/net/selftests.h | 31
-rw-r--r-- include/net/slhc_vj.h | 184
-rw-r--r-- include/net/smc.h | 105
-rw-r--r-- include/net/snmp.h | 202
-rw-r--r-- include/net/sock.h | 3057
-rw-r--r-- include/net/sock_reuseport.h | 64
-rw-r--r-- include/net/stp.h | 17
-rw-r--r-- include/net/strparser.h | 170
-rw-r--r-- include/net/switchdev.h | 524
-rw-r--r-- include/net/tc_act/tc_bpf.h | 24
-rw-r--r-- include/net/tc_act/tc_connmark.h | 15
-rw-r--r-- include/net/tc_act/tc_csum.h | 41
-rw-r--r-- include/net/tc_act/tc_ct.h | 93
-rw-r--r-- include/net/tc_act/tc_ctinfo.h | 33
-rw-r--r-- include/net/tc_act/tc_defact.h | 14
-rw-r--r-- include/net/tc_act/tc_gact.h | 77
-rw-r--r-- include/net/tc_act/tc_gate.h | 141
-rw-r--r-- include/net/tc_act/tc_ife.h | 67
-rw-r--r-- include/net/tc_act/tc_ipt.h | 17
-rw-r--r-- include/net/tc_act/tc_mirred.h | 59
-rw-r--r-- include/net/tc_act/tc_mpls.h | 105
-rw-r--r-- include/net/tc_act/tc_nat.h | 19
-rw-r--r-- include/net/tc_act/tc_pedit.h | 119
-rw-r--r-- include/net/tc_act/tc_police.h | 192
-rw-r--r-- include/net/tc_act/tc_sample.h | 44
-rw-r--r-- include/net/tc_act/tc_skbedit.h | 110
-rw-r--r-- include/net/tc_act/tc_skbmod.h | 26
-rw-r--r-- include/net/tc_act/tc_tunnel_key.h | 86
-rw-r--r-- include/net/tc_act/tc_vlan.h | 91
-rw-r--r-- include/net/tcp.h | 2488
-rw-r--r-- include/net/tcp_states.h | 48
-rw-r--r-- include/net/timewait_sock.h | 36
-rw-r--r-- include/net/tipc.h | 62
-rw-r--r-- include/net/tls.h | 516
-rw-r--r-- include/net/tls_toe.h | 77
-rw-r--r-- include/net/transp_v6.h | 70
-rw-r--r-- include/net/tso.h | 25
-rw-r--r-- include/net/tun_proto.h | 50
-rw-r--r-- include/net/udp.h | 527
-rw-r--r-- include/net/udp_tunnel.h | 379
-rw-r--r-- include/net/udplite.h | 86
-rw-r--r-- include/net/vsock_addr.h | 22
-rw-r--r-- include/net/vxlan.h | 574
-rw-r--r-- include/net/wext.h | 61
-rw-r--r-- include/net/x25.h | 327
-rw-r--r-- include/net/x25device.h | 18
-rw-r--r-- include/net/xdp.h | 412
-rw-r--r-- include/net/xdp_priv.h | 19
-rw-r--r-- include/net/xdp_sock.h | 102
-rw-r--r-- include/net/xdp_sock_drv.h | 289
-rw-r--r-- include/net/xfrm.h | 2088
-rw-r--r-- include/net/xsk_buff_pool.h | 224
342 files changed, 98395 insertions, 0 deletions
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
new file mode 100644
index 000000000..c80539be1
--- /dev/null
+++ b/include/net/6lowpan.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright 2011, Siemens AG
+ * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+/*
+ * Based on patches from Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* Jon's code is based on 6lowpan implementation for Contiki which is:
+ * Copyright (c) 2008, Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Institute nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __6LOWPAN_H__
+#define __6LOWPAN_H__
+
+#include <linux/debugfs.h>
+
+#include <net/ipv6.h>
+#include <net/net_namespace.h>
+
+/* special link-layer handling */
+#include <net/mac802154.h>
+
+#define EUI64_ADDR_LEN 8
+
+#define LOWPAN_NHC_MAX_ID_LEN 1
+/* Maximum next header compression length which we currently support,
+ * inclusive of possible inline data.
+ */
+#define LOWPAN_NHC_MAX_HDR_LEN (sizeof(struct udphdr))
+/* Max IPHC Header len without IPv6 hdr specific inline data.
+ * Useful for getting the "extra" bytes we need at worst case compression.
+ *
+ * LOWPAN_IPHC + CID + LOWPAN_NHC_MAX_ID_LEN
+ */
+#define LOWPAN_IPHC_MAX_HEADER_LEN (2 + 1 + LOWPAN_NHC_MAX_ID_LEN)
+/* Maximum worst case IPHC header buffer size */
+#define LOWPAN_IPHC_MAX_HC_BUF_LEN (sizeof(struct ipv6hdr) + \
+ LOWPAN_IPHC_MAX_HEADER_LEN + \
+ LOWPAN_NHC_MAX_HDR_LEN)
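As a worked example, assuming the usual sizeof(struct ipv6hdr) == 40 and sizeof(struct udphdr) == 8, these macros evaluate to:

	LOWPAN_IPHC_MAX_HEADER_LEN = 2 + 1 + 1 = 4 bytes
	LOWPAN_IPHC_MAX_HC_BUF_LEN = 40 + 4 + 8 = 52 bytes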
+/* SCI/DCI is 4 bit width, so we have maximum 16 entries */
+#define LOWPAN_IPHC_CTX_TABLE_SIZE (1 << 4)
+
+#define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */
+#define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... */
+#define LOWPAN_DISPATCH_IPHC_MASK 0xe0
+
+static inline bool lowpan_is_ipv6(u8 dispatch)
+{
+ return dispatch == LOWPAN_DISPATCH_IPV6;
+}
+
+static inline bool lowpan_is_iphc(u8 dispatch)
+{
+ return (dispatch & LOWPAN_DISPATCH_IPHC_MASK) == LOWPAN_DISPATCH_IPHC;
+}
+
+#define LOWPAN_PRIV_SIZE(llpriv_size) \
+ (sizeof(struct lowpan_dev) + llpriv_size)
+
+enum lowpan_lltypes {
+ LOWPAN_LLTYPE_BTLE,
+ LOWPAN_LLTYPE_IEEE802154,
+};
+
+enum lowpan_iphc_ctx_flags {
+ LOWPAN_IPHC_CTX_FLAG_ACTIVE,
+ LOWPAN_IPHC_CTX_FLAG_COMPRESSION,
+};
+
+struct lowpan_iphc_ctx {
+ u8 id;
+ struct in6_addr pfx;
+ u8 plen;
+ unsigned long flags;
+};
+
+struct lowpan_iphc_ctx_table {
+ spinlock_t lock;
+ const struct lowpan_iphc_ctx_ops *ops;
+ struct lowpan_iphc_ctx table[LOWPAN_IPHC_CTX_TABLE_SIZE];
+};
+
+static inline bool lowpan_iphc_ctx_is_active(const struct lowpan_iphc_ctx *ctx)
+{
+ return test_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, &ctx->flags);
+}
+
+static inline bool
+lowpan_iphc_ctx_is_compression(const struct lowpan_iphc_ctx *ctx)
+{
+ return test_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);
+}
+
+struct lowpan_dev {
+ enum lowpan_lltypes lltype;
+ struct dentry *iface_debugfs;
+ struct lowpan_iphc_ctx_table ctx;
+
+ /* must be last */
+ u8 priv[] __aligned(sizeof(void *));
+};
+
+struct lowpan_802154_neigh {
+ __le16 short_addr;
+};
+
+static inline
+struct lowpan_802154_neigh *lowpan_802154_neigh(void *neigh_priv)
+{
+ return neigh_priv;
+}
+
+static inline
+struct lowpan_dev *lowpan_dev(const struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+/* private device info */
+struct lowpan_802154_dev {
+ struct net_device *wdev; /* wpan device ptr */
+ u16 fragment_tag;
+};
+
+static inline struct
+lowpan_802154_dev *lowpan_802154_dev(const struct net_device *dev)
+{
+ return (struct lowpan_802154_dev *)lowpan_dev(dev)->priv;
+}
+
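For illustration, a minimal sketch of how a link layer might allocate a lowpan net_device whose private area embeds struct lowpan_802154_dev; example_setup is a hypothetical setup callback, not part of this header:

	struct net_device *ldev;

	/* reserve room for struct lowpan_dev plus the 802.15.4 private part */
	ldev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_802154_dev)),
			    "lowpan%d", NET_NAME_ENUM, example_setup);
	if (!ldev)
		return -ENOMEM;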
+struct lowpan_802154_cb {
+ u16 d_tag;
+ unsigned int d_size;
+ u8 d_offset;
+};
+
+static inline
+struct lowpan_802154_cb *lowpan_802154_cb(const struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(skb->cb));
+ return (struct lowpan_802154_cb *)skb->cb;
+}
+
+static inline void lowpan_iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr,
+ const void *lladdr)
+{
+ /* fe80::XXXX:XXXX:XXXX:XXXX
+ * \_________________/
+ * hwaddr
+ */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ memcpy(&ipaddr->s6_addr[8], lladdr, EUI64_ADDR_LEN);
+ /* second bit-flip (Universal/Local)
+ * is done according to RFC 2464
+ */
+ ipaddr->s6_addr[8] ^= 0x02;
+}
+
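As a concrete, illustrative example: for the EUI-64 link-layer address 00:11:22:33:44:55:66:77 this helper yields fe80::211:2233:4455:6677, because the Universal/Local bit flip turns the leading 0x00 byte into 0x02.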
+static inline void lowpan_iphc_uncompress_eui48_lladdr(struct in6_addr *ipaddr,
+ const void *lladdr)
+{
+ /* fe80::XXXX:XXff:feXX:XXXX
+ * \_________________/
+ * hwaddr
+ */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ memcpy(&ipaddr->s6_addr[8], lladdr, 3);
+ ipaddr->s6_addr[11] = 0xFF;
+ ipaddr->s6_addr[12] = 0xFE;
+ memcpy(&ipaddr->s6_addr[13], lladdr + 3, 3);
+}
+
+#ifdef DEBUG
+/* print data in line */
+static inline void raw_dump_inline(const char *caller, char *msg,
+ const unsigned char *buf, int len)
+{
+ if (msg)
+ pr_debug("%s():%s: ", caller, msg);
+
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, buf, len, false);
+}
+
+/* print data in a table format:
+ *
+ * addr: xx xx xx xx xx xx
+ * addr: xx xx xx xx xx xx
+ * ...
+ */
+static inline void raw_dump_table(const char *caller, char *msg,
+ const unsigned char *buf, int len)
+{
+ if (msg)
+ pr_debug("%s():%s:\n", caller, msg);
+
+ print_hex_dump_debug("\t", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false);
+}
+#else
+static inline void raw_dump_table(const char *caller, char *msg,
+ const unsigned char *buf, int len) { }
+static inline void raw_dump_inline(const char *caller, char *msg,
+ const unsigned char *buf, int len) { }
+#endif
+
+/**
+ * lowpan_fetch_skb - fetch inline data from a 6LoWPAN header
+ *
+ * This function pulls data from the sk buffer and copies it into @data,
+ * removing the 6LoWPAN inline data from the buffer. It returns true if
+ * the sk buffer is too small to pull the amount of data specified by
+ * @len.
+ *
+ * @skb: the buffer where the inline data should be pulled from.
+ * @data: destination buffer for the inline data.
+ * @len: amount of data which should be pulled in bytes.
+ */
+static inline bool lowpan_fetch_skb(struct sk_buff *skb, void *data,
+ unsigned int len)
+{
+ if (unlikely(!pskb_may_pull(skb, len)))
+ return true;
+
+ skb_copy_from_linear_data(skb, data, len);
+ skb_pull(skb, len);
+
+ return false;
+}
+
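A hedged sketch of how a receive path might use this helper to pull the dispatch byte; the function name and its return convention are illustrative, not part of this header:

	static int example_check_dispatch(struct sk_buff *skb)
	{
		u8 dispatch;

		/* lowpan_fetch_skb() returns true when the skb is too short */
		if (lowpan_fetch_skb(skb, &dispatch, sizeof(dispatch)))
			return -EINVAL;

		/* uncompressed IPv6 payload follows the dispatch byte */
		return lowpan_is_ipv6(dispatch) ? 0 : -EOPNOTSUPP;
	}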
+static inline bool lowpan_802154_is_valid_src_short_addr(__le16 addr)
+{
+ /* First bit of addr is multicast, reserved or 802.15.4 specific */
+ return !(addr & cpu_to_le16(0x8000));
+}
+
+static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data,
+ const size_t len)
+{
+ memcpy(*hc_ptr, data, len);
+ *hc_ptr += len;
+}
+
+int lowpan_register_netdevice(struct net_device *dev,
+ enum lowpan_lltypes lltype);
+int lowpan_register_netdev(struct net_device *dev,
+ enum lowpan_lltypes lltype);
+void lowpan_unregister_netdevice(struct net_device *dev);
+void lowpan_unregister_netdev(struct net_device *dev);
+
+/**
+ * lowpan_header_decompress - replace 6LoWPAN header with IPv6 header
+ *
+ * This function replaces the IPHC 6LoWPAN header, which should be pointed
+ * at by skb->data and skb_network_header, with the IPv6 header.
+ * Ideally the caller provides enough headroom for the IPv6 header plus the
+ * largest transport-layer header; this reduces the overhead of
+ * reallocating headroom.
+ *
+ * @skb: the buffer which should be manipulated.
+ * @dev: the lowpan net device pointer.
+ * @daddr: destination lladdr of mac header which is used for compression
+ * methods.
+ * @saddr: source lladdr of mac header which is used for compression
+ * methods.
+ */
+int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
+ const void *daddr, const void *saddr);
+
+/**
+ * lowpan_header_compress - replace IPv6 header with 6LoWPAN header
+ *
+ * This function replaces the IPv6 header which should be pointed at
+ * skb->data and skb_network_header, with the IPHC 6LoWPAN header.
+ * The caller needs to make sure that the sk buffer is not shared and has
+ * at least LOWPAN_IPHC_MAX_HEADER_LEN bytes of headroom, which is the
+ * worst-case number of bytes by which the IPHC header can exceed the
+ * IPv6 header.
+ *
+ * @skb: the buffer which should be manipulated.
+ * @dev: the lowpan net device pointer.
+ * @daddr: destination lladdr of mac header which is used for compression
+ * methods.
+ * @saddr: source lladdr of mac header which is used for compression
+ * methods.
+ */
+int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
+ const void *daddr, const void *saddr);
+
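A minimal sketch of the intended receive-side call pattern, assuming daddr/saddr were taken from the link-layer header; example_rx and its error handling are hypothetical:

	static int example_rx(struct sk_buff *skb, struct net_device *dev,
			      const void *daddr, const void *saddr)
	{
		int ret;

		/* replace the IPHC header at skb->data with a full IPv6 header */
		ret = lowpan_header_decompress(skb, dev, daddr, saddr);
		if (ret < 0)
			return ret;

		skb->protocol = htons(ETH_P_IPV6);
		return netif_rx(skb);
	}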
+#endif /* __6LOWPAN_H__ */
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
new file mode 100644
index 000000000..13abe013a
--- /dev/null
+++ b/include/net/9p/9p.h
@@ -0,0 +1,557 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * 9P protocol definitions.
+ *
+ * Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
+ * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
+ * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
+ */
+
+#ifndef NET_9P_H
+#define NET_9P_H
+
+/**
+ * enum p9_debug_flags - bits for mount time debug parameter
+ * @P9_DEBUG_ERROR: more verbose error messages including original error string
+ * @P9_DEBUG_9P: 9P protocol tracing
+ * @P9_DEBUG_VFS: VFS API tracing
+ * @P9_DEBUG_CONV: protocol conversion tracing
+ * @P9_DEBUG_MUX: trace management of concurrent transactions
+ * @P9_DEBUG_TRANS: transport tracing
+ * @P9_DEBUG_SLABS: memory management tracing
+ * @P9_DEBUG_FCALL: verbose dump of protocol messages
+ * @P9_DEBUG_FID: fid allocation/deallocation tracking
+ * @P9_DEBUG_PKT: packet marshalling/unmarshalling
+ * @P9_DEBUG_FSC: FS-cache tracing
+ * @P9_DEBUG_VPKT: Verbose packet debugging (full packet dump)
+ *
+ * These flags are passed at mount time to turn on various levels of
+ * verbosity and tracing which will be output to the system logs.
+ */
+
+enum p9_debug_flags {
+ P9_DEBUG_ERROR = (1<<0),
+ P9_DEBUG_9P = (1<<2),
+ P9_DEBUG_VFS = (1<<3),
+ P9_DEBUG_CONV = (1<<4),
+ P9_DEBUG_MUX = (1<<5),
+ P9_DEBUG_TRANS = (1<<6),
+ P9_DEBUG_SLABS = (1<<7),
+ P9_DEBUG_FCALL = (1<<8),
+ P9_DEBUG_FID = (1<<9),
+ P9_DEBUG_PKT = (1<<10),
+ P9_DEBUG_FSC = (1<<11),
+ P9_DEBUG_VPKT = (1<<12),
+};
+
+#ifdef CONFIG_NET_9P_DEBUG
+extern unsigned int p9_debug_level;
+__printf(3, 4)
+void _p9_debug(enum p9_debug_flags level, const char *func,
+ const char *fmt, ...);
+#define p9_debug(level, fmt, ...) \
+ _p9_debug(level, __func__, fmt, ##__VA_ARGS__)
+#else
+#define p9_debug(level, fmt, ...) \
+ no_printk(fmt, ##__VA_ARGS__)
+#endif
+
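For example, a call site might trace protocol activity like this (an illustrative fragment; the variables come from a hypothetical caller, and output only appears when CONFIG_NET_9P_DEBUG is set and the P9_DEBUG_9P bit is enabled in the mount-time debug mask):

	/* fid, newfid and nwname come from the surrounding (hypothetical) caller */
	p9_debug(P9_DEBUG_9P, ">>> TWALK fid %d newfid %d nwname %d\n",
		 fid->fid, newfid, nwname);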
+/**
+ * enum p9_msg_t - 9P message types
+ * @P9_TLERROR: not used
+ * @P9_RLERROR: response for any failed request for 9P2000.L
+ * @P9_TSTATFS: file system status request
+ * @P9_RSTATFS: file system status response
+ * @P9_TSYMLINK: make symlink request
+ * @P9_RSYMLINK: make symlink response
+ * @P9_TMKNOD: create a special file object request
+ * @P9_RMKNOD: create a special file object response
+ * @P9_TLCREATE: prepare a handle for I/O on a new file for 9P2000.L
+ * @P9_RLCREATE: response with file access information for 9P2000.L
+ * @P9_TRENAME: rename request
+ * @P9_RRENAME: rename response
+ * @P9_TMKDIR: create a directory request
+ * @P9_RMKDIR: create a directory response
+ * @P9_TVERSION: version handshake request
+ * @P9_RVERSION: version handshake response
+ * @P9_TAUTH: request to establish authentication channel
+ * @P9_RAUTH: response with authentication information
+ * @P9_TATTACH: establish user access to file service
+ * @P9_RATTACH: response with top level handle to file hierarchy
+ * @P9_TERROR: not used
+ * @P9_RERROR: response for any failed request
+ * @P9_TFLUSH: request to abort a previous request
+ * @P9_RFLUSH: response when previous request has been cancelled
+ * @P9_TWALK: descend a directory hierarchy
+ * @P9_RWALK: response with new handle for position within hierarchy
+ * @P9_TOPEN: prepare a handle for I/O on an existing file
+ * @P9_ROPEN: response with file access information
+ * @P9_TCREATE: prepare a handle for I/O on a new file
+ * @P9_RCREATE: response with file access information
+ * @P9_TREAD: request to transfer data from a file or directory
+ * @P9_RREAD: response with data requested
+ * @P9_TWRITE: request to transfer data to a file
+ * @P9_RWRITE: response with how much data was transferred to the file
+ * @P9_TCLUNK: forget about a handle to an entity within the file system
+ * @P9_RCLUNK: response when server has forgotten about the handle
+ * @P9_TREMOVE: request to remove an entity from the hierarchy
+ * @P9_RREMOVE: response when server has removed the entity
+ * @P9_TSTAT: request file entity attributes
+ * @P9_RSTAT: response with file entity attributes
+ * @P9_TWSTAT: request to update file entity attributes
+ * @P9_RWSTAT: response when file entity attributes are updated
+ *
+ * There are 14 basic operations in 9P2000, paired as
+ * requests and responses. The one special case is ERROR
+ * as there is no @P9_TERROR request for clients to transmit to
+ * the server, but the server may respond to any other request
+ * with an @P9_RERROR.
+ *
+ * See Also: http://plan9.bell-labs.com/sys/man/5/INDEX.html
+ */
+
+enum p9_msg_t {
+ P9_TLERROR = 6,
+ P9_RLERROR,
+ P9_TSTATFS = 8,
+ P9_RSTATFS,
+ P9_TLOPEN = 12,
+ P9_RLOPEN,
+ P9_TLCREATE = 14,
+ P9_RLCREATE,
+ P9_TSYMLINK = 16,
+ P9_RSYMLINK,
+ P9_TMKNOD = 18,
+ P9_RMKNOD,
+ P9_TRENAME = 20,
+ P9_RRENAME,
+ P9_TREADLINK = 22,
+ P9_RREADLINK,
+ P9_TGETATTR = 24,
+ P9_RGETATTR,
+ P9_TSETATTR = 26,
+ P9_RSETATTR,
+ P9_TXATTRWALK = 30,
+ P9_RXATTRWALK,
+ P9_TXATTRCREATE = 32,
+ P9_RXATTRCREATE,
+ P9_TREADDIR = 40,
+ P9_RREADDIR,
+ P9_TFSYNC = 50,
+ P9_RFSYNC,
+ P9_TLOCK = 52,
+ P9_RLOCK,
+ P9_TGETLOCK = 54,
+ P9_RGETLOCK,
+ P9_TLINK = 70,
+ P9_RLINK,
+ P9_TMKDIR = 72,
+ P9_RMKDIR,
+ P9_TRENAMEAT = 74,
+ P9_RRENAMEAT,
+ P9_TUNLINKAT = 76,
+ P9_RUNLINKAT,
+ P9_TVERSION = 100,
+ P9_RVERSION,
+ P9_TAUTH = 102,
+ P9_RAUTH,
+ P9_TATTACH = 104,
+ P9_RATTACH,
+ P9_TERROR = 106,
+ P9_RERROR,
+ P9_TFLUSH = 108,
+ P9_RFLUSH,
+ P9_TWALK = 110,
+ P9_RWALK,
+ P9_TOPEN = 112,
+ P9_ROPEN,
+ P9_TCREATE = 114,
+ P9_RCREATE,
+ P9_TREAD = 116,
+ P9_RREAD,
+ P9_TWRITE = 118,
+ P9_RWRITE,
+ P9_TCLUNK = 120,
+ P9_RCLUNK,
+ P9_TREMOVE = 122,
+ P9_RREMOVE,
+ P9_TSTAT = 124,
+ P9_RSTAT,
+ P9_TWSTAT = 126,
+ P9_RWSTAT,
+};
+
+/**
+ * enum p9_open_mode_t - 9P open modes
+ * @P9_OREAD: open file for reading only
+ * @P9_OWRITE: open file for writing only
+ * @P9_ORDWR: open file for reading or writing
+ * @P9_OEXEC: open file for execution
+ * @P9_OTRUNC: truncate file to zero-length before opening it
+ * @P9_OREXEC: close the file when an exec(2) system call is made
+ * @P9_ORCLOSE: remove the file when the file is closed
+ * @P9_OAPPEND: open the file and seek to the end
+ * @P9_OEXCL: only create a file, do not open it
+ *
+ * 9P open modes differ slightly from Posix standard modes.
+ * In particular, there are extra modes which specify different
+ * semantic behaviors than may be available on standard Posix
+ * systems. For example, @P9_OREXEC and @P9_ORCLOSE are modes that
+ * most likely will not be issued from the Linux VFS client, but may
+ * be supported by servers.
+ *
+ * See Also: http://plan9.bell-labs.com/magic/man2html/2/open
+ */
+
+enum p9_open_mode_t {
+ P9_OREAD = 0x00,
+ P9_OWRITE = 0x01,
+ P9_ORDWR = 0x02,
+ P9_OEXEC = 0x03,
+ P9_OTRUNC = 0x10,
+ P9_OREXEC = 0x20,
+ P9_ORCLOSE = 0x40,
+ P9_OAPPEND = 0x80,
+ P9_OEXCL = 0x1000,
+};
+
+/**
+ * enum p9_perm_t - 9P permissions
+ * @P9_DMDIR: mode bit for directories
+ * @P9_DMAPPEND: mode bit for is append-only
+ * @P9_DMEXCL: mode bit for exclusive use (only one open handle allowed)
+ * @P9_DMMOUNT: mode bit for mount points
+ * @P9_DMAUTH: mode bit for authentication file
+ * @P9_DMTMP: mode bit for non-backed-up files
+ * @P9_DMSYMLINK: mode bit for symbolic links (9P2000.u)
+ * @P9_DMLINK: mode bit for hard-link (9P2000.u)
+ * @P9_DMDEVICE: mode bit for device files (9P2000.u)
+ * @P9_DMNAMEDPIPE: mode bit for named pipe (9P2000.u)
+ * @P9_DMSOCKET: mode bit for socket (9P2000.u)
+ * @P9_DMSETUID: mode bit for setuid (9P2000.u)
+ * @P9_DMSETGID: mode bit for setgid (9P2000.u)
+ * @P9_DMSETVTX: mode bit for sticky bit (9P2000.u)
+ *
+ * 9P permissions differ slightly from Posix standard modes.
+ *
+ * See Also: http://plan9.bell-labs.com/magic/man2html/2/stat
+ */
+enum p9_perm_t {
+ P9_DMDIR = 0x80000000,
+ P9_DMAPPEND = 0x40000000,
+ P9_DMEXCL = 0x20000000,
+ P9_DMMOUNT = 0x10000000,
+ P9_DMAUTH = 0x08000000,
+ P9_DMTMP = 0x04000000,
+/* 9P2000.u extensions */
+ P9_DMSYMLINK = 0x02000000,
+ P9_DMLINK = 0x01000000,
+ P9_DMDEVICE = 0x00800000,
+ P9_DMNAMEDPIPE = 0x00200000,
+ P9_DMSOCKET = 0x00100000,
+ P9_DMSETUID = 0x00080000,
+ P9_DMSETGID = 0x00040000,
+ P9_DMSETVTX = 0x00010000,
+};
+
+/* 9p2000.L open flags */
+#define P9_DOTL_RDONLY 00000000
+#define P9_DOTL_WRONLY 00000001
+#define P9_DOTL_RDWR 00000002
+#define P9_DOTL_NOACCESS 00000003
+#define P9_DOTL_CREATE 00000100
+#define P9_DOTL_EXCL 00000200
+#define P9_DOTL_NOCTTY 00000400
+#define P9_DOTL_TRUNC 00001000
+#define P9_DOTL_APPEND 00002000
+#define P9_DOTL_NONBLOCK 00004000
+#define P9_DOTL_DSYNC 00010000
+#define P9_DOTL_FASYNC 00020000
+#define P9_DOTL_DIRECT 00040000
+#define P9_DOTL_LARGEFILE 00100000
+#define P9_DOTL_DIRECTORY 00200000
+#define P9_DOTL_NOFOLLOW 00400000
+#define P9_DOTL_NOATIME 01000000
+#define P9_DOTL_CLOEXEC 02000000
+#define P9_DOTL_SYNC 04000000
+
+/* 9p2000.L at flags */
+#define P9_DOTL_AT_REMOVEDIR 0x200
+
+/* 9p2000.L lock type */
+#define P9_LOCK_TYPE_RDLCK 0
+#define P9_LOCK_TYPE_WRLCK 1
+#define P9_LOCK_TYPE_UNLCK 2
+
+/**
+ * enum p9_qid_t - QID types
+ * @P9_QTDIR: directory
+ * @P9_QTAPPEND: append-only
+ * @P9_QTEXCL: exclusive use (only one open handle allowed)
+ * @P9_QTMOUNT: mount points
+ * @P9_QTAUTH: authentication file
+ * @P9_QTTMP: non-backed-up files
+ * @P9_QTSYMLINK: symbolic links (9P2000.u)
+ * @P9_QTLINK: hard-link (9P2000.u)
+ * @P9_QTFILE: normal files
+ *
+ * QID types are a subset of permissions - they are primarily
+ * used to differentiate semantics for a file system entity via
+ * a jump-table. Their value is also the most significant 16 bits
+ * of the permission_t
+ *
+ * See Also: http://plan9.bell-labs.com/magic/man2html/2/stat
+ */
+enum p9_qid_t {
+ P9_QTDIR = 0x80,
+ P9_QTAPPEND = 0x40,
+ P9_QTEXCL = 0x20,
+ P9_QTMOUNT = 0x10,
+ P9_QTAUTH = 0x08,
+ P9_QTTMP = 0x04,
+ P9_QTSYMLINK = 0x02,
+ P9_QTLINK = 0x01,
+ P9_QTFILE = 0x00,
+};
+
+/* 9P Magic Numbers */
+#define P9_NOTAG ((u16)(~0))
+#define P9_NOFID ((u32)(~0))
+#define P9_MAXWELEM 16
+
+/* Minimal header size: size[4] type[1] tag[2] */
+#define P9_HDRSZ 7
+
+/* ample room for Twrite/Rread header */
+#define P9_IOHDRSZ 24
+
+/* Room for readdir header */
+#define P9_READDIRHDRSZ 24
+
+/* size of header for zero copy read/write */
+#define P9_ZC_HDR_SZ 4096
+
+/* maximum length of an error string */
+#define P9_ERRMAX 128
+
+/**
+ * struct p9_qid - file system entity information
+ * @type: 8-bit type &p9_qid_t
+ * @version: 16-bit monotonically incrementing version number
+ * @path: 64-bit per-server-unique ID for a file system element
+ *
+ * qids are identifiers used by 9P servers to track file system
+ * entities. The type is used to differentiate semantics for operations
+ * on the entity (i.e. read means something different on a directory than
+ * on a file). The path provides a server unique index for an entity
+ * (roughly analogous to an inode number), while the version is updated
+ * every time a file is modified and can be used to maintain cache
+ * coherency between clients and servers.
+ * Servers will often differentiate purely synthetic entities by setting
+ * their version to 0, signaling that they should never be cached and
+ * should be accessed synchronously.
+ *
+ * See Also: http://plan9.bell-labs.com/magic/man2html/2/stat
+ */
+
+struct p9_qid {
+ u8 type;
+ u32 version;
+ u64 path;
+};
+
+/**
+ * struct p9_wstat - file system metadata information
+ * @size: length prefix for this stat structure instance
+ * @type: the type of the server (equivalent to a major number)
+ * @dev: the sub-type of the server (equivalent to a minor number)
+ * @qid: unique id from the server of type &p9_qid
+ * @mode: Plan 9 format permissions of type &p9_perm_t
+ * @atime: Last access/read time
+ * @mtime: Last modify/write time
+ * @length: file length
+ * @name: last element of path (aka filename)
+ * @uid: owner name
+ * @gid: group owner
+ * @muid: last modifier
+ * @extension: area used to encode extended UNIX support
+ * @n_uid: numeric user id of owner (part of 9p2000.u extension)
+ * @n_gid: numeric group id (part of 9p2000.u extension)
+ * @n_muid: numeric user id of last modifier (part of 9p2000.u extension)
+ *
+ * See Also: http://plan9.bell-labs.com/magic/man2html/2/stat
+ */
+
+struct p9_wstat {
+ u16 size;
+ u16 type;
+ u32 dev;
+ struct p9_qid qid;
+ u32 mode;
+ u32 atime;
+ u32 mtime;
+ u64 length;
+ const char *name;
+ const char *uid;
+ const char *gid;
+ const char *muid;
+ char *extension; /* 9p2000.u extensions */
+ kuid_t n_uid; /* 9p2000.u extensions */
+ kgid_t n_gid; /* 9p2000.u extensions */
+ kuid_t n_muid; /* 9p2000.u extensions */
+};
+
+struct p9_stat_dotl {
+ u64 st_result_mask;
+ struct p9_qid qid;
+ u32 st_mode;
+ kuid_t st_uid;
+ kgid_t st_gid;
+ u64 st_nlink;
+ u64 st_rdev;
+ u64 st_size;
+ u64 st_blksize;
+ u64 st_blocks;
+ u64 st_atime_sec;
+ u64 st_atime_nsec;
+ u64 st_mtime_sec;
+ u64 st_mtime_nsec;
+ u64 st_ctime_sec;
+ u64 st_ctime_nsec;
+ u64 st_btime_sec;
+ u64 st_btime_nsec;
+ u64 st_gen;
+ u64 st_data_version;
+};
+
+#define P9_STATS_MODE 0x00000001ULL
+#define P9_STATS_NLINK 0x00000002ULL
+#define P9_STATS_UID 0x00000004ULL
+#define P9_STATS_GID 0x00000008ULL
+#define P9_STATS_RDEV 0x00000010ULL
+#define P9_STATS_ATIME 0x00000020ULL
+#define P9_STATS_MTIME 0x00000040ULL
+#define P9_STATS_CTIME 0x00000080ULL
+#define P9_STATS_INO 0x00000100ULL
+#define P9_STATS_SIZE 0x00000200ULL
+#define P9_STATS_BLOCKS 0x00000400ULL
+
+#define P9_STATS_BTIME 0x00000800ULL
+#define P9_STATS_GEN 0x00001000ULL
+#define P9_STATS_DATA_VERSION 0x00002000ULL
+
+#define P9_STATS_BASIC 0x000007ffULL /* Mask for fields up to BLOCKS */
+#define P9_STATS_ALL 0x00003fffULL /* Mask for All fields above */
+
+/**
+ * struct p9_iattr_dotl - P9 inode attribute for setattr
+ * @valid: bitfield specifying which fields are valid
+ * same as in struct iattr
+ * @mode: File permission bits
+ * @uid: user id of owner
+ * @gid: group id
+ * @size: File size
+ * @atime_sec: Last access time, seconds
+ * @atime_nsec: Last access time, nanoseconds
+ * @mtime_sec: Last modification time, seconds
+ * @mtime_nsec: Last modification time, nanoseconds
+ */
+
+struct p9_iattr_dotl {
+ u32 valid;
+ u32 mode;
+ kuid_t uid;
+ kgid_t gid;
+ u64 size;
+ u64 atime_sec;
+ u64 atime_nsec;
+ u64 mtime_sec;
+ u64 mtime_nsec;
+};
+
+#define P9_LOCK_SUCCESS 0
+#define P9_LOCK_BLOCKED 1
+#define P9_LOCK_ERROR 2
+#define P9_LOCK_GRACE 3
+
+#define P9_LOCK_FLAGS_BLOCK 1
+#define P9_LOCK_FLAGS_RECLAIM 2
+
+/* struct p9_flock: POSIX lock structure
+ * @type - type of lock
+ * @flags - lock flags
+ * @start - starting offset of the lock
+ * @length - number of bytes
+ * @proc_id - process id which wants to take lock
+ * @client_id - client id
+ */
+
+struct p9_flock {
+ u8 type;
+ u32 flags;
+ u64 start;
+ u64 length;
+ u32 proc_id;
+ char *client_id;
+};
+
+/* struct p9_getlock: getlock structure
+ * @type - type of lock
+ * @start - starting offset of the lock
+ * @length - number of bytes
+ * @proc_id - process id which wants to take lock
+ * @client_id - client id
+ */
+
+struct p9_getlock {
+ u8 type;
+ u64 start;
+ u64 length;
+ u32 proc_id;
+ char *client_id;
+};
+
+struct p9_rstatfs {
+ u32 type;
+ u32 bsize;
+ u64 blocks;
+ u64 bfree;
+ u64 bavail;
+ u64 files;
+ u64 ffree;
+ u64 fsid;
+ u32 namelen;
+};
+
+/**
+ * struct p9_fcall - primary packet structure
+ * @size: prefixed length of the structure
+ * @id: protocol operating identifier of type &p9_msg_t
+ * @tag: transaction id of the request
+ * @offset: used by marshalling routines to track current position in buffer
+ * @capacity: used by marshalling routines to track total malloc'd capacity
+ * @sdata: payload
+ *
+ * &p9_fcall represents the structure for all 9P RPC
+ * transactions. Requests are packaged into fcalls, and responses
+ * must be extracted from them.
+ *
+ * See Also: http://plan9.bell-labs.com/magic/man2html/2/fcall
+ */
+
+struct p9_fcall {
+ u32 size;
+ u8 id;
+ u16 tag;
+
+ size_t offset;
+ size_t capacity;
+
+ struct kmem_cache *cache;
+ u8 *sdata;
+};
+
+int p9_errstr2errno(char *errstr, int len);
+
+int p9_error_init(void);
+#endif /* NET_9P_H */
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
new file mode 100644
index 000000000..78ebcf782
--- /dev/null
+++ b/include/net/9p/client.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * 9P Client Definitions
+ *
+ * Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com>
+ * Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net>
+ */
+
+#ifndef NET_9P_CLIENT_H
+#define NET_9P_CLIENT_H
+
+#include <linux/utsname.h>
+#include <linux/idr.h>
+#include <linux/tracepoint-defs.h>
+
+/* Number of requests per row */
+#define P9_ROW_MAXTAG 255
+
+/** enum p9_proto_versions - 9P protocol versions
+ * @p9_proto_legacy: 9P Legacy mode, pre-9P2000.u
+ * @p9_proto_2000u: 9P2000.u extension
+ * @p9_proto_2000L: 9P2000.L extension
+ */
+
+enum p9_proto_versions {
+ p9_proto_legacy,
+ p9_proto_2000u,
+ p9_proto_2000L,
+};
+
+
+/**
+ * enum p9_trans_status - different states of underlying transports
+ * @Connected: transport is connected and healthy
+ * @BeginDisconnect: transport teardown has started
+ * @Disconnected: transport has been disconnected
+ * @Hung: transport is connected but wedged
+ *
+ * This enumeration details the various states a transport
+ * instatiation can be in.
+ */
+
+enum p9_trans_status {
+ Connected,
+ BeginDisconnect,
+ Disconnected,
+ Hung,
+};
+
+/**
+ * enum p9_req_status_t - status of a request
+ * @REQ_STATUS_ALLOC: request has been allocated but not sent
+ * @REQ_STATUS_UNSENT: request waiting to be sent
+ * @REQ_STATUS_SENT: request sent to server
+ * @REQ_STATUS_RCVD: response received from server
+ * @REQ_STATUS_FLSHD: request has been flushed
+ * @REQ_STATUS_ERROR: request encountered an error on the client side
+ */
+
+enum p9_req_status_t {
+ REQ_STATUS_ALLOC,
+ REQ_STATUS_UNSENT,
+ REQ_STATUS_SENT,
+ REQ_STATUS_RCVD,
+ REQ_STATUS_FLSHD,
+ REQ_STATUS_ERROR,
+};
+
+/**
+ * struct p9_req_t - request slots
+ * @status: status of this request slot
+ * @t_err: transport error
+ * @refcount: reference count for this request slot
+ * @wq: wait_queue for the client to block on for this request
+ * @tc: the request fcall structure
+ * @rc: the response fcall structure
+ * @req_list: link for higher level objects to chain requests
+ */
+struct p9_req_t {
+ int status;
+ int t_err;
+ refcount_t refcount;
+ wait_queue_head_t wq;
+ struct p9_fcall tc;
+ struct p9_fcall rc;
+ struct list_head req_list;
+};
+
+/**
+ * struct p9_client - per client instance state
+ * @lock: protect @fids and @reqs
+ * @msize: maximum data size negotiated by protocol
+ * @proto_version: 9P protocol version to use
+ * @trans_mod: module API instantiated with this client
+ * @status: connection state
+ * @trans: transport instance state and API
+ * @fids: All active FID handles
+ * @reqs: All active requests.
+ * @name: node name used as client id
+ *
+ * The client structure is used to keep track of various per-client
+ * state that has been instantiated.
+ */
+struct p9_client {
+ spinlock_t lock;
+ unsigned int msize;
+ unsigned char proto_version;
+ struct p9_trans_module *trans_mod;
+ enum p9_trans_status status;
+ void *trans;
+ struct kmem_cache *fcall_cache;
+
+ union {
+ struct {
+ int rfd;
+ int wfd;
+ } fd;
+ struct {
+ u16 port;
+ bool privport;
+
+ } tcp;
+ } trans_opts;
+
+ struct idr fids;
+ struct idr reqs;
+
+ char name[__NEW_UTS_LEN + 1];
+};
+
+/**
+ * struct p9_fid - file system entity handle
+ * @clnt: back pointer to instantiating &p9_client
+ * @fid: numeric identifier for this handle
+ * @mode: current mode of this fid (enum?)
+ * @qid: the &p9_qid server identifier this handle points to
+ * @iounit: the server reported maximum transaction size for this file
+ * @uid: the numeric uid of the local user who owns this handle
+ * @rdir: readdir accounting structure (allocated on demand)
+ * @dlist: per-dentry fid tracking
+ *
+ * TODO: This needs lots of explanation.
+ */
+enum fid_source {
+ FID_FROM_OTHER,
+ FID_FROM_INODE,
+ FID_FROM_DENTRY,
+};
+
+struct p9_fid {
+ struct p9_client *clnt;
+ u32 fid;
+ refcount_t count;
+ int mode;
+ struct p9_qid qid;
+ u32 iounit;
+ kuid_t uid;
+
+ void *rdir;
+
+ struct hlist_node dlist; /* list of all fids attached to a dentry */
+ struct hlist_node ilist;
+};
+
+/**
+ * struct p9_dirent - directory entry structure
+ * @qid: The p9 server qid for this dirent
+ * @d_off: offset to the next dirent
+ * @d_type: type of file
+ * @d_name: file name
+ */
+
+struct p9_dirent {
+ struct p9_qid qid;
+ u64 d_off;
+ unsigned char d_type;
+ char d_name[256];
+};
+
+struct iov_iter;
+
+int p9_show_client_options(struct seq_file *m, struct p9_client *clnt);
+int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb);
+int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid,
+ const char *name);
+int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name,
+ struct p9_fid *newdirfid, const char *new_name);
+struct p9_client *p9_client_create(const char *dev_name, char *options);
+void p9_client_destroy(struct p9_client *clnt);
+void p9_client_disconnect(struct p9_client *clnt);
+void p9_client_begin_disconnect(struct p9_client *clnt);
+struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
+ const char *uname, kuid_t n_uname, const char *aname);
+struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
+ const unsigned char * const *wnames, int clone);
+int p9_client_open(struct p9_fid *fid, int mode);
+int p9_client_fcreate(struct p9_fid *fid, const char *name, u32 perm, int mode,
+ char *extension);
+int p9_client_link(struct p9_fid *fid, struct p9_fid *oldfid, const char *newname);
+int p9_client_symlink(struct p9_fid *fid, const char *name, const char *symname,
+ kgid_t gid, struct p9_qid *qid);
+int p9_client_create_dotl(struct p9_fid *ofid, const char *name, u32 flags, u32 mode,
+ kgid_t gid, struct p9_qid *qid);
+int p9_client_clunk(struct p9_fid *fid);
+int p9_client_fsync(struct p9_fid *fid, int datasync);
+int p9_client_remove(struct p9_fid *fid);
+int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags);
+int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err);
+int p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
+ int *err);
+int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err);
+int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
+int p9dirent_read(struct p9_client *clnt, char *buf, int len,
+ struct p9_dirent *dirent);
+struct p9_wstat *p9_client_stat(struct p9_fid *fid);
+int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst);
+int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *attr);
+
+struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
+ u64 request_mask);
+
+int p9_client_mknod_dotl(struct p9_fid *oldfid, const char *name, int mode,
+ dev_t rdev, kgid_t gid, struct p9_qid *qid);
+int p9_client_mkdir_dotl(struct p9_fid *fid, const char *name, int mode,
+ kgid_t gid, struct p9_qid *qid);
+int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status);
+int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl);
+void p9_fcall_fini(struct p9_fcall *fc);
+struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag);
+
+static inline void p9_req_get(struct p9_req_t *r)
+{
+ refcount_inc(&r->refcount);
+}
+
+static inline int p9_req_try_get(struct p9_req_t *r)
+{
+ return refcount_inc_not_zero(&r->refcount);
+}
+
+int p9_req_put(struct p9_client *c, struct p9_req_t *r);
+
+/* We cannot have the real tracepoints in header files,
+ * use a wrapper function */
+DECLARE_TRACEPOINT(9p_fid_ref);
+void do_trace_9p_fid_get(struct p9_fid *fid);
+void do_trace_9p_fid_put(struct p9_fid *fid);
+
+/* fid reference counting helpers:
+ * - fids used for any length of time should always be referenced through
+ * p9_fid_get(), and released with p9_fid_put()
+ * - v9fs_fid_lookup() or similar will automatically call get for you
+ * and also require a put
+ * - the *_fid_add() helpers will stash the fid in the inode,
+ * at which point it is the responsibility of evict_inode()
+ * to call the put
+ * - the last put will automatically send a clunk to the server
+ */
+static inline struct p9_fid *p9_fid_get(struct p9_fid *fid)
+{
+ if (tracepoint_enabled(9p_fid_ref))
+ do_trace_9p_fid_get(fid);
+
+ refcount_inc(&fid->count);
+
+ return fid;
+}
+
+static inline int p9_fid_put(struct p9_fid *fid)
+{
+ if (!fid || IS_ERR(fid))
+ return 0;
+
+ if (tracepoint_enabled(9p_fid_ref))
+ do_trace_9p_fid_put(fid);
+
+ if (!refcount_dec_and_test(&fid->count))
+ return 0;
+
+ return p9_client_clunk(fid);
+}
+
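A sketch of the reference discipline described above; the wrapper function is hypothetical, not part of this header:

	static int example_fsync_fid(struct p9_fid *fid)
	{
		int err;

		fid = p9_fid_get(fid);	/* hold a reference while the fid is in use */
		err = p9_client_fsync(fid, 0);
		p9_fid_put(fid);	/* the last put sends a TCLUNK to the server */

		return err;
	}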
+void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status);
+
+int p9_parse_header(struct p9_fcall *pdu, int32_t *size, int8_t *type,
+ int16_t *tag, int rewind);
+int p9stat_read(struct p9_client *clnt, char *buf, int len,
+ struct p9_wstat *st);
+void p9stat_free(struct p9_wstat *stbuf);
+
+int p9_is_proto_dotu(struct p9_client *clnt);
+int p9_is_proto_dotl(struct p9_client *clnt);
+struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
+ const char *attr_name, u64 *attr_size);
+int p9_client_xattrcreate(struct p9_fid *fid, const char *name,
+ u64 attr_size, int flags);
+int p9_client_readlink(struct p9_fid *fid, char **target);
+
+int p9_client_init(void);
+void p9_client_exit(void);
+
+#endif /* NET_9P_CLIENT_H */
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
new file mode 100644
index 000000000..766ec07c9
--- /dev/null
+++ b/include/net/9p/transport.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Transport Definition
+ *
+ * Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
+ * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
+ */
+
+#ifndef NET_9P_TRANSPORT_H
+#define NET_9P_TRANSPORT_H
+
+#include <linux/module.h>
+
+#define P9_DEF_MIN_RESVPORT (665U)
+#define P9_DEF_MAX_RESVPORT (1023U)
+
+/**
+ * struct p9_trans_module - transport module interface
+ * @list: used to maintain a list of currently available transports
+ * @name: the human-readable name of the transport
+ * @maxsize: transport provided maximum packet size
+ * @pooled_rbuffers: currently only set for RDMA transport which pulls the
+ * response buffers from a shared pool, and accordingly
+ * we're less flexible when choosing the response message
+ * size in this case
+ * @def: set if this transport should be considered the default
+ * @create: member function to create a new connection on this transport
+ * @close: member function to discard a connection on this transport
+ * @request: member function to issue a request to the transport
+ * @cancel: member function to cancel a request (if it hasn't been sent)
+ * @cancelled: member function to notify that a cancelled request will not
+ * receive a reply
+ *
+ * This is the basic API for a transport module which is registered by the
+ * transport module with the 9P core network module and used by the client
+ * to instantiate a new connection on a transport.
+ *
+ * The transport module list is protected by v9fs_trans_lock.
+ */
+
+struct p9_trans_module {
+ struct list_head list;
+ char *name; /* name of transport */
+ int maxsize; /* max message size of transport */
+ bool pooled_rbuffers;
+ int def; /* this transport should be default */
+ struct module *owner;
+ int (*create)(struct p9_client *client,
+ const char *devname, char *args);
+ void (*close)(struct p9_client *client);
+ int (*request)(struct p9_client *client, struct p9_req_t *req);
+ int (*cancel)(struct p9_client *client, struct p9_req_t *req);
+ int (*cancelled)(struct p9_client *client, struct p9_req_t *req);
+ int (*zc_request)(struct p9_client *client, struct p9_req_t *req,
+ struct iov_iter *uidata, struct iov_iter *uodata,
+ int inlen, int outlen, int in_hdr_len);
+ int (*show_options)(struct seq_file *m, struct p9_client *client);
+};
+
+void v9fs_register_trans(struct p9_trans_module *m);
+void v9fs_unregister_trans(struct p9_trans_module *m);
+struct p9_trans_module *v9fs_get_trans_by_name(const char *s);
+struct p9_trans_module *v9fs_get_default_trans(void);
+void v9fs_put_trans(struct p9_trans_module *m);
+
+#define MODULE_ALIAS_9P(transport) \
+ MODULE_ALIAS("9p-" transport)
+
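A hedged skeleton of how a transport might register itself through this API; every example_* name is a placeholder, not part of this header:

	static struct p9_trans_module p9_example_trans = {
		.name	 = "example",
		.maxsize = 8192,
		.def	 = 0,
		.owner	 = THIS_MODULE,
		.create	 = example_create,
		.close	 = example_close,
		.request = example_request,
		.cancel	 = example_cancel,
	};

	static int __init p9_trans_example_init(void)
	{
		v9fs_register_trans(&p9_example_trans);
		return 0;
	}

	static void __exit p9_trans_example_exit(void)
	{
		v9fs_unregister_trans(&p9_example_trans);
	}

	module_init(p9_trans_example_init);
	module_exit(p9_trans_example_exit);
	MODULE_ALIAS_9P("example");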
+#endif /* NET_9P_TRANSPORT_H */
diff --git a/include/net/Space.h b/include/net/Space.h
new file mode 100644
index 000000000..08ca9cef0
--- /dev/null
+++ b/include/net/Space.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* A unified ethernet device probe. This is the easiest way to have every
+ * ethernet adaptor have the name "eth[0123...]".
+ */
+
+struct net_device *hp100_probe(int unit);
+struct net_device *ultra_probe(int unit);
+struct net_device *wd_probe(int unit);
+struct net_device *ne_probe(int unit);
+struct net_device *fmv18x_probe(int unit);
+struct net_device *ni65_probe(int unit);
+struct net_device *sonic_probe(int unit);
+struct net_device *smc_init(int unit);
+struct net_device *cs89x0_probe(int unit);
+struct net_device *tc515_probe(int unit);
+struct net_device *lance_probe(int unit);
+struct net_device *cops_probe(int unit);
+
+/* Fibre Channel adapters */
+int iph5526_probe(struct net_device *dev);
diff --git a/include/net/act_api.h b/include/net/act_api.h
new file mode 100644
index 000000000..61f2ceb39
--- /dev/null
+++ b/include/net/act_api.h
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_ACT_API_H
+#define __NET_ACT_API_H
+
+/*
+ * Public action API for classifiers/qdiscs
+ */
+
+#include <linux/refcount.h>
+#include <net/flow_offload.h>
+#include <net/sch_generic.h>
+#include <net/pkt_sched.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+struct tcf_idrinfo {
+ struct mutex lock;
+ struct idr action_idr;
+ struct net *net;
+};
+
+struct tc_action_ops;
+
+struct tc_action {
+ const struct tc_action_ops *ops;
+ __u32 type; /* for backward compat(TCA_OLD_COMPAT) */
+ struct tcf_idrinfo *idrinfo;
+
+ u32 tcfa_index;
+ refcount_t tcfa_refcnt;
+ atomic_t tcfa_bindcnt;
+ int tcfa_action;
+ struct tcf_t tcfa_tm;
+ struct gnet_stats_basic_sync tcfa_bstats;
+ struct gnet_stats_basic_sync tcfa_bstats_hw;
+ struct gnet_stats_queue tcfa_qstats;
+ struct net_rate_estimator __rcu *tcfa_rate_est;
+ spinlock_t tcfa_lock;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats_hw;
+ struct gnet_stats_queue __percpu *cpu_qstats;
+ struct tc_cookie __rcu *act_cookie;
+ struct tcf_chain __rcu *goto_chain;
+ u32 tcfa_flags;
+ u8 hw_stats;
+ u8 used_hw_stats;
+ bool used_hw_stats_valid;
+ u32 in_hw_count;
+};
+#define tcf_index common.tcfa_index
+#define tcf_refcnt common.tcfa_refcnt
+#define tcf_bindcnt common.tcfa_bindcnt
+#define tcf_action common.tcfa_action
+#define tcf_tm common.tcfa_tm
+#define tcf_bstats common.tcfa_bstats
+#define tcf_qstats common.tcfa_qstats
+#define tcf_rate_est common.tcfa_rate_est
+#define tcf_lock common.tcfa_lock
+
+#define TCA_ACT_HW_STATS_ANY (TCA_ACT_HW_STATS_IMMEDIATE | \
+ TCA_ACT_HW_STATS_DELAYED)
+
+/* Reserve 16 bits for user-space. See TCA_ACT_FLAGS_NO_PERCPU_STATS. */
+#define TCA_ACT_FLAGS_USER_BITS 16
+#define TCA_ACT_FLAGS_USER_MASK 0xffff
+#define TCA_ACT_FLAGS_POLICE (1U << TCA_ACT_FLAGS_USER_BITS)
+#define TCA_ACT_FLAGS_BIND (1U << (TCA_ACT_FLAGS_USER_BITS + 1))
+#define TCA_ACT_FLAGS_REPLACE (1U << (TCA_ACT_FLAGS_USER_BITS + 2))
+#define TCA_ACT_FLAGS_NO_RTNL (1U << (TCA_ACT_FLAGS_USER_BITS + 3))
+
+/* Update lastuse only if needed, to avoid dirtying a cache line.
+ * We use a temp variable to avoid fetching jiffies twice.
+ */
+static inline void tcf_lastuse_update(struct tcf_t *tm)
+{
+ unsigned long now = jiffies;
+
+ if (tm->lastuse != now)
+ tm->lastuse = now;
+ if (unlikely(!tm->firstuse))
+ tm->firstuse = now;
+}
+
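A sketch of how an action's ->act() callback typically uses this helper (illustrative only; real actions usually operate on their private action struct and take their own locking):

	static int example_act(struct sk_buff *skb, const struct tc_action *a,
			       struct tcf_result *res)
	{
		struct tc_action *act = (struct tc_action *)a;

		tcf_lastuse_update(&act->tcfa_tm);	/* refresh first/last-use stamps */

		return act->tcfa_action;		/* e.g. TC_ACT_OK */
	}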
+static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
+{
+ dtm->install = jiffies_to_clock_t(jiffies - stm->install);
+ dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse);
+ dtm->firstuse = stm->firstuse ?
+ jiffies_to_clock_t(jiffies - stm->firstuse) : 0;
+ dtm->expires = jiffies_to_clock_t(stm->expires);
+}
+
+static inline enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
+{
+ if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
+ return FLOW_ACTION_HW_STATS_DONT_CARE;
+ else if (!hw_stats)
+ return FLOW_ACTION_HW_STATS_DISABLED;
+
+ return hw_stats;
+}
+
+#ifdef CONFIG_NET_CLS_ACT
+
+#define ACT_P_CREATED 1
+#define ACT_P_DELETED 1
+
+typedef void (*tc_action_priv_destructor)(void *priv);
+
+struct tc_action_ops {
+ struct list_head head;
+ char kind[IFNAMSIZ];
+ enum tca_id id; /* identifier should match kind */
+ unsigned int net_id;
+ size_t size;
+ struct module *owner;
+ int (*act)(struct sk_buff *, const struct tc_action *,
+ struct tcf_result *); /* called under RCU BH lock */
+ int (*dump)(struct sk_buff *, struct tc_action *, int, int);
+ void (*cleanup)(struct tc_action *);
+ int (*lookup)(struct net *net, struct tc_action **a, u32 index);
+ int (*init)(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action **act,
+ struct tcf_proto *tp,
+ u32 flags, struct netlink_ext_ack *extack);
+ int (*walk)(struct net *, struct sk_buff *,
+ struct netlink_callback *, int,
+ const struct tc_action_ops *,
+ struct netlink_ext_ack *);
+ void (*stats_update)(struct tc_action *, u64, u64, u64, u64, bool);
+ size_t (*get_fill_size)(const struct tc_action *act);
+ struct net_device *(*get_dev)(const struct tc_action *a,
+ tc_action_priv_destructor *destructor);
+ struct psample_group *
+ (*get_psample_group)(const struct tc_action *a,
+ tc_action_priv_destructor *destructor);
+ int (*offload_act_setup)(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack);
+};
+
+struct tc_action_net {
+ struct tcf_idrinfo *idrinfo;
+ const struct tc_action_ops *ops;
+};
+
+static inline
+int tc_action_net_init(struct net *net, struct tc_action_net *tn,
+ const struct tc_action_ops *ops)
+{
+ int err = 0;
+
+ tn->idrinfo = kmalloc(sizeof(*tn->idrinfo), GFP_KERNEL);
+ if (!tn->idrinfo)
+ return -ENOMEM;
+ tn->ops = ops;
+ tn->idrinfo->net = net;
+ mutex_init(&tn->idrinfo->lock);
+ idr_init(&tn->idrinfo->action_idr);
+ return err;
+}
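+
+/* Usage sketch (the "foo" action and its ops are hypothetical): a
+ * pernet init hook would typically do
+ *
+ *	struct tc_action_net *tn = net_generic(net, act_foo_ops.net_id);
+ *
+ *	return tc_action_net_init(net, tn, &act_foo_ops);
+ */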
+
+void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
+ struct tcf_idrinfo *idrinfo);
+
+static inline void tc_action_net_exit(struct list_head *net_list,
+ unsigned int id)
+{
+ struct net *net;
+
+ rtnl_lock();
+ list_for_each_entry(net, net_list, exit_list) {
+ struct tc_action_net *tn = net_generic(net, id);
+
+ tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
+ kfree(tn->idrinfo);
+ }
+ rtnl_unlock();
+}
+
+int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
+ struct netlink_callback *cb, int type,
+ const struct tc_action_ops *ops,
+ struct netlink_ext_ack *extack);
+int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
+int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
+ struct tc_action **a, const struct tc_action_ops *ops,
+ int bind, bool cpustats, u32 flags);
+int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
+ struct nlattr *est, struct tc_action **a,
+ const struct tc_action_ops *ops, int bind,
+ u32 flags);
+void tcf_idr_insert_many(struct tc_action *actions[]);
+void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
+int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+ struct tc_action **a, int bind);
+int tcf_idr_release(struct tc_action *a, bool bind);
+
+int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
+int tcf_unregister_action(struct tc_action_ops *a,
+ struct pernet_operations *ops);
+int tcf_action_destroy(struct tc_action *actions[], int bind);
+int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
+ int nr_actions, struct tcf_result *res);
+int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
+ struct nlattr *est,
+ struct tc_action *actions[], int init_res[], size_t *attr_size,
+ u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
+struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
+ bool rtnl_held,
+ struct netlink_ext_ack *extack);
+struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ struct nlattr *nla, struct nlattr *est,
+ struct tc_action_ops *a_o, int *init_res,
+ u32 flags, struct netlink_ext_ack *extack);
+int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
+ int ref, bool terse);
+int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
+int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
+
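+/* The inline stats helpers below prefer the lockless per-CPU counters
+ * when the action was created with them; otherwise they fall back to
+ * the shared counters guarded by tcfa_lock.
+ */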
+static inline void tcf_action_update_bstats(struct tc_action *a,
+ struct sk_buff *skb)
+{
+ if (likely(a->cpu_bstats)) {
+ bstats_update(this_cpu_ptr(a->cpu_bstats), skb);
+ return;
+ }
+ spin_lock(&a->tcfa_lock);
+ bstats_update(&a->tcfa_bstats, skb);
+ spin_unlock(&a->tcfa_lock);
+}
+
+static inline void tcf_action_inc_drop_qstats(struct tc_action *a)
+{
+ if (likely(a->cpu_qstats)) {
+ qstats_drop_inc(this_cpu_ptr(a->cpu_qstats));
+ return;
+ }
+ spin_lock(&a->tcfa_lock);
+ qstats_drop_inc(&a->tcfa_qstats);
+ spin_unlock(&a->tcfa_lock);
+}
+
+static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
+{
+ if (likely(a->cpu_qstats)) {
+ qstats_overlimit_inc(this_cpu_ptr(a->cpu_qstats));
+ return;
+ }
+ spin_lock(&a->tcfa_lock);
+ qstats_overlimit_inc(&a->tcfa_qstats);
+ spin_unlock(&a->tcfa_lock);
+}
+
+void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
+ u64 drops, bool hw);
+int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
+
+int tcf_action_update_hw_stats(struct tc_action *action);
+int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
+ void *cb_priv, bool add);
+int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
+ struct tcf_chain **handle,
+ struct netlink_ext_ack *newchain);
+struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
+ struct tcf_chain *newchain);
+
+#ifdef CONFIG_INET
+DECLARE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
+#endif
+
+int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
+
+#else /* !CONFIG_NET_CLS_ACT */
+
+static inline int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
+					  void *cb_priv, bool add)
+{
+	return 0;
+}
+
+#endif /* CONFIG_NET_CLS_ACT */
+
+static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
+ u64 packets, u64 drops,
+ u64 lastuse, bool hw)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (!a->ops->stats_update)
+ return;
+
+ a->ops->stats_update(a, bytes, packets, drops, lastuse, hw);
+#endif
+}
+
+#endif
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
new file mode 100644
index 000000000..86eb2aba1
--- /dev/null
+++ b/include/net/addrconf.h
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ADDRCONF_H
+#define _ADDRCONF_H
+
+#define MAX_RTR_SOLICITATIONS -1 /* unlimited */
+#define RTR_SOLICITATION_INTERVAL (4*HZ)
+#define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */
+
+#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
+
+#define TEMP_VALID_LIFETIME (7*86400)
+#define TEMP_PREFERRED_LIFETIME (86400)
+#define REGEN_MAX_RETRY (3)
+#define MAX_DESYNC_FACTOR (600)
+
+#define ADDR_CHECK_FREQUENCY (120*HZ)
+
+#define IPV6_MAX_ADDRESSES 16
+
+#define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ / 50 : 1)
+#define ADDRCONF_TIMER_FUZZ (HZ / 4)
+#define ADDRCONF_TIMER_FUZZ_MAX (HZ)
+
+#define ADDRCONF_NOTIFY_PRIORITY 0
+
+#include <linux/in.h>
+#include <linux/in6.h>
+
+struct prefix_info {
+ __u8 type;
+ __u8 length;
+ __u8 prefix_len;
+
+ union __packed {
+ __u8 flags;
+ struct __packed {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ __u8 onlink : 1,
+ autoconf : 1,
+ reserved : 6;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 reserved : 6,
+ autoconf : 1,
+ onlink : 1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ };
+ };
+ __be32 valid;
+ __be32 prefered;
+ __be32 reserved2;
+
+ struct in6_addr prefix;
+};
+
+/* rfc4861 4.6.2: IPv6 PIO is 32 bytes in size */
+static_assert(sizeof(struct prefix_info) == 32);
+
+#include <linux/ipv6.h>
+#include <linux/netdevice.h>
+#include <net/if_inet6.h>
+#include <net/ipv6.h>
+
+struct in6_validator_info {
+ struct in6_addr i6vi_addr;
+ struct inet6_dev *i6vi_dev;
+ struct netlink_ext_ack *extack;
+};
+
+struct ifa6_config {
+ const struct in6_addr *pfx;
+ unsigned int plen;
+
+ u8 ifa_proto;
+
+ const struct in6_addr *peer_pfx;
+
+ u32 rt_priority;
+ u32 ifa_flags;
+ u32 preferred_lft;
+ u32 valid_lft;
+ u16 scope;
+};
+
+int addrconf_init(void);
+void addrconf_cleanup(void);
+
+int addrconf_add_ifaddr(struct net *net, void __user *arg);
+int addrconf_del_ifaddr(struct net *net, void __user *arg);
+int addrconf_set_dstaddr(struct net *net, void __user *arg);
+
+int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, int strict);
+int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, bool skip_dev_check,
+ int strict, u32 banned_flags);
+
+#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
+#endif
+
+int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs,
+ unsigned char nsegs);
+
+bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
+ const unsigned int prefix_len,
+ struct net_device *dev);
+
+int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev);
+
+struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr,
+ struct net_device *dev);
+
+struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
+ const struct in6_addr *addr,
+ struct net_device *dev, int strict);
+
+int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
+ const struct in6_addr *daddr, unsigned int srcprefs,
+ struct in6_addr *saddr);
+int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
+ u32 banned_flags);
+bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
+ bool match_wildcard);
+bool inet_rcv_saddr_any(const struct sock *sk);
+void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
+void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
+
+void addrconf_add_linklocal(struct inet6_dev *idev,
+ const struct in6_addr *addr, u32 flags);
+
+int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
+ const struct prefix_info *pinfo,
+ struct inet6_dev *in6_dev,
+ const struct in6_addr *addr, int addr_type,
+ u32 addr_flags, bool sllao, bool tokenized,
+ __u32 valid_lft, u32 prefered_lft);
+
+static inline void addrconf_addr_eui48_base(u8 *eui, const char *const addr)
+{
+ memcpy(eui, addr, 3);
+ eui[3] = 0xFF;
+ eui[4] = 0xFE;
+ memcpy(eui + 5, addr + 3, 3);
+}
+
+static inline void addrconf_addr_eui48(u8 *eui, const char *const addr)
+{
+ addrconf_addr_eui48_base(eui, addr);
+ eui[0] ^= 2;
+}
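+
+/* For example (RFC 4291 Appendix A): the MAC 00:11:22:33:44:55 expands
+ * to the base EUI-64 00:11:22:ff:fe:33:44:55; flipping the u/l bit
+ * yields the interface identifier 02:11:22:ff:fe:33:44:55, i.e. the
+ * address suffix ::211:22ff:fe33:4455.
+ */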
+
+static inline int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
+{
+ if (dev->addr_len != ETH_ALEN)
+ return -1;
+
+ /*
+ * The zSeries OSA network cards can be shared among various
+ * OS instances, but the OSA cards have only one MAC address.
+ * This leads to duplicate address conflicts in conjunction
+ * with IPv6 if more than one instance uses the same card.
+ *
+ * The driver for these cards can deliver a unique 16-bit
+ * identifier for each instance sharing the same card. It is
+ * placed instead of 0xFFFE in the interface identifier. The
+ * "u" bit of the interface identifier is not inverted in this
+ * case. Hence the resulting interface identifier has local
+ * scope according to RFC2373.
+ */
+
+ addrconf_addr_eui48_base(eui, dev->dev_addr);
+
+ if (dev->dev_id) {
+ eui[3] = (dev->dev_id >> 8) & 0xFF;
+ eui[4] = dev->dev_id & 0xFF;
+ } else {
+ eui[0] ^= 2;
+ }
+
+ return 0;
+}
+
+static inline unsigned long addrconf_timeout_fixup(u32 timeout,
+ unsigned int unit)
+{
+ if (timeout == 0xffffffff)
+ return ~0UL;
+
+ /*
+ * Avoid arithmetic overflow.
+ * Assuming unit is constant and non-zero, this "if" statement
+ * will go away on 64bit archs.
+ */
+ if (0xfffffffe > LONG_MAX / unit && timeout > LONG_MAX / unit)
+ return LONG_MAX / unit;
+
+ return timeout;
+}
+
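+/* Non-zero iff the timeout is finite: ~0UL is the "infinite" sentinel
+ * produced by addrconf_timeout_fixup() above, and it is the only value
+ * for which ~timeout collapses to zero.
+ */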
+static inline int addrconf_finite_timeout(unsigned long timeout)
+{
+ return ~timeout;
+}
+
+/*
+ * IPv6 Address Label subsystem (addrlabel.c)
+ */
+int ipv6_addr_label_init(void);
+void ipv6_addr_label_cleanup(void);
+int ipv6_addr_label_rtnl_register(void);
+u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr,
+ int type, int ifindex);
+
+/*
+ * multicast prototypes (mcast.c)
+ */
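+
+/* Only pull what the IPv6 payload length actually covers, so MLD
+ * parsing cannot wander into link-layer padding past the datagram.
+ */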
+static inline bool ipv6_mc_may_pull(struct sk_buff *skb,
+ unsigned int len)
+{
+ if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
+ return false;
+
+ return pskb_may_pull(skb, len);
+}
+
+int ipv6_sock_mc_join(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+void __ipv6_sock_mc_close(struct sock *sk);
+void ipv6_sock_mc_close(struct sock *sk);
+bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
+ const struct in6_addr *src_addr);
+
+int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr);
+int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr);
+int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr);
+void ipv6_mc_up(struct inet6_dev *idev);
+void ipv6_mc_down(struct inet6_dev *idev);
+void ipv6_mc_unmap(struct inet6_dev *idev);
+void ipv6_mc_remap(struct inet6_dev *idev);
+void ipv6_mc_init_dev(struct inet6_dev *idev);
+void ipv6_mc_destroy_dev(struct inet6_dev *idev);
+int ipv6_mc_check_mld(struct sk_buff *skb);
+void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp);
+
+bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
+ const struct in6_addr *src_addr);
+
+void ipv6_mc_dad_complete(struct inet6_dev *idev);
+
+/*
+ * identify MLD packets for MLD filter exceptions
+ */
+static inline bool ipv6_is_mld(struct sk_buff *skb, int nexthdr, int offset)
+{
+ struct icmp6hdr *hdr;
+
+ if (nexthdr != IPPROTO_ICMPV6 ||
+ !pskb_network_may_pull(skb, offset + sizeof(struct icmp6hdr)))
+ return false;
+
+ hdr = (struct icmp6hdr *)(skb_network_header(skb) + offset);
+
+ switch (hdr->icmp6_type) {
+ case ICMPV6_MGM_QUERY:
+ case ICMPV6_MGM_REPORT:
+ case ICMPV6_MGM_REDUCTION:
+ case ICMPV6_MLD2_REPORT:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+void addrconf_prefix_rcv(struct net_device *dev,
+ u8 *opt, int len, bool sllao);
+
+/*
+ * anycast prototypes (anycast.c)
+ */
+int ipv6_sock_ac_join(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+void __ipv6_sock_ac_close(struct sock *sk);
+void ipv6_sock_ac_close(struct sock *sk);
+
+int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr);
+int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
+void ipv6_ac_destroy_dev(struct inet6_dev *idev);
+bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
+ const struct in6_addr *addr);
+bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
+ const struct in6_addr *addr);
+int ipv6_anycast_init(void);
+void ipv6_anycast_cleanup(void);
+
+/* Device notifier */
+int register_inet6addr_notifier(struct notifier_block *nb);
+int unregister_inet6addr_notifier(struct notifier_block *nb);
+int inet6addr_notifier_call_chain(unsigned long val, void *v);
+
+int register_inet6addr_validator_notifier(struct notifier_block *nb);
+int unregister_inet6addr_validator_notifier(struct notifier_block *nb);
+int inet6addr_validator_notifier_call_chain(unsigned long val, void *v);
+
+void inet6_netconf_notify_devconf(struct net *net, int event, int type,
+ int ifindex, struct ipv6_devconf *devconf);
+
+/**
+ * __in6_dev_get - get inet6_dev pointer from netdevice
+ * @dev: network device
+ *
+ * Caller must hold rcu_read_lock or RTNL, because this function
+ * does not take a reference on the inet6_dev.
+ */
+static inline struct inet6_dev *__in6_dev_get(const struct net_device *dev)
+{
+ return rcu_dereference_rtnl(dev->ip6_ptr);
+}
+
+/**
+ * __in6_dev_stats_get - get inet6_dev pointer for stats
+ * @dev: network device
+ * @skb: skb for original incoming interface if needed
+ *
+ * Caller must hold rcu_read_lock or RTNL, because this function
+ * does not take a reference on the inet6_dev.
+ */
+static inline struct inet6_dev *__in6_dev_stats_get(const struct net_device *dev,
+ const struct sk_buff *skb)
+{
+ if (netif_is_l3_master(dev))
+ dev = dev_get_by_index_rcu(dev_net(dev), inet6_iif(skb));
+ return __in6_dev_get(dev);
+}
+
+/**
+ * __in6_dev_get_safely - get inet6_dev pointer from netdevice
+ * @dev: network device
+ *
+ * This is a safer version of __in6_dev_get
+ */
+static inline struct inet6_dev *__in6_dev_get_safely(const struct net_device *dev)
+{
+ if (likely(dev))
+ return rcu_dereference_rtnl(dev->ip6_ptr);
+ else
+ return NULL;
+}
+
+/**
+ * in6_dev_get - get inet6_dev pointer from netdevice
+ * @dev: network device
+ *
+ * This version can be used in any context, and takes a reference
+ * on the inet6_dev. Callers must use in6_dev_put() later to
+ * release this reference.
+ */
+static inline struct inet6_dev *in6_dev_get(const struct net_device *dev)
+{
+ struct inet6_dev *idev;
+
+ rcu_read_lock();
+ idev = rcu_dereference(dev->ip6_ptr);
+ if (idev)
+ refcount_inc(&idev->refcnt);
+ rcu_read_unlock();
+ return idev;
+}
+
+static inline struct neigh_parms *__in6_dev_nd_parms_get_rcu(const struct net_device *dev)
+{
+ struct inet6_dev *idev = __in6_dev_get(dev);
+
+ return idev ? idev->nd_parms : NULL;
+}
+
+void in6_dev_finish_destroy(struct inet6_dev *idev);
+
+static inline void in6_dev_put(struct inet6_dev *idev)
+{
+ if (refcount_dec_and_test(&idev->refcnt))
+ in6_dev_finish_destroy(idev);
+}
+
+static inline void in6_dev_put_clear(struct inet6_dev **pidev)
+{
+ struct inet6_dev *idev = *pidev;
+
+ if (idev) {
+ in6_dev_put(idev);
+ *pidev = NULL;
+ }
+}
+
+static inline void __in6_dev_put(struct inet6_dev *idev)
+{
+ refcount_dec(&idev->refcnt);
+}
+
+static inline void in6_dev_hold(struct inet6_dev *idev)
+{
+ refcount_inc(&idev->refcnt);
+}
+
+/* called with rcu_read_lock held */
+static inline bool ip6_ignore_linkdown(const struct net_device *dev)
+{
+ const struct inet6_dev *idev = __in6_dev_get(dev);
+
+ if (unlikely(!idev))
+ return true;
+
+ return !!idev->cnf.ignore_routes_with_linkdown;
+}
+
+void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
+
+static inline void in6_ifa_put(struct inet6_ifaddr *ifp)
+{
+ if (refcount_dec_and_test(&ifp->refcnt))
+ inet6_ifa_finish_destroy(ifp);
+}
+
+static inline void __in6_ifa_put(struct inet6_ifaddr *ifp)
+{
+ refcount_dec(&ifp->refcnt);
+}
+
+static inline void in6_ifa_hold(struct inet6_ifaddr *ifp)
+{
+ refcount_inc(&ifp->refcnt);
+}
+
+/*
+ * compute link-local solicited-node multicast address
+ */
+
+static inline void addrconf_addr_solict_mult(const struct in6_addr *addr,
+ struct in6_addr *solicited)
+{
+ ipv6_addr_set(solicited,
+ htonl(0xFF020000), 0,
+ htonl(0x1),
+ htonl(0xFF000000) | addr->s6_addr32[3]);
+}
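+
+/* For example, 2001:db8::1:2 maps to the solicited-node group
+ * ff02::1:ff01:2 (RFC 4291: ff02::1:ffXX:XXXX, where XX:XXXX are the
+ * low-order 24 bits of the unicast address).
+ */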
+
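+/* The matchers below recognise well-known groups: ff02::1 (all nodes),
+ * ff02::2 (all routers), ff02::6a (all snoopers, RFC 4286) and the
+ * solicited-node prefix, each with a two-word fast path on 64-bit
+ * machines that tolerate unaligned loads.
+ */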
+static inline bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ __be64 *p = (__force __be64 *)addr;
+ return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(1))) == 0UL;
+#else
+ return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
+ addr->s6_addr32[1] | addr->s6_addr32[2] |
+ (addr->s6_addr32[3] ^ htonl(0x00000001))) == 0;
+#endif
+}
+
+static inline bool ipv6_addr_is_ll_all_routers(const struct in6_addr *addr)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ __be64 *p = (__force __be64 *)addr;
+ return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(2))) == 0UL;
+#else
+ return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
+ addr->s6_addr32[1] | addr->s6_addr32[2] |
+ (addr->s6_addr32[3] ^ htonl(0x00000002))) == 0;
+#endif
+}
+
+static inline bool ipv6_addr_is_isatap(const struct in6_addr *addr)
+{
+ return (addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE);
+}
+
+static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ __be64 *p = (__force __be64 *)addr;
+ return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) |
+ ((p[1] ^ cpu_to_be64(0x00000001ff000000UL)) &
+ cpu_to_be64(0xffffffffff000000UL))) == 0UL;
+#else
+ return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
+ addr->s6_addr32[1] |
+ (addr->s6_addr32[2] ^ htonl(0x00000001)) |
+ (addr->s6_addr[12] ^ 0xff)) == 0;
+#endif
+}
+
+static inline bool ipv6_addr_is_all_snoopers(const struct in6_addr *addr)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ __be64 *p = (__force __be64 *)addr;
+
+ return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) |
+ (p[1] ^ cpu_to_be64(0x6a))) == 0UL;
+#else
+ return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
+ addr->s6_addr32[1] | addr->s6_addr32[2] |
+ (addr->s6_addr32[3] ^ htonl(0x0000006a))) == 0;
+#endif
+}
+
+#ifdef CONFIG_PROC_FS
+int if6_proc_init(void);
+void if6_proc_exit(void);
+#endif
+
+#endif
diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h
new file mode 100644
index 000000000..c16be82b9
--- /dev/null
+++ b/include/net/af_ieee802154.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.15.4 interface for userspace
+ *
+ * Copyright 2007, 2008 Siemens AG
+ *
+ * Written by:
+ * Sergey Lapin <slapin@ossfans.org>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ */
+
+#ifndef _AF_IEEE802154_H
+#define _AF_IEEE802154_H
+
+#include <linux/socket.h> /* for sa_family_t */
+
+enum {
+ IEEE802154_ADDR_NONE = 0x0,
+ /* RESERVED = 0x01, */
+ IEEE802154_ADDR_SHORT = 0x2, /* 16-bit address + PANid */
+ IEEE802154_ADDR_LONG = 0x3, /* 64-bit address + PANid */
+};
+
+/* address length, octets */
+#define IEEE802154_ADDR_LEN 8
+
+struct ieee802154_addr_sa {
+ int addr_type;
+ u16 pan_id;
+ union {
+ u8 hwaddr[IEEE802154_ADDR_LEN];
+ u16 short_addr;
+ };
+};
+
+#define IEEE802154_PANID_BROADCAST 0xffff
+#define IEEE802154_ADDR_BROADCAST 0xffff
+#define IEEE802154_ADDR_UNDEF 0xfffe
+
+struct sockaddr_ieee802154 {
+ sa_family_t family; /* AF_IEEE802154 */
+ struct ieee802154_addr_sa addr;
+};
+
+/* get/setsockopt */
+#define SOL_IEEE802154 0
+
+#define WPAN_WANTACK 0
+#define WPAN_SECURITY 1
+#define WPAN_SECURITY_LEVEL 2
+#define WPAN_WANTLQI 3
+
+#define WPAN_SECURITY_DEFAULT 0
+#define WPAN_SECURITY_OFF 1
+#define WPAN_SECURITY_ON 2
+
+#define WPAN_SECURITY_LEVEL_DEFAULT (-1)
+
+#endif
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
new file mode 100644
index 000000000..b69ca6959
--- /dev/null
+++ b/include/net/af_rxrpc.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* RxRPC kernel service interface definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _NET_RXRPC_H
+#define _NET_RXRPC_H
+
+#include <linux/rxrpc.h>
+#include <linux/ktime.h>
+
+struct key;
+struct sock;
+struct socket;
+struct rxrpc_call;
+
+enum rxrpc_interruptibility {
+ RXRPC_INTERRUPTIBLE, /* Call is interruptible */
+ RXRPC_PREINTERRUPTIBLE, /* Call can be cancelled whilst waiting for a slot */
+ RXRPC_UNINTERRUPTIBLE, /* Call should not be interruptible at all */
+};
+
+/*
+ * Debug ID counter for tracing.
+ */
+extern atomic_t rxrpc_debug_id;
+
+typedef void (*rxrpc_notify_rx_t)(struct sock *, struct rxrpc_call *,
+ unsigned long);
+typedef void (*rxrpc_notify_end_tx_t)(struct sock *, struct rxrpc_call *,
+ unsigned long);
+typedef void (*rxrpc_notify_new_call_t)(struct sock *, struct rxrpc_call *,
+ unsigned long);
+typedef void (*rxrpc_discard_new_call_t)(struct rxrpc_call *, unsigned long);
+typedef void (*rxrpc_user_attach_call_t)(struct rxrpc_call *, unsigned long);
+
+void rxrpc_kernel_new_call_notification(struct socket *,
+ rxrpc_notify_new_call_t,
+ rxrpc_discard_new_call_t);
+struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
+ struct sockaddr_rxrpc *,
+ struct key *,
+ unsigned long,
+ s64,
+ gfp_t,
+ rxrpc_notify_rx_t,
+ bool,
+ enum rxrpc_interruptibility,
+ unsigned int);
+int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
+ struct msghdr *, size_t,
+ rxrpc_notify_end_tx_t);
+int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
+ struct iov_iter *, size_t *, bool, u32 *, u16 *);
+bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
+ u32, int, const char *);
+void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
+void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
+ struct sockaddr_rxrpc *);
+bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *);
+int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
+ rxrpc_user_attach_call_t, unsigned long, gfp_t,
+ unsigned int);
+void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
+bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
+u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
+bool rxrpc_kernel_call_is_complete(struct rxrpc_call *);
+void rxrpc_kernel_set_max_life(struct socket *, struct rxrpc_call *,
+ unsigned long);
+
+int rxrpc_sock_set_min_security_level(struct sock *sk, unsigned int val);
+
+#endif /* _NET_RXRPC_H */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
new file mode 100644
index 000000000..55ca217c6
--- /dev/null
+++ b/include/net/af_unix.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_NET_AFUNIX_H
+#define __LINUX_NET_AFUNIX_H
+
+#include <linux/socket.h>
+#include <linux/un.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
+#include <net/sock.h>
+
+void unix_inflight(struct user_struct *user, struct file *fp);
+void unix_notinflight(struct user_struct *user, struct file *fp);
+void unix_destruct_scm(struct sk_buff *skb);
+void unix_gc(void);
+void wait_for_unix_gc(void);
+struct sock *unix_get_socket(struct file *filp);
+struct sock *unix_peer_get(struct sock *sk);
+
+#define UNIX_HASH_MOD (256 - 1)
+#define UNIX_HASH_SIZE (256 * 2)
+#define UNIX_HASH_BITS 8
+
+extern unsigned int unix_tot_inflight;
+
+struct unix_address {
+ refcount_t refcnt;
+ int len;
+ struct sockaddr_un name[];
+};
+
+struct unix_skb_parms {
+ struct pid *pid; /* Skb credentials */
+ kuid_t uid;
+ kgid_t gid;
+ struct scm_fp_list *fp; /* Passed files */
+#ifdef CONFIG_SECURITY_NETWORK
+ u32 secid; /* Security ID */
+#endif
+ u32 consumed;
+} __randomize_layout;
+
+struct scm_stat {
+ atomic_t nr_fds;
+};
+
+#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
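+
+/* UNIXCB() reinterprets the skb->cb scratch area (48 bytes) as the
+ * AF_UNIX per-skb credentials, so struct unix_skb_parms must always
+ * fit within skb->cb.
+ */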
+
+#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
+#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
+#define unix_state_lock_nested(s) \
+ spin_lock_nested(&unix_sk(s)->lock, \
+ SINGLE_DEPTH_NESTING)
+
+/* The AF_UNIX socket */
+struct unix_sock {
+ /* WARNING: sk has to be the first member */
+ struct sock sk;
+ struct unix_address *addr;
+ struct path path;
+ struct mutex iolock, bindlock;
+ struct sock *peer;
+ struct list_head link;
+ atomic_long_t inflight;
+ spinlock_t lock;
+ unsigned long gc_flags;
+#define UNIX_GC_CANDIDATE 0
+#define UNIX_GC_MAYBE_CYCLE 1
+ struct socket_wq peer_wq;
+ wait_queue_entry_t peer_wake;
+ struct scm_stat scm_stat;
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ struct sk_buff *oob_skb;
+#endif
+};
+
+static inline struct unix_sock *unix_sk(const struct sock *sk)
+{
+ return (struct unix_sock *)sk;
+}
+#define unix_peer(sk) (unix_sk(sk)->peer)
+
+#define peer_wait peer_wq.wait
+
+long unix_inq_len(struct sock *sk);
+long unix_outq_len(struct sock *sk);
+
+int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
+ int flags);
+int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
+ int flags);
+#ifdef CONFIG_SYSCTL
+int unix_sysctl_register(struct net *net);
+void unix_sysctl_unregister(struct net *net);
+#else
+static inline int unix_sysctl_register(struct net *net) { return 0; }
+static inline void unix_sysctl_unregister(struct net *net) {}
+#endif
+
+#ifdef CONFIG_BPF_SYSCALL
+extern struct proto unix_dgram_proto;
+extern struct proto unix_stream_proto;
+
+int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+void __init unix_bpf_build_proto(void);
+#else
+static inline void __init unix_bpf_build_proto(void)
+{}
+#endif
+#endif
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
new file mode 100644
index 000000000..568a87c5e
--- /dev/null
+++ b/include/net/af_vsock.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * VMware vSockets Driver
+ *
+ * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
+ */
+
+#ifndef __AF_VSOCK_H__
+#define __AF_VSOCK_H__
+
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <net/sock.h>
+#include <uapi/linux/vm_sockets.h>
+
+#include "vsock_addr.h"
+
+#define LAST_RESERVED_PORT 1023
+
+#define VSOCK_HASH_SIZE 251
+extern struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
+extern struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
+extern spinlock_t vsock_table_lock;
+
+#define vsock_sk(__sk) ((struct vsock_sock *)__sk)
+#define sk_vsock(__vsk) (&(__vsk)->sk)
+
+struct vsock_sock {
+ /* sk must be the first member. */
+ struct sock sk;
+ const struct vsock_transport *transport;
+ struct sockaddr_vm local_addr;
+ struct sockaddr_vm remote_addr;
+ /* Links for the global tables of bound and connected sockets. */
+ struct list_head bound_table;
+ struct list_head connected_table;
+ /* Accessed without the socket lock held. This means it can never be
+ * modified outside of socket create or destruct.
+ */
+ bool trusted;
+ bool cached_peer_allow_dgram; /* Dgram communication allowed to
+ * cached peer?
+ */
+ u32 cached_peer; /* Context ID of last dgram destination check. */
+ const struct cred *owner;
+ /* Rest are SOCK_STREAM only. */
+ long connect_timeout;
+ /* Listening socket that this came from. */
+ struct sock *listener;
+ /* Used for pending list and accept queue during connection handshake.
+ * The listening socket is the head for both lists. Sockets created
+ * for connection requests are placed in the pending list until they
+ * are connected, at which point they are put in the accept queue list
+ * so they can be accepted in accept(). If accept() cannot accept the
+ * connection, it is marked as rejected so the cleanup function knows
+ * to clean up the socket.
+ */
+ struct list_head pending_links;
+ struct list_head accept_queue;
+ bool rejected;
+ struct delayed_work connect_work;
+ struct delayed_work pending_work;
+ struct delayed_work close_work;
+ bool close_work_scheduled;
+ u32 peer_shutdown;
+ bool sent_request;
+ bool ignore_connecting_rst;
+
+ /* Protected by lock_sock(sk) */
+ u64 buffer_size;
+ u64 buffer_min_size;
+ u64 buffer_max_size;
+
+ /* Private to transport. */
+ void *trans;
+};
+
+s64 vsock_stream_has_data(struct vsock_sock *vsk);
+s64 vsock_stream_has_space(struct vsock_sock *vsk);
+struct sock *vsock_create_connected(struct sock *parent);
+void vsock_data_ready(struct sock *sk);
+
+/**** TRANSPORT ****/
+
+struct vsock_transport_recv_notify_data {
+ u64 data1; /* Transport-defined. */
+ u64 data2; /* Transport-defined. */
+ bool notify_on_block;
+};
+
+struct vsock_transport_send_notify_data {
+ u64 data1; /* Transport-defined. */
+ u64 data2; /* Transport-defined. */
+};
+
+/* Transport features flags */
+/* Transport provides host->guest communication */
+#define VSOCK_TRANSPORT_F_H2G 0x00000001
+/* Transport provides guest->host communication */
+#define VSOCK_TRANSPORT_F_G2H 0x00000002
+/* Transport provides DGRAM communication */
+#define VSOCK_TRANSPORT_F_DGRAM 0x00000004
+/* Transport provides local (loopback) communication */
+#define VSOCK_TRANSPORT_F_LOCAL 0x00000008
+
+struct vsock_transport {
+ struct module *module;
+
+ /* Initialize/tear-down socket. */
+ int (*init)(struct vsock_sock *, struct vsock_sock *);
+ void (*destruct)(struct vsock_sock *);
+ void (*release)(struct vsock_sock *);
+
+ /* Cancel all pending packets sent on vsock. */
+ int (*cancel_pkt)(struct vsock_sock *vsk);
+
+ /* Connections. */
+ int (*connect)(struct vsock_sock *);
+
+ /* DGRAM. */
+ int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *);
+ int (*dgram_dequeue)(struct vsock_sock *vsk, struct msghdr *msg,
+ size_t len, int flags);
+ int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *,
+ struct msghdr *, size_t len);
+ bool (*dgram_allow)(u32 cid, u32 port);
+
+ /* STREAM. */
+ /* TODO: stream_bind() */
+ ssize_t (*stream_dequeue)(struct vsock_sock *, struct msghdr *,
+ size_t len, int flags);
+ ssize_t (*stream_enqueue)(struct vsock_sock *, struct msghdr *,
+ size_t len);
+ s64 (*stream_has_data)(struct vsock_sock *);
+ s64 (*stream_has_space)(struct vsock_sock *);
+ u64 (*stream_rcvhiwat)(struct vsock_sock *);
+ bool (*stream_is_active)(struct vsock_sock *);
+ bool (*stream_allow)(u32 cid, u32 port);
+ int (*set_rcvlowat)(struct vsock_sock *vsk, int val);
+
+ /* SEQ_PACKET. */
+ ssize_t (*seqpacket_dequeue)(struct vsock_sock *vsk, struct msghdr *msg,
+ int flags);
+ int (*seqpacket_enqueue)(struct vsock_sock *vsk, struct msghdr *msg,
+ size_t len);
+ bool (*seqpacket_allow)(u32 remote_cid);
+ u32 (*seqpacket_has_data)(struct vsock_sock *vsk);
+
+ /* Notification. */
+ int (*notify_poll_in)(struct vsock_sock *, size_t, bool *);
+ int (*notify_poll_out)(struct vsock_sock *, size_t, bool *);
+ int (*notify_recv_init)(struct vsock_sock *, size_t,
+ struct vsock_transport_recv_notify_data *);
+ int (*notify_recv_pre_block)(struct vsock_sock *, size_t,
+ struct vsock_transport_recv_notify_data *);
+ int (*notify_recv_pre_dequeue)(struct vsock_sock *, size_t,
+ struct vsock_transport_recv_notify_data *);
+ int (*notify_recv_post_dequeue)(struct vsock_sock *, size_t,
+ ssize_t, bool, struct vsock_transport_recv_notify_data *);
+ int (*notify_send_init)(struct vsock_sock *,
+ struct vsock_transport_send_notify_data *);
+ int (*notify_send_pre_block)(struct vsock_sock *,
+ struct vsock_transport_send_notify_data *);
+ int (*notify_send_pre_enqueue)(struct vsock_sock *,
+ struct vsock_transport_send_notify_data *);
+ int (*notify_send_post_enqueue)(struct vsock_sock *, ssize_t,
+ struct vsock_transport_send_notify_data *);
+ /* sk_lock held by the caller */
+ void (*notify_buffer_size)(struct vsock_sock *, u64 *);
+
+ /* Shutdown. */
+ int (*shutdown)(struct vsock_sock *, int);
+
+ /* Addressing. */
+ u32 (*get_local_cid)(void);
+};
+
+/**** CORE ****/
+
+int vsock_core_register(const struct vsock_transport *t, int features);
+void vsock_core_unregister(const struct vsock_transport *t);
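+
+/* Registration sketch ("foo" is a hypothetical transport): a guest-side
+ * implementation fills in struct vsock_transport and advertises the
+ * directions it serves, e.g.
+ *
+ *	err = vsock_core_register(&foo_transport, VSOCK_TRANSPORT_F_G2H);
+ */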
+
+/* The transport may downcast this to access transport-specific functions */
+const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk);
+
+/**** UTILS ****/
+
+/* vsock_table_lock must be held */
+static inline bool __vsock_in_bound_table(struct vsock_sock *vsk)
+{
+ return !list_empty(&vsk->bound_table);
+}
+
+/* vsock_table_lock must be held */
+static inline bool __vsock_in_connected_table(struct vsock_sock *vsk)
+{
+ return !list_empty(&vsk->connected_table);
+}
+
+void vsock_release_pending(struct sock *pending);
+void vsock_add_pending(struct sock *listener, struct sock *pending);
+void vsock_remove_pending(struct sock *listener, struct sock *pending);
+void vsock_enqueue_accept(struct sock *listener, struct sock *connected);
+void vsock_insert_connected(struct vsock_sock *vsk);
+void vsock_remove_bound(struct vsock_sock *vsk);
+void vsock_remove_connected(struct vsock_sock *vsk);
+struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr);
+struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
+ struct sockaddr_vm *dst);
+void vsock_remove_sock(struct vsock_sock *vsk);
+void vsock_for_each_connected_socket(struct vsock_transport *transport,
+ void (*fn)(struct sock *sk));
+int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk);
+bool vsock_find_cid(unsigned int cid);
+
+/**** TAP ****/
+
+struct vsock_tap {
+ struct net_device *dev;
+ struct module *module;
+ struct list_head list;
+};
+
+int vsock_init_tap(void);
+int vsock_add_tap(struct vsock_tap *vt);
+int vsock_remove_tap(struct vsock_tap *vt);
+void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque);
+
+#endif /* __AF_VSOCK_H__ */
diff --git a/include/net/ah.h b/include/net/ah.h
new file mode 100644
index 000000000..2d2dea521
--- /dev/null
+++ b/include/net/ah.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_AH_H
+#define _NET_AH_H
+
+#include <linux/skbuff.h>
+
+struct crypto_ahash;
+
+struct ah_data {
+ int icv_full_len;
+ int icv_trunc_len;
+
+ struct crypto_ahash *ahash;
+};
+
+struct ip_auth_hdr;
+
+static inline struct ip_auth_hdr *ip_auth_hdr(const struct sk_buff *skb)
+{
+ return (struct ip_auth_hdr *)skb_transport_header(skb);
+}
+
+#endif
diff --git a/include/net/amt.h b/include/net/amt.h
new file mode 100644
index 000000000..c881bc8b6
--- /dev/null
+++ b/include/net/amt.h
@@ -0,0 +1,408 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com>
+ */
+#ifndef _NET_AMT_H_
+#define _NET_AMT_H_
+
+#include <linux/siphash.h>
+#include <linux/jhash.h>
+#include <linux/netdevice.h>
+#include <net/gro_cells.h>
+#include <net/rtnetlink.h>
+
+enum amt_msg_type {
+ AMT_MSG_DISCOVERY = 1,
+ AMT_MSG_ADVERTISEMENT,
+ AMT_MSG_REQUEST,
+ AMT_MSG_MEMBERSHIP_QUERY,
+ AMT_MSG_MEMBERSHIP_UPDATE,
+ AMT_MSG_MULTICAST_DATA,
+ AMT_MSG_TEARDOWN,
+ __AMT_MSG_MAX,
+};
+
+#define AMT_MSG_MAX (__AMT_MSG_MAX - 1)
+
+enum amt_ops {
+ /* A*B */
+ AMT_OPS_INT,
+ /* A+B */
+ AMT_OPS_UNI,
+ /* A-B */
+ AMT_OPS_SUB,
+ /* B-A */
+ AMT_OPS_SUB_REV,
+ __AMT_OPS_MAX,
+};
+
+#define AMT_OPS_MAX (__AMT_OPS_MAX - 1)
+
+enum amt_filter {
+ AMT_FILTER_FWD,
+ AMT_FILTER_D_FWD,
+ AMT_FILTER_FWD_NEW,
+ AMT_FILTER_D_FWD_NEW,
+ AMT_FILTER_ALL,
+ AMT_FILTER_NONE_NEW,
+ AMT_FILTER_BOTH,
+ AMT_FILTER_BOTH_NEW,
+ __AMT_FILTER_MAX,
+};
+
+#define AMT_FILTER_MAX (__AMT_FILTER_MAX - 1)
+
+enum amt_act {
+ AMT_ACT_GMI,
+ AMT_ACT_GMI_ZERO,
+ AMT_ACT_GT,
+ AMT_ACT_STATUS_FWD_NEW,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ AMT_ACT_STATUS_NONE_NEW,
+ __AMT_ACT_MAX,
+};
+
+#define AMT_ACT_MAX (__AMT_ACT_MAX - 1)
+
+enum amt_status {
+ AMT_STATUS_INIT,
+ AMT_STATUS_SENT_DISCOVERY,
+ AMT_STATUS_RECEIVED_DISCOVERY,
+ AMT_STATUS_SENT_ADVERTISEMENT,
+ AMT_STATUS_RECEIVED_ADVERTISEMENT,
+ AMT_STATUS_SENT_REQUEST,
+ AMT_STATUS_RECEIVED_REQUEST,
+ AMT_STATUS_SENT_QUERY,
+ AMT_STATUS_RECEIVED_QUERY,
+ AMT_STATUS_SENT_UPDATE,
+ AMT_STATUS_RECEIVED_UPDATE,
+ __AMT_STATUS_MAX,
+};
+
+#define AMT_STATUS_MAX (__AMT_STATUS_MAX - 1)
+
+/* Gateway events only */
+enum amt_event {
+ AMT_EVENT_NONE,
+ AMT_EVENT_RECEIVE,
+ AMT_EVENT_SEND_DISCOVERY,
+ AMT_EVENT_SEND_REQUEST,
+ __AMT_EVENT_MAX,
+};
+
+struct amt_header {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 type:4,
+ version:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 version:4,
+ type:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+struct amt_header_discovery {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 type:4,
+ version:4,
+ reserved:24;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 version:4,
+ type:4,
+ reserved:24;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_advertisement {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 type:4,
+ version:4,
+ reserved:24;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 version:4,
+ type:4,
+ reserved:24;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+ __be32 ip4;
+} __packed;
+
+struct amt_header_request {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 type:4,
+ version:4,
+ reserved1:7,
+ p:1,
+ reserved2:16;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 version:4,
+ type:4,
+ p:1,
+ reserved1:7,
+ reserved2:16;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_membership_query {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 type:4,
+ version:4,
+ reserved:6,
+ l:1,
+ g:1,
+ response_mac:48;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u64 version:4,
+ type:4,
+ g:1,
+ l:1,
+ reserved:6,
+ response_mac:48;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_membership_update {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 type:4,
+ version:4,
+ reserved:8,
+ response_mac:48;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u64 version:4,
+ type:4,
+ reserved:8,
+ response_mac:48;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_mcast_data {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u16 type:4,
+ version:4,
+ reserved:8;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u16 version:4,
+ type:4,
+ reserved:8;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+struct amt_headers {
+ union {
+ struct amt_header_discovery discovery;
+ struct amt_header_advertisement advertisement;
+ struct amt_header_request request;
+ struct amt_header_membership_query query;
+ struct amt_header_membership_update update;
+ struct amt_header_mcast_data data;
+ };
+} __packed;
+
+struct amt_gw_headers {
+ union {
+ struct amt_header_discovery discovery;
+ struct amt_header_request request;
+ struct amt_header_membership_update update;
+ };
+} __packed;
+
+struct amt_relay_headers {
+ union {
+ struct amt_header_advertisement advertisement;
+ struct amt_header_membership_query query;
+ struct amt_header_mcast_data data;
+ };
+} __packed;
+
+struct amt_skb_cb {
+ struct amt_tunnel_list *tunnel;
+};
+
+struct amt_tunnel_list {
+ struct list_head list;
+ /* Protects all resources under an amt_tunnel_list */
+ spinlock_t lock;
+ struct amt_dev *amt;
+ u32 nr_groups;
+ u32 nr_sources;
+ enum amt_status status;
+ struct delayed_work gc_wq;
+ __be16 source_port;
+ __be32 ip4;
+ __be32 nonce;
+ siphash_key_t key;
+ u64 mac:48,
+ reserved:16;
+ struct rcu_head rcu;
+ struct hlist_head groups[];
+};
+
+union amt_addr {
+ __be32 ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ip6;
+#endif
+};
+
+/* RFC 3810
+ *
+ * When the router is in EXCLUDE mode, the router state is represented
+ * by the notation EXCLUDE (X,Y), where X is called the "Requested List"
+ * and Y is called the "Exclude List". All sources, except those from
+ * the Exclude List, will be forwarded by the router.
+ */
+enum amt_source_status {
+ AMT_SOURCE_STATUS_NONE,
+ /* Node of Requested List */
+ AMT_SOURCE_STATUS_FWD,
+ /* Node of Exclude List */
+ AMT_SOURCE_STATUS_D_FWD,
+};
+
+/* protected by gnode->lock */
+struct amt_source_node {
+ struct hlist_node node;
+ struct amt_group_node *gnode;
+ struct delayed_work source_timer;
+ union amt_addr source_addr;
+ enum amt_source_status status;
+#define AMT_SOURCE_OLD 0
+#define AMT_SOURCE_NEW 1
+ u8 flags;
+ struct rcu_head rcu;
+};
+
+/* Protected by amt_tunnel_list->lock */
+struct amt_group_node {
+ struct amt_dev *amt;
+ union amt_addr group_addr;
+ union amt_addr host_addr;
+ bool v6;
+ u8 filter_mode;
+ u32 nr_sources;
+ struct amt_tunnel_list *tunnel_list;
+ struct hlist_node node;
+ struct delayed_work group_timer;
+ struct rcu_head rcu;
+ struct hlist_head sources[];
+};
+
+#define AMT_MAX_EVENTS 16
+struct amt_events {
+ enum amt_event event;
+ struct sk_buff *skb;
+};
+
+struct amt_dev {
+ struct net_device *dev;
+ struct net_device *stream_dev;
+ struct net *net;
+ /* Global lock for amt device */
+ spinlock_t lock;
+ /* Used only in relay mode */
+ struct list_head tunnel_list;
+ struct gro_cells gro_cells;
+
+ /* Protected by RTNL */
+ struct delayed_work discovery_wq;
+ /* Protected by RTNL */
+ struct delayed_work req_wq;
+ /* Protected by RTNL */
+ struct delayed_work secret_wq;
+ struct work_struct event_wq;
+ /* AMT status */
+ enum amt_status status;
+ /* Generated key */
+ siphash_key_t key;
+ struct socket __rcu *sock;
+ u32 max_groups;
+ u32 max_sources;
+ u32 hash_buckets;
+ u32 hash_seed;
+ /* Default 128 */
+ u32 max_tunnels;
+ /* Default 128 */
+ u32 nr_tunnels;
+ /* Gateway or Relay mode */
+ u32 mode;
+ /* Default 2268 */
+ __be16 relay_port;
+ /* Default 2268 */
+ __be16 gw_port;
+ /* Outer local ip */
+ __be32 local_ip;
+ /* Outer remote ip */
+ __be32 remote_ip;
+ /* Outer discovery ip */
+ __be32 discovery_ip;
+ /* Only used in gateway mode */
+ __be32 nonce;
+ /* Gateway sent request and received query */
+ bool ready4;
+ bool ready6;
+ u8 req_cnt;
+ u8 qi;
+ u64 qrv;
+ u64 qri;
+ /* Used only in gateway mode */
+ u64 mac:48,
+ reserved:16;
+ /* AMT gateway side message handler queue */
+ struct amt_events events[AMT_MAX_EVENTS];
+ u8 event_idx;
+ u8 nr_events;
+};
+
+#define AMT_TOS 0xc0
+#define AMT_IPHDR_OPTS 4
+#define AMT_IP6HDR_OPTS 8
+#define AMT_GC_INTERVAL (30 * 1000)
+#define AMT_MAX_GROUP 32
+#define AMT_MAX_SOURCE 128
+#define AMT_HSIZE_SHIFT 8
+#define AMT_HSIZE (1 << AMT_HSIZE_SHIFT)
+
+#define AMT_DISCOVERY_TIMEOUT 5000
+#define AMT_INIT_REQ_TIMEOUT 1
+#define AMT_INIT_QUERY_INTERVAL 125
+#define AMT_MAX_REQ_TIMEOUT 120
+#define AMT_MAX_REQ_COUNT 3
+#define AMT_SECRET_TIMEOUT 60000
+#define IANA_AMT_UDP_PORT 2268
+#define AMT_MAX_TUNNELS 128
+#define AMT_MAX_REQS 128
+#define AMT_GW_HLEN (sizeof(struct iphdr) + \
+ sizeof(struct udphdr) + \
+ sizeof(struct amt_gw_headers))
+#define AMT_RELAY_HLEN (sizeof(struct iphdr) + \
+ sizeof(struct udphdr) + \
+ sizeof(struct amt_relay_headers))
+
+static inline bool netif_is_amt(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops && !strcmp(dev->rtnl_link_ops->kind, "amt");
+}
+
+static inline u64 amt_gmi(const struct amt_dev *amt)
+{
+ return ((amt->qrv * amt->qi) + amt->qri) * 1000;
+}
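+
+/* amt_gmi() is the Group Membership Interval of RFC 3376/3810,
+ * (robustness * query interval) + query response interval, scaled to
+ * milliseconds; e.g. qrv=2, qi=125, qri=10 gives 260000 ms.
+ */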
+
+#endif /* _NET_AMT_H_ */
diff --git a/include/net/arp.h b/include/net/arp.h
new file mode 100644
index 000000000..e8747e071
--- /dev/null
+++ b/include/net/arp.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* linux/net/inet/arp.h */
+#ifndef _ARP_H
+#define _ARP_H
+
+#include <linux/if_arp.h>
+#include <linux/hash.h>
+#include <net/neighbour.h>
+
+
+extern struct neigh_table arp_tbl;
+
+static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32 *hash_rnd)
+{
+ u32 key = *(const u32 *)pkey;
+ u32 val = key ^ hash32_ptr(dev);
+
+ return val * hash_rnd[0];
+}
+
+#ifdef CONFIG_INET
+static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
+{
+ if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
+ key = INADDR_ANY;
+
+ return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
+}
+#else
+static inline
+struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
+{
+ return NULL;
+}
+#endif
+
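+/* Reference-taking lookup: refcount_inc_not_zero() keeps the race with
+ * a concurrent release benign, so a neighbour whose last reference is
+ * going away reads as a miss instead of being resurrected.
+ */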
+static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
+{
+ struct neighbour *n;
+
+ rcu_read_lock();
+ n = __ipv4_neigh_lookup_noref(dev, key);
+ if (n && !refcount_inc_not_zero(&n->refcnt))
+ n = NULL;
+ rcu_read_unlock();
+
+ return n;
+}
+
+static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key)
+{
+ struct neighbour *n;
+
+ rcu_read_lock();
+ n = __ipv4_neigh_lookup_noref(dev, key);
+ neigh_confirm(n);
+ rcu_read_unlock();
+}
+
+void arp_init(void);
+int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg);
+void arp_send(int type, int ptype, __be32 dest_ip,
+ struct net_device *dev, __be32 src_ip,
+ const unsigned char *dest_hw,
+ const unsigned char *src_hw, const unsigned char *th);
+int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
+void arp_ifdown(struct net_device *dev);
+int arp_invalidate(struct net_device *dev, __be32 ip, bool force);
+
+struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
+ struct net_device *dev, __be32 src_ip,
+ const unsigned char *dest_hw,
+ const unsigned char *src_hw,
+ const unsigned char *target_hw);
+void arp_xmit(struct sk_buff *skb);
+
+#endif /* _ARP_H */
diff --git a/include/net/atmclip.h b/include/net/atmclip.h
new file mode 100644
index 000000000..70e350e0d
--- /dev/null
+++ b/include/net/atmclip.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* net/atm/atmarp.h - RFC1577 ATM ARP */
+
+/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
+
+
+#ifndef _ATMCLIP_H
+#define _ATMCLIP_H
+
+#include <linux/netdevice.h>
+#include <linux/atm.h>
+#include <linux/atmdev.h>
+#include <linux/atmarp.h>
+#include <linux/spinlock.h>
+#include <net/neighbour.h>
+
+
+#define CLIP_VCC(vcc) ((struct clip_vcc *) ((vcc)->user_back))
+
+struct sk_buff;
+
+struct clip_vcc {
+ struct atm_vcc *vcc; /* VCC descriptor */
+ struct atmarp_entry *entry; /* ATMARP table entry, NULL if IP addr.
+ isn't known yet */
+ int xoff; /* 1 if send buffer is full */
+ unsigned char encap; /* 0: NULL, 1: LLC/SNAP */
+ unsigned long last_use; /* last send or receive operation */
+ unsigned long idle_timeout; /* keep open idle for so many jiffies */
+ void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb);
+ /* keep old push fn for chaining */
+ void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb);
+ /* keep old pop fn for chaining */
+ struct clip_vcc *next; /* next VCC */
+};
+
+
+struct atmarp_entry {
+ struct clip_vcc *vccs; /* active VCCs; NULL if resolution is
+ pending */
+ unsigned long expires; /* entry expiration time */
+ struct neighbour *neigh; /* neighbour back-pointer */
+};
+
+#define PRIV(dev) ((struct clip_priv *) netdev_priv(dev))
+
+struct clip_priv {
+ int number; /* for convenience ... */
+ spinlock_t xoff_lock; /* ensures that pop is atomic (SMP) */
+ struct net_device *next; /* next CLIP interface */
+};
+
+#endif
diff --git a/include/net/ax25.h b/include/net/ax25.h
new file mode 100644
index 000000000..f8cf3629a
--- /dev/null
+++ b/include/net/ax25.h
@@ -0,0 +1,489 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Declarations of AX.25 type objects.
+ *
+ * Alan Cox (GW4PTS) 10/11/93
+ */
+#ifndef _AX25_H
+#define _AX25_H
+
+#include <linux/ax25.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/refcount.h>
+#include <net/neighbour.h>
+#include <net/sock.h>
+#include <linux/seq_file.h>
+
+#define AX25_T1CLAMPLO 1
+#define AX25_T1CLAMPHI (30 * HZ)
+
+#define AX25_BPQ_HEADER_LEN 16
+#define AX25_KISS_HEADER_LEN 1
+
+#define AX25_HEADER_LEN 17
+#define AX25_ADDR_LEN 7
+#define AX25_DIGI_HEADER_LEN (AX25_MAX_DIGIS * AX25_ADDR_LEN)
+#define AX25_MAX_HEADER_LEN (AX25_HEADER_LEN + AX25_DIGI_HEADER_LEN)
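+
+/* With the UAPI value AX25_MAX_DIGIS = 8 this bounds the header at
+ * 17 + 8 * 7 = 73 bytes.
+ */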
+
+/* AX.25 Protocol IDs */
+#define AX25_P_ROSE 0x01
+#define AX25_P_VJCOMP 0x06 /* Compressed TCP/IP packet */
+ /* Van Jacobson (RFC 1144) */
+#define AX25_P_VJUNCOMP 0x07 /* Uncompressed TCP/IP packet */
+ /* Van Jacobson (RFC 1144) */
+#define AX25_P_SEGMENT 0x08 /* Segmentation fragment */
+#define AX25_P_TEXNET 0xc3 /* TEXTNET datagram protocol */
+#define AX25_P_LQ 0xc4 /* Link Quality Protocol */
+#define AX25_P_ATALK 0xca /* Appletalk */
+#define AX25_P_ATALK_ARP 0xcb /* Appletalk ARP */
+#define AX25_P_IP 0xcc /* ARPA Internet Protocol */
+#define AX25_P_ARP 0xcd /* ARPA Address Resolution */
+#define AX25_P_FLEXNET 0xce /* FlexNet */
+#define AX25_P_NETROM 0xcf /* NET/ROM */
+#define AX25_P_TEXT 0xF0 /* No layer 3 protocol impl. */
+
+/* AX.25 Segment control values */
+#define AX25_SEG_REM 0x7F
+#define AX25_SEG_FIRST 0x80
+
+#define AX25_CBIT 0x80 /* Command/Response bit */
+#define AX25_EBIT 0x01 /* HDLC Address Extension bit */
+#define AX25_HBIT 0x80 /* Has been repeated bit */
+
+#define AX25_SSSID_SPARE 0x60 /* Unused bits in SSID for standard AX.25 */
+#define AX25_ESSID_SPARE 0x20 /* Unused bits in SSID for extended AX.25 */
+#define AX25_DAMA_FLAG 0x20 /* Well, it is *NOT* unused! (dl1bke 951121) */
+
+#define AX25_COND_ACK_PENDING 0x01
+#define AX25_COND_REJECT 0x02
+#define AX25_COND_PEER_RX_BUSY 0x04
+#define AX25_COND_OWN_RX_BUSY 0x08
+#define AX25_COND_DAMA_MODE 0x10
+
+#ifndef _LINUX_NETDEVICE_H
+#include <linux/netdevice.h>
+#endif
+
+/* Upper sub-layer (LAPB) definitions */
+
+/* Control field templates */
+#define AX25_I 0x00 /* Information frames */
+#define AX25_S 0x01 /* Supervisory frames */
+#define AX25_RR 0x01 /* Receiver ready */
+#define AX25_RNR 0x05 /* Receiver not ready */
+#define AX25_REJ 0x09 /* Reject */
+#define AX25_U 0x03 /* Unnumbered frames */
+#define AX25_SABM 0x2f /* Set Asynchronous Balanced Mode */
+#define AX25_SABME 0x6f /* Set Asynchronous Balanced Mode Extended */
+#define AX25_DISC 0x43 /* Disconnect */
+#define AX25_DM 0x0f /* Disconnected mode */
+#define AX25_UA 0x63 /* Unnumbered acknowledge */
+#define AX25_FRMR 0x87 /* Frame reject */
+#define AX25_UI 0x03 /* Unnumbered information */
+#define AX25_XID 0xaf /* Exchange information */
+#define AX25_TEST 0xe3 /* Test */
+
+#define AX25_PF 0x10 /* Poll/final bit for standard AX.25 */
+#define AX25_EPF 0x01 /* Poll/final bit for extended AX.25 */
+
+#define AX25_ILLEGAL 0x100 /* Impossible to be a real frame type */
+
+#define AX25_POLLOFF 0
+#define AX25_POLLON 1
+
+/* AX25 L2 C-bit */
+#define AX25_COMMAND 1
+#define AX25_RESPONSE 2
+
+/* Define Link State constants. */
+
+enum {
+ AX25_STATE_0, /* Listening */
+ AX25_STATE_1, /* SABM sent */
+ AX25_STATE_2, /* DISC sent */
+ AX25_STATE_3, /* Established */
+ AX25_STATE_4 /* Recovery */
+};
+
+#define AX25_MODULUS 8 /* Standard AX.25 modulus */
+#define AX25_EMODULUS 128 /* Extended AX.25 modulus */
+
+enum {
+ AX25_PROTO_STD_SIMPLEX,
+ AX25_PROTO_STD_DUPLEX,
+#ifdef CONFIG_AX25_DAMA_SLAVE
+ AX25_PROTO_DAMA_SLAVE,
+#ifdef CONFIG_AX25_DAMA_MASTER
+ AX25_PROTO_DAMA_MASTER,
+#define AX25_PROTO_MAX AX25_PROTO_DAMA_MASTER
+#endif
+#endif
+ __AX25_PROTO_MAX,
+ AX25_PROTO_MAX = __AX25_PROTO_MAX - 1
+};
+
+enum {
+ AX25_VALUES_IPDEFMODE, /* 0=DG 1=VC */
+ AX25_VALUES_AXDEFMODE, /* 0=Normal 1=Extended Seq Nos */
+ AX25_VALUES_BACKOFF, /* 0=None 1=Linear 2=Exponential */
+ AX25_VALUES_CONMODE, /* Allow connected modes - 0=No 1=no "PID text" 2=all PIDs */
+ AX25_VALUES_WINDOW, /* Default window size for standard AX.25 */
+ AX25_VALUES_EWINDOW, /* Default window size for extended AX.25 */
+ AX25_VALUES_T1, /* Default T1 timeout value */
+ AX25_VALUES_T2, /* Default T2 timeout value */
+ AX25_VALUES_T3, /* Default T3 timeout value */
+ AX25_VALUES_IDLE, /* Connected mode idle timer */
+ AX25_VALUES_N2, /* Default N2 value */
+ AX25_VALUES_PACLEN, /* AX.25 MTU */
+ AX25_VALUES_PROTOCOL, /* Std AX.25, DAMA Slave, DAMA Master */
+ AX25_VALUES_DS_TIMEOUT, /* DAMA Slave timeout */
+ AX25_MAX_VALUES /* THIS MUST REMAIN THE LAST ENTRY OF THIS LIST */
+};
+
+#define AX25_DEF_IPDEFMODE 0 /* Datagram */
+#define AX25_DEF_AXDEFMODE 0 /* Normal */
+#define AX25_DEF_BACKOFF 1 /* Linear backoff */
+#define AX25_DEF_CONMODE 2 /* Connected mode allowed */
+#define AX25_DEF_WINDOW 2 /* Window=2 */
+#define AX25_DEF_EWINDOW 32 /* Modulo-128 Window=32 */
+#define AX25_DEF_T1 10000 /* T1=10s */
+#define AX25_DEF_T2 3000 /* T2=3s */
+#define AX25_DEF_T3 300000 /* T3=300s */
+#define AX25_DEF_N2 10 /* N2=10 */
+#define AX25_DEF_IDLE 0 /* Idle=None */
+#define AX25_DEF_PACLEN 256 /* Paclen=256 */
+#define AX25_DEF_PROTOCOL AX25_PROTO_STD_SIMPLEX /* Standard AX.25 */
+#define AX25_DEF_DS_TIMEOUT 180000 /* DAMA timeout 3 minutes */
+
+typedef struct ax25_uid_assoc {
+ struct hlist_node uid_node;
+ refcount_t refcount;
+ kuid_t uid;
+ ax25_address call;
+} ax25_uid_assoc;
+
+#define ax25_uid_for_each(__ax25, list) \
+ hlist_for_each_entry(__ax25, list, uid_node)
+
+#define ax25_uid_hold(ax25) \
+ refcount_inc(&((ax25)->refcount))
+
+static inline void ax25_uid_put(ax25_uid_assoc *assoc)
+{
+ if (refcount_dec_and_test(&assoc->refcount)) {
+ kfree(assoc);
+ }
+}
+
+typedef struct {
+ ax25_address calls[AX25_MAX_DIGIS];
+ unsigned char repeated[AX25_MAX_DIGIS];
+ unsigned char ndigi;
+ signed char lastrepeat;
+} ax25_digi;
+
+typedef struct ax25_route {
+ struct ax25_route *next;
+ ax25_address callsign;
+ struct net_device *dev;
+ ax25_digi *digipeat;
+ char ip_mode;
+} ax25_route;
+
+void __ax25_put_route(ax25_route *ax25_rt);
+
+extern rwlock_t ax25_route_lock;
+
+static inline void ax25_route_lock_use(void)
+{
+ read_lock(&ax25_route_lock);
+}
+
+static inline void ax25_route_lock_unuse(void)
+{
+ read_unlock(&ax25_route_lock);
+}
+
+typedef struct {
+ char slave; /* slave_mode? */
+ struct timer_list slave_timer; /* timeout timer */
+ unsigned short slave_timeout; /* when? */
+} ax25_dama_info;
+
+struct ctl_table;
+
+typedef struct ax25_dev {
+ struct ax25_dev *next;
+
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
+ struct net_device *forward;
+ struct ctl_table_header *sysheader;
+ int values[AX25_MAX_VALUES];
+#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
+ ax25_dama_info dama;
+#endif
+ refcount_t refcount;
+ bool device_up;
+} ax25_dev;
+
+typedef struct ax25_cb {
+ struct hlist_node ax25_node;
+ ax25_address source_addr, dest_addr;
+ ax25_digi *digipeat;
+ ax25_dev *ax25_dev;
+ netdevice_tracker dev_tracker;
+ unsigned char iamdigi;
+ unsigned char state, modulus, pidincl;
+ unsigned short vs, vr, va;
+ unsigned char condition, backoff;
+ unsigned char n2, n2count;
+ struct timer_list t1timer, t2timer, t3timer, idletimer;
+ unsigned long t1, t2, t3, idle, rtt;
+ unsigned short paclen, fragno, fraglen;
+ struct sk_buff_head write_queue;
+ struct sk_buff_head reseq_queue;
+ struct sk_buff_head ack_queue;
+ struct sk_buff_head frag_queue;
+ unsigned char window;
+ struct timer_list timer, dtimer;
+ struct sock *sk; /* Backlink to socket */
+ refcount_t refcount;
+} ax25_cb;
+
+struct ax25_sock {
+ struct sock sk;
+ struct ax25_cb *cb;
+};
+
+static inline struct ax25_sock *ax25_sk(const struct sock *sk)
+{
+ return (struct ax25_sock *) sk;
+}
+
+static inline struct ax25_cb *sk_to_ax25(const struct sock *sk)
+{
+ return ax25_sk(sk)->cb;
+}
+
+#define ax25_for_each(__ax25, list) \
+ hlist_for_each_entry(__ax25, list, ax25_node)
+
+#define ax25_cb_hold(__ax25) \
+ refcount_inc(&((__ax25)->refcount))
+
+static __inline__ void ax25_cb_put(ax25_cb *ax25)
+{
+ if (refcount_dec_and_test(&ax25->refcount)) {
+ kfree(ax25->digipeat);
+ kfree(ax25);
+ }
+}
+
+static inline void ax25_dev_hold(ax25_dev *ax25_dev)
+{
+ refcount_inc(&ax25_dev->refcount);
+}
+
+static inline void ax25_dev_put(ax25_dev *ax25_dev)
+{
+ if (refcount_dec_and_test(&ax25_dev->refcount)) {
+ kfree(ax25_dev);
+ }
+}
+
+static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb->pkt_type = PACKET_HOST;
+ return htons(ETH_P_AX25);
+}
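+
+/*
+ * Editor's sketch: a KISS-style line discipline typically stamps a
+ * received frame with ax25_type_trans() before queueing it to the
+ * stack:
+ *
+ *   skb->protocol = ax25_type_trans(skb, dev);
+ *   netif_rx(skb);
+ */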
+
+/* af_ax25.c */
+extern struct hlist_head ax25_list;
+extern spinlock_t ax25_list_lock;
+void ax25_cb_add(ax25_cb *);
+struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int);
+struct sock *ax25_get_socket(ax25_address *, ax25_address *, int);
+ax25_cb *ax25_find_cb(const ax25_address *, ax25_address *, ax25_digi *,
+ struct net_device *);
+void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
+void ax25_destroy_socket(ax25_cb *);
+ax25_cb * __must_check ax25_create_cb(void);
+void ax25_fillin_cb(ax25_cb *, ax25_dev *);
+struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
+
+/* ax25_addr.c */
+extern const ax25_address ax25_bcast;
+extern const ax25_address ax25_defaddr;
+extern const ax25_address null_ax25_address;
+char *ax2asc(char *buf, const ax25_address *);
+void asc2ax(ax25_address *addr, const char *callsign);
+int ax25cmp(const ax25_address *, const ax25_address *);
+int ax25digicmp(const ax25_digi *, const ax25_digi *);
+const unsigned char *ax25_addr_parse(const unsigned char *, int,
+ ax25_address *, ax25_address *, ax25_digi *, int *, int *);
+int ax25_addr_build(unsigned char *, const ax25_address *,
+ const ax25_address *, const ax25_digi *, int, int);
+int ax25_addr_size(const ax25_digi *);
+void ax25_digi_invert(const ax25_digi *, ax25_digi *);
+
+/* ax25_dev.c */
+extern ax25_dev *ax25_dev_list;
+extern spinlock_t ax25_dev_lock;
+
+#if IS_ENABLED(CONFIG_AX25)
+static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev)
+{
+ return dev->ax25_ptr;
+}
+#endif
+
+ax25_dev *ax25_addr_ax25dev(ax25_address *);
+void ax25_dev_device_up(struct net_device *);
+void ax25_dev_device_down(struct net_device *);
+int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *);
+struct net_device *ax25_fwd_dev(struct net_device *);
+void ax25_dev_free(void);
+
+/* ax25_ds_in.c */
+int ax25_ds_frame_in(ax25_cb *, struct sk_buff *, int);
+
+/* ax25_ds_subr.c */
+void ax25_ds_nr_error_recovery(ax25_cb *);
+void ax25_ds_enquiry_response(ax25_cb *);
+void ax25_ds_establish_data_link(ax25_cb *);
+void ax25_dev_dama_off(ax25_dev *);
+void ax25_dama_on(ax25_cb *);
+void ax25_dama_off(ax25_cb *);
+
+/* ax25_ds_timer.c */
+void ax25_ds_setup_timer(ax25_dev *);
+void ax25_ds_set_timer(ax25_dev *);
+void ax25_ds_del_timer(ax25_dev *);
+void ax25_ds_timer(ax25_cb *);
+void ax25_ds_t1_timeout(ax25_cb *);
+void ax25_ds_heartbeat_expiry(ax25_cb *);
+void ax25_ds_t3timer_expiry(ax25_cb *);
+void ax25_ds_idletimer_expiry(ax25_cb *);
+
+/* ax25_iface.c */
+
+struct ax25_protocol {
+ struct ax25_protocol *next;
+ unsigned int pid;
+ int (*func)(struct sk_buff *, ax25_cb *);
+};
+
+void ax25_register_pid(struct ax25_protocol *ap);
+void ax25_protocol_release(unsigned int);
+
+struct ax25_linkfail {
+ struct hlist_node lf_node;
+ void (*func)(ax25_cb *, int);
+};
+
+void ax25_linkfail_register(struct ax25_linkfail *lf);
+void ax25_linkfail_release(struct ax25_linkfail *lf);
+int __must_check ax25_listen_register(const ax25_address *,
+ struct net_device *);
+void ax25_listen_release(const ax25_address *, struct net_device *);
+int(*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
+int ax25_listen_mine(const ax25_address *, struct net_device *);
+void ax25_link_failed(ax25_cb *, int);
+int ax25_protocol_is_registered(unsigned int);
+
+/* ax25_in.c */
+int ax25_rx_iframe(ax25_cb *, struct sk_buff *);
+int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *,
+ struct net_device *);
+
+/* ax25_ip.c */
+netdev_tx_t ax25_ip_xmit(struct sk_buff *skb);
+extern const struct header_ops ax25_header_ops;
+
+/* ax25_out.c */
+ax25_cb *ax25_send_frame(struct sk_buff *, int, const ax25_address *,
+ ax25_address *, ax25_digi *, struct net_device *);
+void ax25_output(ax25_cb *, int, struct sk_buff *);
+void ax25_kick(ax25_cb *);
+void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
+void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev);
+int ax25_check_iframes_acked(ax25_cb *, unsigned short);
+
+/* ax25_route.c */
+void ax25_rt_device_down(struct net_device *);
+int ax25_rt_ioctl(unsigned int, void __user *);
+extern const struct seq_operations ax25_rt_seqops;
+ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
+int ax25_rt_autobind(ax25_cb *, ax25_address *);
+struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *,
+ ax25_address *, ax25_digi *);
+void ax25_rt_free(void);
+
+/* ax25_std_in.c */
+int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int);
+
+/* ax25_std_subr.c */
+void ax25_std_nr_error_recovery(ax25_cb *);
+void ax25_std_establish_data_link(ax25_cb *);
+void ax25_std_transmit_enquiry(ax25_cb *);
+void ax25_std_enquiry_response(ax25_cb *);
+void ax25_std_timeout_response(ax25_cb *);
+
+/* ax25_std_timer.c */
+void ax25_std_heartbeat_expiry(ax25_cb *);
+void ax25_std_t1timer_expiry(ax25_cb *);
+void ax25_std_t2timer_expiry(ax25_cb *);
+void ax25_std_t3timer_expiry(ax25_cb *);
+void ax25_std_idletimer_expiry(ax25_cb *);
+
+/* ax25_subr.c */
+void ax25_clear_queues(ax25_cb *);
+void ax25_frames_acked(ax25_cb *, unsigned short);
+void ax25_requeue_frames(ax25_cb *);
+int ax25_validate_nr(ax25_cb *, unsigned short);
+int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *);
+void ax25_send_control(ax25_cb *, int, int, int);
+void ax25_return_dm(struct net_device *, ax25_address *, ax25_address *,
+ ax25_digi *);
+void ax25_calculate_t1(ax25_cb *);
+void ax25_calculate_rtt(ax25_cb *);
+void ax25_disconnect(ax25_cb *, int);
+
+/* ax25_timer.c */
+void ax25_setup_timers(ax25_cb *);
+void ax25_start_heartbeat(ax25_cb *);
+void ax25_start_t1timer(ax25_cb *);
+void ax25_start_t2timer(ax25_cb *);
+void ax25_start_t3timer(ax25_cb *);
+void ax25_start_idletimer(ax25_cb *);
+void ax25_stop_heartbeat(ax25_cb *);
+void ax25_stop_t1timer(ax25_cb *);
+void ax25_stop_t2timer(ax25_cb *);
+void ax25_stop_t3timer(ax25_cb *);
+void ax25_stop_idletimer(ax25_cb *);
+int ax25_t1timer_running(ax25_cb *);
+unsigned long ax25_display_timer(struct timer_list *);
+
+/* ax25_uid.c */
+extern int ax25_uid_policy;
+ax25_uid_assoc *ax25_findbyuid(kuid_t);
+int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *);
+extern const struct seq_operations ax25_uid_seqops;
+void ax25_uid_free(void);
+
+/* sysctl_net_ax25.c */
+#ifdef CONFIG_SYSCTL
+int ax25_register_dev_sysctl(ax25_dev *ax25_dev);
+void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev);
+#else
+static inline int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { return 0; }
+static inline void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) {}
+#endif /* CONFIG_SYSCTL */
+
+#endif
diff --git a/include/net/ax88796.h b/include/net/ax88796.h
new file mode 100644
index 000000000..303100f08
--- /dev/null
+++ b/include/net/ax88796.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* include/net/ax88796.h
+ *
+ * Copyright 2005 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ */
+
+#ifndef __NET_AX88796_PLAT_H
+#define __NET_AX88796_PLAT_H
+
+#include <linux/types.h>
+
+struct sk_buff;
+struct net_device;
+struct platform_device;
+
+#define AXFLG_HAS_EEPROM (1<<0)
+#define AXFLG_MAC_FROMDEV (1<<1) /* device already has MAC */
+#define AXFLG_HAS_93CX6 (1<<2) /* use eeprom_93cx6 driver */
+#define AXFLG_MAC_FROMPLATFORM (1<<3) /* MAC given by platform data */
+
+struct ax_plat_data {
+ unsigned int flags;
+ unsigned char wordlength; /* 1 or 2 */
+ unsigned char dcr_val; /* default value for DCR */
+ unsigned char rcr_val; /* default value for RCR */
+ unsigned char gpoc_val; /* default value for GPOC */
+ u32 *reg_offsets; /* register offsets */
+ u8 *mac_addr; /* MAC addr (only used when
+ AXFLG_MAC_FROMPLATFORM is set) */
+
+ /* uses default ax88796 buffer if set to NULL */
+ void (*block_output)(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+ void (*block_input)(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+ /* returns nonzero if a pending interrupt request might have been
+ * caused by the ax88796; if set to NULL, all interrupts are treated
+ * as coming from the ax88796
+ */
+ int (*check_irq)(struct platform_device *pdev);
+};
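+
+/*
+ * Editor's sketch (hypothetical board code): a platform passes this
+ * structure as platform data so the generic driver can pick the bus
+ * width and register defaults. The values below are illustrative
+ * only:
+ *
+ *   static struct ax_plat_data myboard_asix_pdata = {
+ *           .flags      = AXFLG_HAS_EEPROM,
+ *           .wordlength = 2,
+ *           .dcr_val    = 0x48,
+ *           .rcr_val    = 0x40,
+ *   };
+ */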
+
+/* exported from ax88796.c for xsurf100.c */
+extern void ax_NS8390_reinit(struct net_device *dev);
+
+#endif /* __NET_AX88796_PLAT_H */
diff --git a/include/net/bareudp.h b/include/net/bareudp.h
new file mode 100644
index 000000000..17610c8d6
--- /dev/null
+++ b/include/net/bareudp.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NET_BAREUDP_H
+#define __NET_BAREUDP_H
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <net/rtnetlink.h>
+
+static inline bool netif_is_bareudp(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "bareudp");
+}
+
+#endif
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
new file mode 100644
index 000000000..bcc5a4cd2
--- /dev/null
+++ b/include/net/bluetooth/bluetooth.h
@@ -0,0 +1,634 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __BLUETOOTH_H
+#define __BLUETOOTH_H
+
+#include <linux/poll.h>
+#include <net/sock.h>
+#include <linux/seq_file.h>
+
+#define BT_SUBSYS_VERSION 2
+#define BT_SUBSYS_REVISION 22
+
+#ifndef AF_BLUETOOTH
+#define AF_BLUETOOTH 31
+#define PF_BLUETOOTH AF_BLUETOOTH
+#endif
+
+/* Bluetooth versions */
+#define BLUETOOTH_VER_1_1 1
+#define BLUETOOTH_VER_1_2 2
+#define BLUETOOTH_VER_2_0 3
+#define BLUETOOTH_VER_2_1 4
+#define BLUETOOTH_VER_4_0 6
+
+/* Reserved for core and driver use */
+#define BT_SKB_RESERVE 8
+
+#define BTPROTO_L2CAP 0
+#define BTPROTO_HCI 1
+#define BTPROTO_SCO 2
+#define BTPROTO_RFCOMM 3
+#define BTPROTO_BNEP 4
+#define BTPROTO_CMTP 5
+#define BTPROTO_HIDP 6
+#define BTPROTO_AVDTP 7
+#define BTPROTO_ISO 8
+#define BTPROTO_LAST BTPROTO_ISO
+
+#define SOL_HCI 0
+#define SOL_L2CAP 6
+#define SOL_SCO 17
+#define SOL_RFCOMM 18
+
+#define BT_SECURITY 4
+struct bt_security {
+ __u8 level;
+ __u8 key_size;
+};
+#define BT_SECURITY_SDP 0
+#define BT_SECURITY_LOW 1
+#define BT_SECURITY_MEDIUM 2
+#define BT_SECURITY_HIGH 3
+#define BT_SECURITY_FIPS 4
+
+#define BT_DEFER_SETUP 7
+
+#define BT_FLUSHABLE 8
+
+#define BT_FLUSHABLE_OFF 0
+#define BT_FLUSHABLE_ON 1
+
+#define BT_POWER 9
+struct bt_power {
+ __u8 force_active;
+};
+#define BT_POWER_FORCE_ACTIVE_OFF 0
+#define BT_POWER_FORCE_ACTIVE_ON 1
+
+#define BT_CHANNEL_POLICY 10
+
+/* BR/EDR only (default policy)
+ * AMP controllers cannot be used.
+ * Channel move requests from the remote device are denied.
+ * If the L2CAP channel is currently using AMP, move the channel to BR/EDR.
+ */
+#define BT_CHANNEL_POLICY_BREDR_ONLY 0
+
+/* BR/EDR Preferred
+ * Allow use of AMP controllers.
+ * If the L2CAP channel is currently on AMP, move it to BR/EDR.
+ * Channel move requests from the remote device are allowed.
+ */
+#define BT_CHANNEL_POLICY_BREDR_PREFERRED 1
+
+/* AMP Preferred
+ * Allow use of AMP controllers
+ * If the L2CAP channel is currently on BR/EDR and AMP controller
+ * resources are available, initiate a channel move to AMP.
+ * Channel move requests from the remote device are allowed.
+ * If the L2CAP socket has not been connected yet, try to create
+ * and configure the channel directly on an AMP controller rather
+ * than BR/EDR.
+ */
+#define BT_CHANNEL_POLICY_AMP_PREFERRED 2
+
+#define BT_VOICE 11
+struct bt_voice {
+ __u16 setting;
+};
+
+#define BT_VOICE_TRANSPARENT 0x0003
+#define BT_VOICE_CVSD_16BIT 0x0060
+
+#define BT_SNDMTU 12
+#define BT_RCVMTU 13
+#define BT_PHY 14
+
+#define BT_PHY_BR_1M_1SLOT 0x00000001
+#define BT_PHY_BR_1M_3SLOT 0x00000002
+#define BT_PHY_BR_1M_5SLOT 0x00000004
+#define BT_PHY_EDR_2M_1SLOT 0x00000008
+#define BT_PHY_EDR_2M_3SLOT 0x00000010
+#define BT_PHY_EDR_2M_5SLOT 0x00000020
+#define BT_PHY_EDR_3M_1SLOT 0x00000040
+#define BT_PHY_EDR_3M_3SLOT 0x00000080
+#define BT_PHY_EDR_3M_5SLOT 0x00000100
+#define BT_PHY_LE_1M_TX 0x00000200
+#define BT_PHY_LE_1M_RX 0x00000400
+#define BT_PHY_LE_2M_TX 0x00000800
+#define BT_PHY_LE_2M_RX 0x00001000
+#define BT_PHY_LE_CODED_TX 0x00002000
+#define BT_PHY_LE_CODED_RX 0x00004000
+
+#define BT_MODE 15
+
+#define BT_MODE_BASIC 0x00
+#define BT_MODE_ERTM 0x01
+#define BT_MODE_STREAMING 0x02
+#define BT_MODE_LE_FLOWCTL 0x03
+#define BT_MODE_EXT_FLOWCTL 0x04
+
+#define BT_PKT_STATUS 16
+
+#define BT_SCM_PKT_STATUS 0x03
+
+#define BT_ISO_QOS 17
+
+#define BT_ISO_QOS_CIG_UNSET 0xff
+#define BT_ISO_QOS_CIS_UNSET 0xff
+
+#define BT_ISO_QOS_BIG_UNSET 0xff
+#define BT_ISO_QOS_BIS_UNSET 0xff
+
+struct bt_iso_io_qos {
+ __u32 interval;
+ __u16 latency;
+ __u16 sdu;
+ __u8 phy;
+ __u8 rtn;
+};
+
+struct bt_iso_qos {
+ union {
+ __u8 cig;
+ __u8 big;
+ };
+ union {
+ __u8 cis;
+ __u8 bis;
+ };
+ union {
+ __u8 sca;
+ __u8 sync_interval;
+ };
+ __u8 packing;
+ __u8 framing;
+ struct bt_iso_io_qos in;
+ struct bt_iso_io_qos out;
+};
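+
+/*
+ * Editor's sketch: a unicast set-up can leave the CIG/CIS identifiers
+ * unset so that they are allocated automatically; the QoS numbers
+ * below are illustrative only. Userspace would hand this to the
+ * socket via setsockopt(fd, SOL_BLUETOOTH, BT_ISO_QOS, &qos,
+ * sizeof(qos)):
+ *
+ *   struct bt_iso_qos qos = {
+ *           .cig = BT_ISO_QOS_CIG_UNSET,
+ *           .cis = BT_ISO_QOS_CIS_UNSET,
+ *           .out = {
+ *                   .interval = 10000,  (SDU interval, us)
+ *                   .latency  = 10,     (ms)
+ *                   .sdu      = 40,
+ *                   .phy      = BT_ISO_PHY_2M,
+ *                   .rtn      = 2,
+ *           },
+ *   };
+ */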
+
+#define BT_ISO_PHY_1M 0x01
+#define BT_ISO_PHY_2M 0x02
+#define BT_ISO_PHY_CODED 0x04
+#define BT_ISO_PHY_ANY (BT_ISO_PHY_1M | BT_ISO_PHY_2M | \
+ BT_ISO_PHY_CODED)
+
+#define BT_CODEC 19
+
+struct bt_codec_caps {
+ __u8 len;
+ __u8 data[];
+} __packed;
+
+struct bt_codec {
+ __u8 id;
+ __u16 cid;
+ __u16 vid;
+ __u8 data_path;
+ __u8 num_caps;
+} __packed;
+
+struct bt_codecs {
+ __u8 num_codecs;
+ struct bt_codec codecs[];
+} __packed;
+
+#define BT_CODEC_CVSD 0x02
+#define BT_CODEC_TRANSPARENT 0x03
+#define BT_CODEC_MSBC 0x05
+
+#define BT_ISO_BASE 20
+
+__printf(1, 2)
+void bt_info(const char *fmt, ...);
+__printf(1, 2)
+void bt_warn(const char *fmt, ...);
+__printf(1, 2)
+void bt_err(const char *fmt, ...);
+#if IS_ENABLED(CONFIG_BT_FEATURE_DEBUG)
+void bt_dbg_set(bool enable);
+bool bt_dbg_get(void);
+__printf(1, 2)
+void bt_dbg(const char *fmt, ...);
+#endif
+__printf(1, 2)
+void bt_warn_ratelimited(const char *fmt, ...);
+__printf(1, 2)
+void bt_err_ratelimited(const char *fmt, ...);
+
+#define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__)
+#define BT_WARN(fmt, ...) bt_warn(fmt "\n", ##__VA_ARGS__)
+#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__)
+
+#if IS_ENABLED(CONFIG_BT_FEATURE_DEBUG)
+#define BT_DBG(fmt, ...) bt_dbg(fmt "\n", ##__VA_ARGS__)
+#else
+#define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__)
+#endif
+
+#define bt_dev_name(hdev) ((hdev) ? (hdev)->name : "null")
+
+#define bt_dev_info(hdev, fmt, ...) \
+ BT_INFO("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
+#define bt_dev_warn(hdev, fmt, ...) \
+ BT_WARN("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
+#define bt_dev_err(hdev, fmt, ...) \
+ BT_ERR("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
+#define bt_dev_dbg(hdev, fmt, ...) \
+ BT_DBG("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
+
+#define bt_dev_warn_ratelimited(hdev, fmt, ...) \
+ bt_warn_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
+#define bt_dev_err_ratelimited(hdev, fmt, ...) \
+ bt_err_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
+
+/* Connection and socket states */
+enum {
+ BT_CONNECTED = 1, /* Equal to TCP_ESTABLISHED to make net code happy */
+ BT_OPEN,
+ BT_BOUND,
+ BT_LISTEN,
+ BT_CONNECT,
+ BT_CONNECT2,
+ BT_CONFIG,
+ BT_DISCONN,
+ BT_CLOSED
+};
+
+/* If unused, this will be removed by the compiler */
+static inline const char *state_to_string(int state)
+{
+ switch (state) {
+ case BT_CONNECTED:
+ return "BT_CONNECTED";
+ case BT_OPEN:
+ return "BT_OPEN";
+ case BT_BOUND:
+ return "BT_BOUND";
+ case BT_LISTEN:
+ return "BT_LISTEN";
+ case BT_CONNECT:
+ return "BT_CONNECT";
+ case BT_CONNECT2:
+ return "BT_CONNECT2";
+ case BT_CONFIG:
+ return "BT_CONFIG";
+ case BT_DISCONN:
+ return "BT_DISCONN";
+ case BT_CLOSED:
+ return "BT_CLOSED";
+ }
+
+ return "invalid state";
+}
+
+/* BD Address */
+typedef struct {
+ __u8 b[6];
+} __packed bdaddr_t;
+
+/* BD Address type */
+#define BDADDR_BREDR 0x00
+#define BDADDR_LE_PUBLIC 0x01
+#define BDADDR_LE_RANDOM 0x02
+
+static inline bool bdaddr_type_is_valid(u8 type)
+{
+ switch (type) {
+ case BDADDR_BREDR:
+ case BDADDR_LE_PUBLIC:
+ case BDADDR_LE_RANDOM:
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool bdaddr_type_is_le(u8 type)
+{
+ switch (type) {
+ case BDADDR_LE_PUBLIC:
+ case BDADDR_LE_RANDOM:
+ return true;
+ }
+
+ return false;
+}
+
+#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}})
+#define BDADDR_NONE (&(bdaddr_t) {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}})
+
+/* Copy, swap, convert BD Address */
+static inline int bacmp(const bdaddr_t *ba1, const bdaddr_t *ba2)
+{
+ return memcmp(ba1, ba2, sizeof(bdaddr_t));
+}
+static inline void bacpy(bdaddr_t *dst, const bdaddr_t *src)
+{
+ memcpy(dst, src, sizeof(bdaddr_t));
+}
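+
+/*
+ * Editor's sketch: these helpers mirror memcmp()/memcpy() over the
+ * fixed six-byte address, e.g. to test a bind address for the
+ * wildcard before copying it (field names are illustrative):
+ *
+ *   if (!bacmp(&sa->bdaddr, BDADDR_ANY))
+ *           ... bound to any local controller ...
+ *   else
+ *           bacpy(&chan->src, &sa->bdaddr);
+ */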
+
+void baswap(bdaddr_t *dst, const bdaddr_t *src);
+
+/* Common socket structures and functions */
+
+#define bt_sk(__sk) ((struct bt_sock *) __sk)
+
+struct bt_sock {
+ struct sock sk;
+ struct list_head accept_q;
+ struct sock *parent;
+ unsigned long flags;
+ void (*skb_msg_name)(struct sk_buff *, void *, int *);
+ void (*skb_put_cmsg)(struct sk_buff *, struct msghdr *, struct sock *);
+};
+
+enum {
+ BT_SK_DEFER_SETUP,
+ BT_SK_SUSPEND,
+};
+
+struct bt_sock_list {
+ struct hlist_head head;
+ rwlock_t lock;
+#ifdef CONFIG_PROC_FS
+ int (*custom_seq_show)(struct seq_file *, void *);
+#endif
+};
+
+int bt_sock_register(int proto, const struct net_proto_family *ops);
+void bt_sock_unregister(int proto);
+void bt_sock_link(struct bt_sock_list *l, struct sock *s);
+void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
+int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags);
+int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags);
+__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
+int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
+int bt_sock_wait_ready(struct sock *sk, unsigned int msg_flags);
+
+void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh);
+void bt_accept_unlink(struct sock *sk);
+struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
+
+/* Skb helpers */
+struct l2cap_ctrl {
+ u8 sframe:1,
+ poll:1,
+ final:1,
+ fcs:1,
+ sar:2,
+ super:2;
+
+ u16 reqseq;
+ u16 txseq;
+ u8 retries;
+ __le16 psm;
+ bdaddr_t bdaddr;
+ struct l2cap_chan *chan;
+};
+
+struct sco_ctrl {
+ u8 pkt_status;
+};
+
+struct hci_dev;
+
+typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
+typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
+ u16 opcode, struct sk_buff *skb);
+
+#define HCI_REQ_START BIT(0)
+#define HCI_REQ_SKB BIT(1)
+
+struct hci_ctrl {
+ struct sock *sk;
+ u16 opcode;
+ u8 req_flags;
+ u8 req_event;
+ union {
+ hci_req_complete_t req_complete;
+ hci_req_complete_skb_t req_complete_skb;
+ };
+};
+
+struct mgmt_ctrl {
+ struct hci_dev *hdev;
+ u16 opcode;
+};
+
+struct bt_skb_cb {
+ u8 pkt_type;
+ u8 force_active;
+ u16 expect;
+ u8 incoming:1;
+ union {
+ struct l2cap_ctrl l2cap;
+ struct sco_ctrl sco;
+ struct hci_ctrl hci;
+ struct mgmt_ctrl mgmt;
+ };
+};
+#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
+
+#define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type
+#define hci_skb_expect(skb) bt_cb((skb))->expect
+#define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode
+#define hci_skb_event(skb) bt_cb((skb))->hci.req_event
+#define hci_skb_sk(skb) bt_cb((skb))->hci.sk
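+
+/*
+ * Editor's sketch: transport drivers commonly dispatch on the
+ * control-block packet type when handing a frame to the hardware:
+ *
+ *   switch (hci_skb_pkt_type(skb)) {
+ *   case HCI_COMMAND_PKT:
+ *           hdev->stat.cmd_tx++;
+ *           break;
+ *   case HCI_ACLDATA_PKT:
+ *           hdev->stat.acl_tx++;
+ *           break;
+ *   }
+ */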
+
+static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len + BT_SKB_RESERVE, how);
+ if (skb)
+ skb_reserve(skb, BT_SKB_RESERVE);
+ return skb;
+}
+
+static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk,
+ unsigned long len, int nb, int *err)
+{
+ struct sk_buff *skb;
+
+ skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err);
+ if (skb)
+ skb_reserve(skb, BT_SKB_RESERVE);
+
+ if (!skb && *err)
+ return NULL;
+
+ *err = sock_error(sk);
+ if (*err)
+ goto out;
+
+ if (sk->sk_shutdown) {
+ *err = -ECONNRESET;
+ goto out;
+ }
+
+ return skb;
+
+out:
+ kfree_skb(skb);
+ return NULL;
+}
+
+/* Shall not be called with lock_sock held */
+static inline struct sk_buff *bt_skb_sendmsg(struct sock *sk,
+ struct msghdr *msg,
+ size_t len, size_t mtu,
+ size_t headroom, size_t tailroom)
+{
+ struct sk_buff *skb;
+ size_t size = min_t(size_t, len, mtu);
+ int err;
+
+ skb = bt_skb_send_alloc(sk, size + headroom + tailroom,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return ERR_PTR(err);
+
+ skb_reserve(skb, headroom);
+ skb_tailroom_reserve(skb, mtu, tailroom);
+
+ if (!copy_from_iter_full(skb_put(skb, size), size, &msg->msg_iter)) {
+ kfree_skb(skb);
+ return ERR_PTR(-EFAULT);
+ }
+
+ skb->priority = sk->sk_priority;
+
+ return skb;
+}
+
+/* Similar to bt_skb_sendmsg but can split the msg into multiple fragments
+ * according to the MTU.
+ */
+static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
+ struct msghdr *msg,
+ size_t len, size_t mtu,
+ size_t headroom, size_t tailroom)
+{
+ struct sk_buff *skb, **frag;
+
+ skb = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
+ if (IS_ERR(skb))
+ return skb;
+
+ len -= skb->len;
+ if (!len)
+ return skb;
+
+ /* Add remaining data over MTU as continuation fragments */
+ frag = &skb_shinfo(skb)->frag_list;
+ while (len) {
+ struct sk_buff *tmp;
+
+ tmp = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
+ if (IS_ERR(tmp))
+ return skb;
+
+ len -= tmp->len;
+
+ *frag = tmp;
+ frag = &(*frag)->next;
+ }
+
+ return skb;
+}
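+
+/*
+ * Editor's sketch: any data beyond the first MTU-sized skb comes back
+ * as continuation fragments, so a caller that emits one PDU per
+ * fragment walks the frag_list:
+ *
+ *   struct sk_buff *frag = skb_shinfo(skb)->frag_list;
+ *
+ *   while (frag) {
+ *           ... transmit frag, whose len is at most mtu ...
+ *           frag = frag->next;
+ *   }
+ */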
+
+int bt_to_errno(u16 code);
+__u8 bt_status(int err);
+
+void hci_sock_set_flag(struct sock *sk, int nr);
+void hci_sock_clear_flag(struct sock *sk, int nr);
+int hci_sock_test_flag(struct sock *sk, int nr);
+unsigned short hci_sock_get_channel(struct sock *sk);
+u32 hci_sock_get_cookie(struct sock *sk);
+
+int hci_sock_init(void);
+void hci_sock_cleanup(void);
+
+int bt_sysfs_init(void);
+void bt_sysfs_cleanup(void);
+
+int bt_procfs_init(struct net *net, const char *name,
+ struct bt_sock_list *sk_list,
+ int (*seq_show)(struct seq_file *, void *));
+void bt_procfs_cleanup(struct net *net, const char *name);
+
+extern struct dentry *bt_debugfs;
+
+int l2cap_init(void);
+void l2cap_exit(void);
+
+#if IS_ENABLED(CONFIG_BT_BREDR)
+int sco_init(void);
+void sco_exit(void);
+#else
+static inline int sco_init(void)
+{
+ return 0;
+}
+
+static inline void sco_exit(void)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_BT_LE)
+int iso_init(void);
+int iso_exit(void);
+bool iso_enabled(void);
+#else
+static inline int iso_init(void)
+{
+ return 0;
+}
+
+static inline int iso_exit(void)
+{
+ return 0;
+}
+
+static inline bool iso_enabled(void)
+{
+ return false;
+}
+#endif
+
+int mgmt_init(void);
+void mgmt_exit(void);
+void mgmt_cleanup(struct sock *sk);
+
+void bt_sock_reclassify_lock(struct sock *sk, int proto);
+
+#endif /* __BLUETOOTH_H */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
new file mode 100644
index 000000000..a674221d1
--- /dev/null
+++ b/include/net/bluetooth/hci.h
@@ -0,0 +1,2967 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __HCI_H
+#define __HCI_H
+
+#define HCI_MAX_ACL_SIZE 1024
+#define HCI_MAX_SCO_SIZE 255
+#define HCI_MAX_ISO_SIZE 251
+#define HCI_MAX_EVENT_SIZE 260
+#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
+
+#define HCI_LINK_KEY_SIZE 16
+#define HCI_AMP_LINK_KEY_SIZE (2 * HCI_LINK_KEY_SIZE)
+
+#define HCI_MAX_AMP_ASSOC_SIZE 672
+
+#define HCI_MAX_CPB_DATA_SIZE 252
+
+/* HCI dev events */
+#define HCI_DEV_REG 1
+#define HCI_DEV_UNREG 2
+#define HCI_DEV_UP 3
+#define HCI_DEV_DOWN 4
+#define HCI_DEV_SUSPEND 5
+#define HCI_DEV_RESUME 6
+#define HCI_DEV_OPEN 7
+#define HCI_DEV_CLOSE 8
+#define HCI_DEV_SETUP 9
+
+/* HCI notify events */
+#define HCI_NOTIFY_CONN_ADD 1
+#define HCI_NOTIFY_CONN_DEL 2
+#define HCI_NOTIFY_VOICE_SETTING 3
+#define HCI_NOTIFY_ENABLE_SCO_CVSD 4
+#define HCI_NOTIFY_ENABLE_SCO_TRANSP 5
+#define HCI_NOTIFY_DISABLE_SCO 6
+
+/* HCI bus types */
+#define HCI_VIRTUAL 0
+#define HCI_USB 1
+#define HCI_PCCARD 2
+#define HCI_UART 3
+#define HCI_RS232 4
+#define HCI_PCI 5
+#define HCI_SDIO 6
+#define HCI_SPI 7
+#define HCI_I2C 8
+#define HCI_SMD 9
+#define HCI_VIRTIO 10
+
+/* HCI controller types */
+#define HCI_PRIMARY 0x00
+#define HCI_AMP 0x01
+
+/* First BR/EDR Controller shall have ID = 0 */
+#define AMP_ID_BREDR 0x00
+
+/* AMP controller types */
+#define AMP_TYPE_BREDR 0x00
+#define AMP_TYPE_80211 0x01
+
+/* AMP controller status */
+#define AMP_STATUS_POWERED_DOWN 0x00
+#define AMP_STATUS_BLUETOOTH_ONLY 0x01
+#define AMP_STATUS_NO_CAPACITY 0x02
+#define AMP_STATUS_LOW_CAPACITY 0x03
+#define AMP_STATUS_MEDIUM_CAPACITY 0x04
+#define AMP_STATUS_HIGH_CAPACITY 0x05
+#define AMP_STATUS_FULL_CAPACITY 0x06
+
+/* HCI device quirks */
+enum {
+ /* When this quirk is set, the HCI Reset command is sent when
+ * closing the transport instead of when opening it.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_RESET_ON_CLOSE,
+
+ /* When this quirk is set, the device is turned into a raw-only
+ * device and it will stay in unconfigured state.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_RAW_DEVICE,
+
+ /* When this quirk is set, the buffer sizes reported by
+ * HCI Read Buffer Size command are corrected if invalid.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_FIXUP_BUFFER_SIZE,
+
+ /* When this quirk is set, then a controller that does not
+ * indicate support for Inquiry Result with RSSI is assumed to
+ * support it anyway. Some early Bluetooth 1.2 controllers had
+ * wrongly configured local features that will require forcing
+ * them to enable this mode. Getting RSSI information with the
+ * inquiry responses is preferred since it allows for a better
+ * user experience.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_FIXUP_INQUIRY_MODE,
+
+ /* When this quirk is set, then the HCI Read Local Supported
+ * Commands command is not supported. In general Bluetooth 1.2
+ * and later controllers should support this command. However
+ * some controllers indicate Bluetooth 1.2 support, but do
+ * not support this command.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_BROKEN_LOCAL_COMMANDS,
+
+ /* When this quirk is set, then no stored link key handling
+ * is performed. This is mainly due to the fact that the
+ * HCI Delete Stored Link Key command is advertised, but
+ * not supported.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_BROKEN_STORED_LINK_KEY,
+
+ /* When this quirk is set, an external configuration step
+ * is required and will be indicated with the controller
+ * configuration.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_EXTERNAL_CONFIG,
+
+ /* When this quirk is set, the public Bluetooth address
+ * initially reported by HCI Read BD Address command
+ * is considered invalid. Controller configuration is
+ * required before this device can be used.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_INVALID_BDADDR,
+
+ /* When this quirk is set, the public Bluetooth address
+ * initially reported by HCI Read BD Address command
+ * is considered invalid. The public BD Address can be
+ * specified in the fwnode property 'local-bd-address'.
+ * If this property does not exist or is invalid controller
+ * configuration is required before this device can be used.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_USE_BDADDR_PROPERTY,
+
+ /* When this quirk is set, the duplicate filtering during
+ * scanning is based on Bluetooth device addresses. To allow
+ * RSSI based updates, restart scanning if needed.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_STRICT_DUPLICATE_FILTER,
+
+ /* When this quirk is set, LE scan and BR/EDR inquiry are done
+ * simultaneously; otherwise they are interleaved.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
+
+ /* When this quirk is set, the enabling of diagnostic mode is
+ * not persistent over HCI Reset. Every time the controller
+ * is brought up it needs to be reprogrammed.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_NON_PERSISTENT_DIAG,
+
+ /* When this quirk is set, setup() is run after every
+ * open() and not just after the first open().
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_NON_PERSISTENT_SETUP,
+
+ /* When this quirk is set, wideband speech is supported by
+ * the driver. Since no reliable mechanism exists to report
+ * this from the hardware, a driver flag is used to convey
+ * this support.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
+
+ /* When this quirk is set, the controller has validated that
+ * LE states reported through the HCI_LE_READ_SUPPORTED_STATES are
+ * valid. This mechanism is necessary as many controllers have
+ * been seen as having trouble initiating a connectable
+ * advertisement despite the state combination being reported as
+ * supported.
+ */
+ HCI_QUIRK_VALID_LE_STATES,
+
+ /* When this quirk is set, then erroneous data reporting
+ * is ignored. This is mainly due to the fact that the HCI
+ * Read Default Erroneous Data Reporting command is advertised,
+ * but not supported; these controllers often reply with unknown
+ * command and tend to lock up randomly, needing a hard reset.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_ERR_DATA_REPORTING,
+
+ /*
+ * When this quirk is set, then the hci_suspend_notifier is not
+ * registered. This is intended for devices which drop completely
+ * from the bus on system-suspend and which will show up as a new
+ * HCI after resume.
+ */
+ HCI_QUIRK_NO_SUSPEND_NOTIFIER,
+
+ /*
+ * When this quirk is set, LE tx power is not queried on startup
+ * and the min/max tx power values default to HCI_TX_POWER_INVALID.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER,
+
+ /* When this quirk is set, HCI_OP_SET_EVENT_FLT requests with
+ * HCI_FLT_CLEAR_ALL are ignored and event filtering is
+ * completely avoided. A subset of the CSR controller
+ * clones struggle with this and instantly lock up.
+ *
+ * Note that devices using this must (separately) disable
+ * runtime suspend, because event filtering takes place there.
+ */
+ HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL,
+
+ /*
+ * When this quirk is set, disables the use of
+ * HCI_OP_ENHANCED_SETUP_SYNC_CONN command to setup SCO connections.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN,
+
+ /*
+ * When this quirk is set, the HCI_OP_LE_SET_EXT_SCAN_ENABLE command is
+ * disabled. This is required for some Broadcom controllers which
+ * erroneously claim to support extended scanning.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_EXT_SCAN,
+
+ /*
+ * When this quirk is set, the HCI_OP_GET_MWS_TRANSPORT_CONFIG command is
+ * disabled. This is required for some Broadcom controllers which
+ * erroneously claim to support MWS Transport Layer Configuration.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG,
+
+ /* When this quirk is set, max_page for local extended features
+ * is set to 1, even if controller reports higher number. Some
+ * controllers (e.g. RTL8723CS) report more pages, but they
+ * don't actually support features declared there.
+ */
+ HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
+
+ /*
+ * When this quirk is set, the HCI_OP_LE_SET_RPA_TIMEOUT command is
+ * skipped during initialization. This is required for the Actions
+ * Semiconductor ATS2851 based controllers, which erroneously claim
+ * to support it.
+ */
+ HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT,
+};
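+
+/*
+ * Editor's sketch: a transport driver advertises one of the quirks
+ * above by setting the corresponding bit in hdev->quirks; for the
+ * "must be set before hci_register_dev" class this happens before
+ * registration:
+ *
+ *   set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ *   err = hci_register_dev(hdev);
+ */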
+
+/* HCI device flags */
+enum {
+ HCI_UP,
+ HCI_INIT,
+ HCI_RUNNING,
+
+ HCI_PSCAN,
+ HCI_ISCAN,
+ HCI_AUTH,
+ HCI_ENCRYPT,
+ HCI_INQUIRY,
+
+ HCI_RAW,
+
+ HCI_RESET,
+};
+
+/* HCI socket flags */
+enum {
+ HCI_SOCK_TRUSTED,
+ HCI_MGMT_INDEX_EVENTS,
+ HCI_MGMT_UNCONF_INDEX_EVENTS,
+ HCI_MGMT_EXT_INDEX_EVENTS,
+ HCI_MGMT_EXT_INFO_EVENTS,
+ HCI_MGMT_OPTION_EVENTS,
+ HCI_MGMT_SETTING_EVENTS,
+ HCI_MGMT_DEV_CLASS_EVENTS,
+ HCI_MGMT_LOCAL_NAME_EVENTS,
+ HCI_MGMT_OOB_DATA_EVENTS,
+ HCI_MGMT_EXP_FEATURE_EVENTS,
+};
+
+/*
+ * BR/EDR and/or LE controller flags: the flags defined here should represent
+ * states from the controller.
+ */
+enum {
+ HCI_SETUP,
+ HCI_CONFIG,
+ HCI_DEBUGFS_CREATED,
+ HCI_AUTO_OFF,
+ HCI_RFKILLED,
+ HCI_MGMT,
+ HCI_BONDABLE,
+ HCI_SERVICE_CACHE,
+ HCI_KEEP_DEBUG_KEYS,
+ HCI_USE_DEBUG_KEYS,
+ HCI_UNREGISTER,
+ HCI_UNCONFIGURED,
+ HCI_USER_CHANNEL,
+ HCI_EXT_CONFIGURED,
+ HCI_LE_ADV,
+ HCI_LE_PER_ADV,
+ HCI_LE_SCAN,
+ HCI_SSP_ENABLED,
+ HCI_SC_ENABLED,
+ HCI_SC_ONLY,
+ HCI_PRIVACY,
+ HCI_LIMITED_PRIVACY,
+ HCI_RPA_EXPIRED,
+ HCI_RPA_RESOLVING,
+ HCI_HS_ENABLED,
+ HCI_LE_ENABLED,
+ HCI_ADVERTISING,
+ HCI_ADVERTISING_CONNECTABLE,
+ HCI_CONNECTABLE,
+ HCI_DISCOVERABLE,
+ HCI_LIMITED_DISCOVERABLE,
+ HCI_LINK_SECURITY,
+ HCI_PERIODIC_INQ,
+ HCI_FAST_CONNECTABLE,
+ HCI_BREDR_ENABLED,
+ HCI_LE_SCAN_INTERRUPTED,
+ HCI_WIDEBAND_SPEECH_ENABLED,
+ HCI_EVENT_FILTER_CONFIGURED,
+ HCI_PA_SYNC,
+
+ HCI_DUT_MODE,
+ HCI_VENDOR_DIAG,
+ HCI_FORCE_BREDR_SMP,
+ HCI_FORCE_STATIC_ADDR,
+ HCI_LL_RPA_RESOLUTION,
+ HCI_ENABLE_LL_PRIVACY,
+ HCI_CMD_PENDING,
+ HCI_FORCE_NO_MITM,
+ HCI_QUALITY_REPORT,
+ HCI_OFFLOAD_CODECS_ENABLED,
+ HCI_LE_SIMULTANEOUS_ROLES,
+ HCI_CMD_DRAIN_WORKQUEUE,
+
+ HCI_MESH_EXPERIMENTAL,
+ HCI_MESH,
+ HCI_MESH_SENDING,
+
+ __HCI_NUM_FLAGS,
+};
+
+/* HCI timeouts */
+#define HCI_DISCONN_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_PAIRING_TIMEOUT msecs_to_jiffies(60000) /* 60 seconds */
+#define HCI_INIT_TIMEOUT msecs_to_jiffies(10000) /* 10 seconds */
+#define HCI_CMD_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_NCMD_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
+#define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */
+#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_POWER_OFF_TIMEOUT msecs_to_jiffies(5000) /* 5 seconds */
+#define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
+#define HCI_LE_AUTOCONN_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
+
+/* HCI data types */
+#define HCI_COMMAND_PKT 0x01
+#define HCI_ACLDATA_PKT 0x02
+#define HCI_SCODATA_PKT 0x03
+#define HCI_EVENT_PKT 0x04
+#define HCI_ISODATA_PKT 0x05
+#define HCI_DIAG_PKT 0xf0
+#define HCI_VENDOR_PKT 0xff
+
+/* HCI packet types */
+#define HCI_DM1 0x0008
+#define HCI_DM3 0x0400
+#define HCI_DM5 0x4000
+#define HCI_DH1 0x0010
+#define HCI_DH3 0x0800
+#define HCI_DH5 0x8000
+
+/* HCI packet types inverted masks */
+#define HCI_2DH1 0x0002
+#define HCI_3DH1 0x0004
+#define HCI_2DH3 0x0100
+#define HCI_3DH3 0x0200
+#define HCI_2DH5 0x1000
+#define HCI_3DH5 0x2000
+
+#define HCI_HV1 0x0020
+#define HCI_HV2 0x0040
+#define HCI_HV3 0x0080
+
+#define SCO_PTYPE_MASK (HCI_HV1 | HCI_HV2 | HCI_HV3)
+#define ACL_PTYPE_MASK (~SCO_PTYPE_MASK)
+
+/* eSCO packet types */
+#define ESCO_HV1 0x0001
+#define ESCO_HV2 0x0002
+#define ESCO_HV3 0x0004
+#define ESCO_EV3 0x0008
+#define ESCO_EV4 0x0010
+#define ESCO_EV5 0x0020
+#define ESCO_2EV3 0x0040
+#define ESCO_3EV3 0x0080
+#define ESCO_2EV5 0x0100
+#define ESCO_3EV5 0x0200
+
+#define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
+#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
+
+/* ACL flags */
+#define ACL_START_NO_FLUSH 0x00
+#define ACL_CONT 0x01
+#define ACL_START 0x02
+#define ACL_COMPLETE 0x03
+#define ACL_ACTIVE_BCAST 0x04
+#define ACL_PICO_BCAST 0x08
+
+/* ISO PB flags */
+#define ISO_START 0x00
+#define ISO_CONT 0x01
+#define ISO_SINGLE 0x02
+#define ISO_END 0x03
+
+/* ISO TS flags */
+#define ISO_TS 0x01
+
+/* Baseband links */
+#define SCO_LINK 0x00
+#define ACL_LINK 0x01
+#define ESCO_LINK 0x02
+/* Low Energy links do not have a defined link type. Use an invented one */
+#define LE_LINK 0x80
+#define AMP_LINK 0x81
+#define ISO_LINK 0x82
+#define INVALID_LINK 0xff
+
+/* LMP features */
+#define LMP_3SLOT 0x01
+#define LMP_5SLOT 0x02
+#define LMP_ENCRYPT 0x04
+#define LMP_SOFFSET 0x08
+#define LMP_TACCURACY 0x10
+#define LMP_RSWITCH 0x20
+#define LMP_HOLD 0x40
+#define LMP_SNIFF 0x80
+
+#define LMP_PARK 0x01
+#define LMP_RSSI 0x02
+#define LMP_QUALITY 0x04
+#define LMP_SCO 0x08
+#define LMP_HV2 0x10
+#define LMP_HV3 0x20
+#define LMP_ULAW 0x40
+#define LMP_ALAW 0x80
+
+#define LMP_CVSD 0x01
+#define LMP_PSCHEME 0x02
+#define LMP_PCONTROL 0x04
+#define LMP_TRANSPARENT 0x08
+
+#define LMP_EDR_2M 0x02
+#define LMP_EDR_3M 0x04
+#define LMP_RSSI_INQ 0x40
+#define LMP_ESCO 0x80
+
+#define LMP_EV4 0x01
+#define LMP_EV5 0x02
+#define LMP_NO_BREDR 0x20
+#define LMP_LE 0x40
+#define LMP_EDR_3SLOT 0x80
+
+#define LMP_EDR_5SLOT 0x01
+#define LMP_SNIFF_SUBR 0x02
+#define LMP_PAUSE_ENC 0x04
+#define LMP_EDR_ESCO_2M 0x20
+#define LMP_EDR_ESCO_3M 0x40
+#define LMP_EDR_3S_ESCO 0x80
+
+#define LMP_EXT_INQ 0x01
+#define LMP_SIMUL_LE_BR 0x02
+#define LMP_SIMPLE_PAIR 0x08
+#define LMP_ERR_DATA_REPORTING 0x20
+#define LMP_NO_FLUSH 0x40
+
+#define LMP_LSTO 0x01
+#define LMP_INQ_TX_PWR 0x02
+#define LMP_EXTFEATURES 0x80
+
+/* Extended LMP features */
+#define LMP_CPB_CENTRAL 0x01
+#define LMP_CPB_PERIPHERAL 0x02
+#define LMP_SYNC_TRAIN 0x04
+#define LMP_SYNC_SCAN 0x08
+
+#define LMP_SC 0x01
+#define LMP_PING 0x02
+
+/* Host features */
+#define LMP_HOST_SSP 0x01
+#define LMP_HOST_LE 0x02
+#define LMP_HOST_LE_BREDR 0x04
+#define LMP_HOST_SC 0x08
+
+/* LE features */
+#define HCI_LE_ENCRYPTION 0x01
+#define HCI_LE_CONN_PARAM_REQ_PROC 0x02
+#define HCI_LE_PERIPHERAL_FEATURES 0x08
+#define HCI_LE_PING 0x10
+#define HCI_LE_DATA_LEN_EXT 0x20
+#define HCI_LE_LL_PRIVACY 0x40
+#define HCI_LE_EXT_SCAN_POLICY 0x80
+#define HCI_LE_PHY_2M 0x01
+#define HCI_LE_PHY_CODED 0x08
+#define HCI_LE_EXT_ADV 0x10
+#define HCI_LE_PERIODIC_ADV 0x20
+#define HCI_LE_CHAN_SEL_ALG2 0x40
+#define HCI_LE_CIS_CENTRAL 0x10
+#define HCI_LE_CIS_PERIPHERAL 0x20
+#define HCI_LE_ISO_BROADCASTER 0x40
+
+/* Connection modes */
+#define HCI_CM_ACTIVE 0x0000
+#define HCI_CM_HOLD 0x0001
+#define HCI_CM_SNIFF 0x0002
+#define HCI_CM_PARK 0x0003
+
+/* Link policies */
+#define HCI_LP_RSWITCH 0x0001
+#define HCI_LP_HOLD 0x0002
+#define HCI_LP_SNIFF 0x0004
+#define HCI_LP_PARK 0x0008
+
+/* Link modes */
+#define HCI_LM_ACCEPT 0x8000
+#define HCI_LM_MASTER 0x0001
+#define HCI_LM_AUTH 0x0002
+#define HCI_LM_ENCRYPT 0x0004
+#define HCI_LM_TRUSTED 0x0008
+#define HCI_LM_RELIABLE 0x0010
+#define HCI_LM_SECURE 0x0020
+#define HCI_LM_FIPS 0x0040
+
+/* Authentication types */
+#define HCI_AT_NO_BONDING 0x00
+#define HCI_AT_NO_BONDING_MITM 0x01
+#define HCI_AT_DEDICATED_BONDING 0x02
+#define HCI_AT_DEDICATED_BONDING_MITM 0x03
+#define HCI_AT_GENERAL_BONDING 0x04
+#define HCI_AT_GENERAL_BONDING_MITM 0x05
+
+/* I/O capabilities */
+#define HCI_IO_DISPLAY_ONLY 0x00
+#define HCI_IO_DISPLAY_YESNO 0x01
+#define HCI_IO_KEYBOARD_ONLY 0x02
+#define HCI_IO_NO_INPUT_OUTPUT 0x03
+
+/* Link Key types */
+#define HCI_LK_COMBINATION 0x00
+#define HCI_LK_LOCAL_UNIT 0x01
+#define HCI_LK_REMOTE_UNIT 0x02
+#define HCI_LK_DEBUG_COMBINATION 0x03
+#define HCI_LK_UNAUTH_COMBINATION_P192 0x04
+#define HCI_LK_AUTH_COMBINATION_P192 0x05
+#define HCI_LK_CHANGED_COMBINATION 0x06
+#define HCI_LK_UNAUTH_COMBINATION_P256 0x07
+#define HCI_LK_AUTH_COMBINATION_P256 0x08
+
+/* ---- HCI Error Codes ---- */
+#define HCI_ERROR_UNKNOWN_CONN_ID 0x02
+#define HCI_ERROR_AUTH_FAILURE 0x05
+#define HCI_ERROR_PIN_OR_KEY_MISSING 0x06
+#define HCI_ERROR_MEMORY_EXCEEDED 0x07
+#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
+#define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d
+#define HCI_ERROR_REJ_BAD_ADDR 0x0f
+#define HCI_ERROR_INVALID_PARAMETERS 0x12
+#define HCI_ERROR_REMOTE_USER_TERM 0x13
+#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14
+#define HCI_ERROR_REMOTE_POWER_OFF 0x15
+#define HCI_ERROR_LOCAL_HOST_TERM 0x16
+#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
+#define HCI_ERROR_INVALID_LL_PARAMS 0x1e
+#define HCI_ERROR_UNSPECIFIED 0x1f
+#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
+#define HCI_ERROR_CANCELLED_BY_HOST 0x44
+
+/* Flow control modes */
+#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00
+#define HCI_FLOW_CTL_MODE_BLOCK_BASED 0x01
+
+/* The core spec defines 127 as the "not available" value */
+#define HCI_TX_POWER_INVALID 127
+#define HCI_RSSI_INVALID 127
+
+#define HCI_ROLE_MASTER 0x00
+#define HCI_ROLE_SLAVE 0x01
+
+/* Extended Inquiry Response field types */
+#define EIR_FLAGS 0x01 /* flags */
+#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
+#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
+#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
+#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
+#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
+#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
+#define EIR_NAME_SHORT 0x08 /* shortened local name */
+#define EIR_NAME_COMPLETE 0x09 /* complete local name */
+#define EIR_TX_POWER 0x0A /* transmit power level */
+#define EIR_CLASS_OF_DEV 0x0D /* Class of Device */
+#define EIR_SSP_HASH_C192 0x0E /* Simple Pairing Hash C-192 */
+#define EIR_SSP_RAND_R192 0x0F /* Simple Pairing Randomizer R-192 */
+#define EIR_DEVICE_ID 0x10 /* device ID */
+#define EIR_SERVICE_DATA 0x16 /* Service Data */
+#define EIR_APPEARANCE 0x19 /* Device appearance */
+#define EIR_LE_BDADDR 0x1B /* LE Bluetooth device address */
+#define EIR_LE_ROLE 0x1C /* LE role */
+#define EIR_SSP_HASH_C256 0x1D /* Simple Pairing Hash C-256 */
+#define EIR_SSP_RAND_R256 0x1E /* Simple Pairing Rand R-256 */
+#define EIR_LE_SC_CONFIRM 0x22 /* LE SC Confirmation Value */
+#define EIR_LE_SC_RANDOM 0x23 /* LE SC Random Value */
+
+/* Low Energy Advertising Flags */
+#define LE_AD_LIMITED 0x01 /* Limited Discoverable */
+#define LE_AD_GENERAL 0x02 /* General Discoverable */
+#define LE_AD_NO_BREDR 0x04 /* BR/EDR not supported */
+#define LE_AD_SIM_LE_BREDR_CTRL 0x08 /* Simultaneous LE & BR/EDR Controller */
+#define LE_AD_SIM_LE_BREDR_HOST 0x10 /* Simultaneous LE & BR/EDR Host */
+
+/* ----- HCI Commands ---- */
+#define HCI_OP_NOP 0x0000
+
+#define HCI_OP_INQUIRY 0x0401
+struct hci_cp_inquiry {
+ __u8 lap[3];
+ __u8 length;
+ __u8 num_rsp;
+} __packed;
+
+#define HCI_OP_INQUIRY_CANCEL 0x0402
+
+#define HCI_OP_PERIODIC_INQ 0x0403
+
+#define HCI_OP_EXIT_PERIODIC_INQ 0x0404
+
+#define HCI_OP_CREATE_CONN 0x0405
+struct hci_cp_create_conn {
+ bdaddr_t bdaddr;
+ __le16 pkt_type;
+ __u8 pscan_rep_mode;
+ __u8 pscan_mode;
+ __le16 clock_offset;
+ __u8 role_switch;
+} __packed;
+
+#define HCI_OP_DISCONNECT 0x0406
+struct hci_cp_disconnect {
+ __le16 handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_OP_ADD_SCO 0x0407
+struct hci_cp_add_sco {
+ __le16 handle;
+ __le16 pkt_type;
+} __packed;
+
+#define HCI_OP_CREATE_CONN_CANCEL 0x0408
+struct hci_cp_create_conn_cancel {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_ACCEPT_CONN_REQ 0x0409
+struct hci_cp_accept_conn_req {
+ bdaddr_t bdaddr;
+ __u8 role;
+} __packed;
+
+#define HCI_OP_REJECT_CONN_REQ 0x040a
+struct hci_cp_reject_conn_req {
+ bdaddr_t bdaddr;
+ __u8 reason;
+} __packed;
+
+#define HCI_OP_LINK_KEY_REPLY 0x040b
+struct hci_cp_link_key_reply {
+ bdaddr_t bdaddr;
+ __u8 link_key[HCI_LINK_KEY_SIZE];
+} __packed;
+
+#define HCI_OP_LINK_KEY_NEG_REPLY 0x040c
+struct hci_cp_link_key_neg_reply {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_PIN_CODE_REPLY 0x040d
+struct hci_cp_pin_code_reply {
+ bdaddr_t bdaddr;
+ __u8 pin_len;
+ __u8 pin_code[16];
+} __packed;
+struct hci_rp_pin_code_reply {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_PIN_CODE_NEG_REPLY 0x040e
+struct hci_cp_pin_code_neg_reply {
+ bdaddr_t bdaddr;
+} __packed;
+struct hci_rp_pin_code_neg_reply {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_CHANGE_CONN_PTYPE 0x040f
+struct hci_cp_change_conn_ptype {
+ __le16 handle;
+ __le16 pkt_type;
+} __packed;
+
+#define HCI_OP_AUTH_REQUESTED 0x0411
+struct hci_cp_auth_requested {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_SET_CONN_ENCRYPT 0x0413
+struct hci_cp_set_conn_encrypt {
+ __le16 handle;
+ __u8 encrypt;
+} __packed;
+
+#define HCI_OP_CHANGE_CONN_LINK_KEY 0x0415
+struct hci_cp_change_conn_link_key {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_REMOTE_NAME_REQ 0x0419
+struct hci_cp_remote_name_req {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+ __u8 pscan_mode;
+ __le16 clock_offset;
+} __packed;
+
+#define HCI_OP_REMOTE_NAME_REQ_CANCEL 0x041a
+struct hci_cp_remote_name_req_cancel {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_READ_REMOTE_FEATURES 0x041b
+struct hci_cp_read_remote_features {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_READ_REMOTE_EXT_FEATURES 0x041c
+struct hci_cp_read_remote_ext_features {
+ __le16 handle;
+ __u8 page;
+} __packed;
+
+#define HCI_OP_READ_REMOTE_VERSION 0x041d
+struct hci_cp_read_remote_version {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_READ_CLOCK_OFFSET 0x041f
+struct hci_cp_read_clock_offset {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_SETUP_SYNC_CONN 0x0428
+struct hci_cp_setup_sync_conn {
+ __le16 handle;
+ __le32 tx_bandwidth;
+ __le32 rx_bandwidth;
+ __le16 max_latency;
+ __le16 voice_setting;
+ __u8 retrans_effort;
+ __le16 pkt_type;
+} __packed;
+
+#define HCI_OP_ACCEPT_SYNC_CONN_REQ 0x0429
+struct hci_cp_accept_sync_conn_req {
+ bdaddr_t bdaddr;
+ __le32 tx_bandwidth;
+ __le32 rx_bandwidth;
+ __le16 max_latency;
+ __le16 content_format;
+ __u8 retrans_effort;
+ __le16 pkt_type;
+} __packed;
+
+#define HCI_OP_REJECT_SYNC_CONN_REQ 0x042a
+struct hci_cp_reject_sync_conn_req {
+ bdaddr_t bdaddr;
+ __u8 reason;
+} __packed;
+
+#define HCI_OP_IO_CAPABILITY_REPLY 0x042b
+struct hci_cp_io_capability_reply {
+ bdaddr_t bdaddr;
+ __u8 capability;
+ __u8 oob_data;
+ __u8 authentication;
+} __packed;
+
+#define HCI_OP_USER_CONFIRM_REPLY 0x042c
+struct hci_cp_user_confirm_reply {
+ bdaddr_t bdaddr;
+} __packed;
+struct hci_rp_user_confirm_reply {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_USER_CONFIRM_NEG_REPLY 0x042d
+
+#define HCI_OP_USER_PASSKEY_REPLY 0x042e
+struct hci_cp_user_passkey_reply {
+ bdaddr_t bdaddr;
+ __le32 passkey;
+} __packed;
+
+#define HCI_OP_USER_PASSKEY_NEG_REPLY 0x042f
+
+#define HCI_OP_REMOTE_OOB_DATA_REPLY 0x0430
+struct hci_cp_remote_oob_data_reply {
+ bdaddr_t bdaddr;
+ __u8 hash[16];
+ __u8 rand[16];
+} __packed;
+
+#define HCI_OP_REMOTE_OOB_DATA_NEG_REPLY 0x0433
+struct hci_cp_remote_oob_data_neg_reply {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_IO_CAPABILITY_NEG_REPLY 0x0434
+struct hci_cp_io_capability_neg_reply {
+ bdaddr_t bdaddr;
+ __u8 reason;
+} __packed;
+
+#define HCI_OP_CREATE_PHY_LINK 0x0435
+struct hci_cp_create_phy_link {
+ __u8 phy_handle;
+ __u8 key_len;
+ __u8 key_type;
+ __u8 key[HCI_AMP_LINK_KEY_SIZE];
+} __packed;
+
+#define HCI_OP_ACCEPT_PHY_LINK 0x0436
+struct hci_cp_accept_phy_link {
+ __u8 phy_handle;
+ __u8 key_len;
+ __u8 key_type;
+ __u8 key[HCI_AMP_LINK_KEY_SIZE];
+} __packed;
+
+#define HCI_OP_DISCONN_PHY_LINK 0x0437
+struct hci_cp_disconn_phy_link {
+ __u8 phy_handle;
+ __u8 reason;
+} __packed;
+
+struct ext_flow_spec {
+ __u8 id;
+ __u8 stype;
+ __le16 msdu;
+ __le32 sdu_itime;
+ __le32 acc_lat;
+ __le32 flush_to;
+} __packed;
+
+#define HCI_OP_CREATE_LOGICAL_LINK 0x0438
+#define HCI_OP_ACCEPT_LOGICAL_LINK 0x0439
+struct hci_cp_create_accept_logical_link {
+ __u8 phy_handle;
+ struct ext_flow_spec tx_flow_spec;
+ struct ext_flow_spec rx_flow_spec;
+} __packed;
+
+#define HCI_OP_DISCONN_LOGICAL_LINK 0x043a
+struct hci_cp_disconn_logical_link {
+ __le16 log_handle;
+} __packed;
+
+#define HCI_OP_LOGICAL_LINK_CANCEL 0x043b
+struct hci_cp_logical_link_cancel {
+ __u8 phy_handle;
+ __u8 flow_spec_id;
+} __packed;
+
+#define HCI_OP_ENHANCED_SETUP_SYNC_CONN 0x043d
+struct hci_coding_format {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+} __packed;
+
+struct hci_cp_enhanced_setup_sync_conn {
+ __le16 handle;
+ __le32 tx_bandwidth;
+ __le32 rx_bandwidth;
+ struct hci_coding_format tx_coding_format;
+ struct hci_coding_format rx_coding_format;
+ __le16 tx_codec_frame_size;
+ __le16 rx_codec_frame_size;
+ __le32 in_bandwidth;
+ __le32 out_bandwidth;
+ struct hci_coding_format in_coding_format;
+ struct hci_coding_format out_coding_format;
+ __le16 in_coded_data_size;
+ __le16 out_coded_data_size;
+ __u8 in_pcm_data_format;
+ __u8 out_pcm_data_format;
+ __u8 in_pcm_sample_payload_msb_pos;
+ __u8 out_pcm_sample_payload_msb_pos;
+ __u8 in_data_path;
+ __u8 out_data_path;
+ __u8 in_transport_unit_size;
+ __u8 out_transport_unit_size;
+ __le16 max_latency;
+ __le16 pkt_type;
+ __u8 retrans_effort;
+} __packed;
+
+struct hci_rp_logical_link_cancel {
+ __u8 status;
+ __u8 phy_handle;
+ __u8 flow_spec_id;
+} __packed;
+
+#define HCI_OP_SET_CPB 0x0441
+struct hci_cp_set_cpb {
+ __u8 enable;
+ __u8 lt_addr;
+ __u8 lpo_allowed;
+ __le16 packet_type;
+ __le16 interval_min;
+ __le16 interval_max;
+ __le16 cpb_sv_tout;
+} __packed;
+struct hci_rp_set_cpb {
+ __u8 status;
+ __u8 lt_addr;
+ __le16 interval;
+} __packed;
+
+#define HCI_OP_START_SYNC_TRAIN 0x0443
+
+#define HCI_OP_REMOTE_OOB_EXT_DATA_REPLY 0x0445
+struct hci_cp_remote_oob_ext_data_reply {
+ bdaddr_t bdaddr;
+ __u8 hash192[16];
+ __u8 rand192[16];
+ __u8 hash256[16];
+ __u8 rand256[16];
+} __packed;
+
+#define HCI_OP_SNIFF_MODE 0x0803
+struct hci_cp_sniff_mode {
+ __le16 handle;
+ __le16 max_interval;
+ __le16 min_interval;
+ __le16 attempt;
+ __le16 timeout;
+} __packed;
+
+#define HCI_OP_EXIT_SNIFF_MODE 0x0804
+struct hci_cp_exit_sniff_mode {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_ROLE_DISCOVERY 0x0809
+struct hci_cp_role_discovery {
+ __le16 handle;
+} __packed;
+struct hci_rp_role_discovery {
+ __u8 status;
+ __le16 handle;
+ __u8 role;
+} __packed;
+
+#define HCI_OP_SWITCH_ROLE 0x080b
+struct hci_cp_switch_role {
+ bdaddr_t bdaddr;
+ __u8 role;
+} __packed;
+
+#define HCI_OP_READ_LINK_POLICY 0x080c
+struct hci_cp_read_link_policy {
+ __le16 handle;
+} __packed;
+struct hci_rp_read_link_policy {
+ __u8 status;
+ __le16 handle;
+ __le16 policy;
+} __packed;
+
+#define HCI_OP_WRITE_LINK_POLICY 0x080d
+struct hci_cp_write_link_policy {
+ __le16 handle;
+ __le16 policy;
+} __packed;
+struct hci_rp_write_link_policy {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_READ_DEF_LINK_POLICY 0x080e
+struct hci_rp_read_def_link_policy {
+ __u8 status;
+ __le16 policy;
+} __packed;
+
+#define HCI_OP_WRITE_DEF_LINK_POLICY 0x080f
+struct hci_cp_write_def_link_policy {
+ __le16 policy;
+} __packed;
+
+#define HCI_OP_SNIFF_SUBRATE 0x0811
+struct hci_cp_sniff_subrate {
+ __le16 handle;
+ __le16 max_latency;
+ __le16 min_remote_timeout;
+ __le16 min_local_timeout;
+} __packed;
+
+#define HCI_OP_SET_EVENT_MASK 0x0c01
+
+#define HCI_OP_RESET 0x0c03
+
+#define HCI_OP_SET_EVENT_FLT 0x0c05
+#define HCI_SET_EVENT_FLT_SIZE 9
+struct hci_cp_set_event_filter {
+ __u8 flt_type;
+ __u8 cond_type;
+ struct {
+ bdaddr_t bdaddr;
+ __u8 auto_accept;
+ } __packed addr_conn_flt;
+} __packed;
+
+/* Filter types */
+#define HCI_FLT_CLEAR_ALL 0x00
+#define HCI_FLT_INQ_RESULT 0x01
+#define HCI_FLT_CONN_SETUP 0x02
+
+/* CONN_SETUP Condition types */
+#define HCI_CONN_SETUP_ALLOW_ALL 0x00
+#define HCI_CONN_SETUP_ALLOW_CLASS 0x01
+#define HCI_CONN_SETUP_ALLOW_BDADDR 0x02
+
+/* CONN_SETUP Conditions */
+#define HCI_CONN_SETUP_AUTO_OFF 0x01
+#define HCI_CONN_SETUP_AUTO_ON 0x02
+#define HCI_CONN_SETUP_AUTO_ON_WITH_RS 0x03
+
+#define HCI_OP_READ_STORED_LINK_KEY 0x0c0d
+struct hci_cp_read_stored_link_key {
+ bdaddr_t bdaddr;
+ __u8 read_all;
+} __packed;
+struct hci_rp_read_stored_link_key {
+ __u8 status;
+ __le16 max_keys;
+ __le16 num_keys;
+} __packed;
+
+#define HCI_OP_DELETE_STORED_LINK_KEY 0x0c12
+struct hci_cp_delete_stored_link_key {
+ bdaddr_t bdaddr;
+ __u8 delete_all;
+} __packed;
+struct hci_rp_delete_stored_link_key {
+ __u8 status;
+ __le16 num_keys;
+} __packed;
+
+#define HCI_MAX_NAME_LENGTH 248
+
+#define HCI_OP_WRITE_LOCAL_NAME 0x0c13
+struct hci_cp_write_local_name {
+ __u8 name[HCI_MAX_NAME_LENGTH];
+} __packed;
+
+#define HCI_OP_READ_LOCAL_NAME 0x0c14
+struct hci_rp_read_local_name {
+ __u8 status;
+ __u8 name[HCI_MAX_NAME_LENGTH];
+} __packed;
+
+#define HCI_OP_WRITE_CA_TIMEOUT 0x0c16
+
+#define HCI_OP_WRITE_PG_TIMEOUT 0x0c18
+
+#define HCI_OP_WRITE_SCAN_ENABLE 0x0c1a
+ #define SCAN_DISABLED 0x00
+ #define SCAN_INQUIRY 0x01
+ #define SCAN_PAGE 0x02
+
+#define HCI_OP_READ_AUTH_ENABLE 0x0c1f
+
+#define HCI_OP_WRITE_AUTH_ENABLE 0x0c20
+ #define AUTH_DISABLED 0x00
+ #define AUTH_ENABLED 0x01
+
+#define HCI_OP_READ_ENCRYPT_MODE 0x0c21
+
+#define HCI_OP_WRITE_ENCRYPT_MODE 0x0c22
+ #define ENCRYPT_DISABLED 0x00
+ #define ENCRYPT_P2P 0x01
+ #define ENCRYPT_BOTH 0x02
+
+#define HCI_OP_READ_CLASS_OF_DEV 0x0c23
+struct hci_rp_read_class_of_dev {
+ __u8 status;
+ __u8 dev_class[3];
+} __packed;
+
+#define HCI_OP_WRITE_CLASS_OF_DEV 0x0c24
+struct hci_cp_write_class_of_dev {
+ __u8 dev_class[3];
+} __packed;
+
+#define HCI_OP_READ_VOICE_SETTING 0x0c25
+struct hci_rp_read_voice_setting {
+ __u8 status;
+ __le16 voice_setting;
+} __packed;
+
+#define HCI_OP_WRITE_VOICE_SETTING 0x0c26
+struct hci_cp_write_voice_setting {
+ __le16 voice_setting;
+} __packed;
+
+#define HCI_OP_HOST_BUFFER_SIZE 0x0c33
+struct hci_cp_host_buffer_size {
+ __le16 acl_mtu;
+ __u8 sco_mtu;
+ __le16 acl_max_pkt;
+ __le16 sco_max_pkt;
+} __packed;
+
+#define HCI_OP_READ_NUM_SUPPORTED_IAC 0x0c38
+struct hci_rp_read_num_supported_iac {
+ __u8 status;
+ __u8 num_iac;
+} __packed;
+
+#define HCI_OP_READ_CURRENT_IAC_LAP 0x0c39
+
+#define HCI_OP_WRITE_CURRENT_IAC_LAP 0x0c3a
+struct hci_cp_write_current_iac_lap {
+ __u8 num_iac;
+ __u8 iac_lap[6];
+} __packed;
+
+#define HCI_OP_WRITE_INQUIRY_MODE 0x0c45
+
+#define HCI_MAX_EIR_LENGTH 240
+
+#define HCI_OP_WRITE_EIR 0x0c52
+struct hci_cp_write_eir {
+ __u8 fec;
+ __u8 data[HCI_MAX_EIR_LENGTH];
+} __packed;
+
+#define HCI_OP_READ_SSP_MODE 0x0c55
+struct hci_rp_read_ssp_mode {
+ __u8 status;
+ __u8 mode;
+} __packed;
+
+#define HCI_OP_WRITE_SSP_MODE 0x0c56
+struct hci_cp_write_ssp_mode {
+ __u8 mode;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_OOB_DATA 0x0c57
+struct hci_rp_read_local_oob_data {
+ __u8 status;
+ __u8 hash[16];
+ __u8 rand[16];
+} __packed;
+
+#define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58
+struct hci_rp_read_inq_rsp_tx_power {
+ __u8 status;
+ __s8 tx_power;
+} __packed;
+
+#define HCI_OP_READ_DEF_ERR_DATA_REPORTING 0x0c5a
+ #define ERR_DATA_REPORTING_DISABLED 0x00
+ #define ERR_DATA_REPORTING_ENABLED 0x01
+struct hci_rp_read_def_err_data_reporting {
+ __u8 status;
+ __u8 err_data_reporting;
+} __packed;
+
+#define HCI_OP_WRITE_DEF_ERR_DATA_REPORTING 0x0c5b
+struct hci_cp_write_def_err_data_reporting {
+ __u8 err_data_reporting;
+} __packed;
+
+#define HCI_OP_SET_EVENT_MASK_PAGE_2 0x0c63
+
+#define HCI_OP_READ_LOCATION_DATA 0x0c64
+
+#define HCI_OP_READ_FLOW_CONTROL_MODE 0x0c66
+struct hci_rp_read_flow_control_mode {
+ __u8 status;
+ __u8 mode;
+} __packed;
+
+#define HCI_OP_WRITE_LE_HOST_SUPPORTED 0x0c6d
+struct hci_cp_write_le_host_supported {
+ __u8 le;
+ __u8 simul;
+} __packed;
+
+#define HCI_OP_SET_RESERVED_LT_ADDR 0x0c74
+struct hci_cp_set_reserved_lt_addr {
+ __u8 lt_addr;
+} __packed;
+struct hci_rp_set_reserved_lt_addr {
+ __u8 status;
+ __u8 lt_addr;
+} __packed;
+
+#define HCI_OP_DELETE_RESERVED_LT_ADDR 0x0c75
+struct hci_cp_delete_reserved_lt_addr {
+ __u8 lt_addr;
+} __packed;
+struct hci_rp_delete_reserved_lt_addr {
+ __u8 status;
+ __u8 lt_addr;
+} __packed;
+
+#define HCI_OP_SET_CPB_DATA 0x0c76
+struct hci_cp_set_cpb_data {
+ __u8 lt_addr;
+ __u8 fragment;
+ __u8 data_length;
+ __u8 data[HCI_MAX_CPB_DATA_SIZE];
+} __packed;
+struct hci_rp_set_cpb_data {
+ __u8 status;
+ __u8 lt_addr;
+} __packed;
+
+#define HCI_OP_READ_SYNC_TRAIN_PARAMS 0x0c77
+
+#define HCI_OP_WRITE_SYNC_TRAIN_PARAMS 0x0c78
+struct hci_cp_write_sync_train_params {
+ __le16 interval_min;
+ __le16 interval_max;
+ __le32 sync_train_tout;
+ __u8 service_data;
+} __packed;
+struct hci_rp_write_sync_train_params {
+ __u8 status;
+ __le16 sync_train_int;
+} __packed;
+
+#define HCI_OP_READ_SC_SUPPORT 0x0c79
+struct hci_rp_read_sc_support {
+ __u8 status;
+ __u8 support;
+} __packed;
+
+#define HCI_OP_WRITE_SC_SUPPORT 0x0c7a
+struct hci_cp_write_sc_support {
+ __u8 support;
+} __packed;
+
+#define HCI_OP_READ_AUTH_PAYLOAD_TO 0x0c7b
+struct hci_cp_read_auth_payload_to {
+ __le16 handle;
+} __packed;
+struct hci_rp_read_auth_payload_to {
+ __u8 status;
+ __le16 handle;
+ __le16 timeout;
+} __packed;
+
+#define HCI_OP_WRITE_AUTH_PAYLOAD_TO 0x0c7c
+struct hci_cp_write_auth_payload_to {
+ __le16 handle;
+ __le16 timeout;
+} __packed;
+struct hci_rp_write_auth_payload_to {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_OOB_EXT_DATA 0x0c7d
+struct hci_rp_read_local_oob_ext_data {
+ __u8 status;
+ __u8 hash192[16];
+ __u8 rand192[16];
+ __u8 hash256[16];
+ __u8 rand256[16];
+} __packed;
+
+#define HCI_CONFIGURE_DATA_PATH 0x0c83
+struct hci_op_configure_data_path {
+ __u8 direction;
+ __u8 data_path_id;
+ __u8 vnd_len;
+ __u8 vnd_data[];
+} __packed;
+
+#define HCI_OP_READ_LOCAL_VERSION 0x1001
+struct hci_rp_read_local_version {
+ __u8 status;
+ __u8 hci_ver;
+ __le16 hci_rev;
+ __u8 lmp_ver;
+ __le16 manufacturer;
+ __le16 lmp_subver;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_COMMANDS 0x1002
+struct hci_rp_read_local_commands {
+ __u8 status;
+ __u8 commands[64];
+} __packed;
+
+#define HCI_OP_READ_LOCAL_FEATURES 0x1003
+struct hci_rp_read_local_features {
+ __u8 status;
+ __u8 features[8];
+} __packed;
+
+#define HCI_OP_READ_LOCAL_EXT_FEATURES 0x1004
+struct hci_cp_read_local_ext_features {
+ __u8 page;
+} __packed;
+struct hci_rp_read_local_ext_features {
+ __u8 status;
+ __u8 page;
+ __u8 max_page;
+ __u8 features[8];
+} __packed;
+
+#define HCI_OP_READ_BUFFER_SIZE 0x1005
+struct hci_rp_read_buffer_size {
+ __u8 status;
+ __le16 acl_mtu;
+ __u8 sco_mtu;
+ __le16 acl_max_pkt;
+ __le16 sco_max_pkt;
+} __packed;
+
+#define HCI_OP_READ_BD_ADDR 0x1009
+struct hci_rp_read_bd_addr {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_READ_DATA_BLOCK_SIZE 0x100a
+struct hci_rp_read_data_block_size {
+ __u8 status;
+ __le16 max_acl_len;
+ __le16 block_len;
+ __le16 num_blocks;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_CODECS 0x100b
+struct hci_std_codecs {
+ __u8 num;
+ __u8 codec[];
+} __packed;
+
+struct hci_vnd_codec {
+ /* company id */
+ __le16 cid;
+ /* vendor codec id */
+ __le16 vid;
+} __packed;
+
+struct hci_vnd_codecs {
+ __u8 num;
+ struct hci_vnd_codec codec[];
+} __packed;
+
+struct hci_rp_read_local_supported_codecs {
+ __u8 status;
+ struct hci_std_codecs std_codecs;
+ struct hci_vnd_codecs vnd_codecs;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_PAIRING_OPTS 0x100c
+struct hci_rp_read_local_pairing_opts {
+ __u8 status;
+ __u8 pairing_opts;
+ __u8 max_key_size;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_CODECS_V2 0x100d
+struct hci_std_codec_v2 {
+ __u8 id;
+ __u8 transport;
+} __packed;
+
+struct hci_std_codecs_v2 {
+ __u8 num;
+ struct hci_std_codec_v2 codec[];
+} __packed;
+
+struct hci_vnd_codec_v2 {
+ __le16 cid;
+ __le16 vid;
+ __u8 transport;
+} __packed;
+
+struct hci_vnd_codecs_v2 {
+ __u8 num;
+ struct hci_vnd_codec_v2 codec[];
+} __packed;
+
+struct hci_rp_read_local_supported_codecs_v2 {
+ __u8 status;
+ struct hci_std_codecs_v2 std_codecs;
+ struct hci_vnd_codecs_v2 vendor_codecs;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_CODEC_CAPS 0x100e
+struct hci_op_read_local_codec_caps {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+ __u8 transport;
+ __u8 direction;
+} __packed;
+
+struct hci_codec_caps {
+ __u8 len;
+ __u8 data[];
+} __packed;
+
+struct hci_rp_read_local_codec_caps {
+ __u8 status;
+ __u8 num_caps;
+} __packed;
+
+#define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b
+struct hci_rp_read_page_scan_activity {
+ __u8 status;
+ __le16 interval;
+ __le16 window;
+} __packed;
+
+#define HCI_OP_WRITE_PAGE_SCAN_ACTIVITY 0x0c1c
+struct hci_cp_write_page_scan_activity {
+ __le16 interval;
+ __le16 window;
+} __packed;
+
+#define HCI_OP_READ_TX_POWER 0x0c2d
+struct hci_cp_read_tx_power {
+ __le16 handle;
+ __u8 type;
+} __packed;
+struct hci_rp_read_tx_power {
+ __u8 status;
+ __le16 handle;
+ __s8 tx_power;
+} __packed;
+
+#define HCI_OP_READ_PAGE_SCAN_TYPE 0x0c46
+struct hci_rp_read_page_scan_type {
+ __u8 status;
+ __u8 type;
+} __packed;
+
+#define HCI_OP_WRITE_PAGE_SCAN_TYPE 0x0c47
+ #define PAGE_SCAN_TYPE_STANDARD 0x00
+ #define PAGE_SCAN_TYPE_INTERLACED 0x01
+
+#define HCI_OP_READ_RSSI 0x1405
+struct hci_cp_read_rssi {
+ __le16 handle;
+} __packed;
+struct hci_rp_read_rssi {
+ __u8 status;
+ __le16 handle;
+ __s8 rssi;
+} __packed;
+
+#define HCI_OP_READ_CLOCK 0x1407
+struct hci_cp_read_clock {
+ __le16 handle;
+ __u8 which;
+} __packed;
+struct hci_rp_read_clock {
+ __u8 status;
+ __le16 handle;
+ __le32 clock;
+ __le16 accuracy;
+} __packed;
+
+#define HCI_OP_READ_ENC_KEY_SIZE 0x1408
+struct hci_cp_read_enc_key_size {
+ __le16 handle;
+} __packed;
+struct hci_rp_read_enc_key_size {
+ __u8 status;
+ __le16 handle;
+ __u8 key_size;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409
+struct hci_rp_read_local_amp_info {
+ __u8 status;
+ __u8 amp_status;
+ __le32 total_bw;
+ __le32 max_bw;
+ __le32 min_latency;
+ __le32 max_pdu;
+ __u8 amp_type;
+ __le16 pal_cap;
+ __le16 max_assoc_size;
+ __le32 max_flush_to;
+ __le32 be_flush_to;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_AMP_ASSOC 0x140a
+struct hci_cp_read_local_amp_assoc {
+ __u8 phy_handle;
+ __le16 len_so_far;
+ __le16 max_len;
+} __packed;
+struct hci_rp_read_local_amp_assoc {
+ __u8 status;
+ __u8 phy_handle;
+ __le16 rem_len;
+ __u8 frag[];
+} __packed;
+
+#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b
+struct hci_cp_write_remote_amp_assoc {
+ __u8 phy_handle;
+ __le16 len_so_far;
+ __le16 rem_len;
+ __u8 frag[];
+} __packed;
+struct hci_rp_write_remote_amp_assoc {
+ __u8 status;
+ __u8 phy_handle;
+} __packed;
+
+#define HCI_OP_GET_MWS_TRANSPORT_CONFIG 0x140c
+
+#define HCI_OP_ENABLE_DUT_MODE 0x1803
+
+#define HCI_OP_WRITE_SSP_DEBUG_MODE 0x1804
+
+#define HCI_OP_LE_SET_EVENT_MASK 0x2001
+struct hci_cp_le_set_event_mask {
+ __u8 mask[8];
+} __packed;
+
+#define HCI_OP_LE_READ_BUFFER_SIZE 0x2002
+struct hci_rp_le_read_buffer_size {
+ __u8 status;
+ __le16 le_mtu;
+ __u8 le_max_pkt;
+} __packed;
+
+#define HCI_OP_LE_READ_LOCAL_FEATURES 0x2003
+struct hci_rp_le_read_local_features {
+ __u8 status;
+ __u8 features[8];
+} __packed;
+
+#define HCI_OP_LE_SET_RANDOM_ADDR 0x2005
+
+#define HCI_OP_LE_SET_ADV_PARAM 0x2006
+struct hci_cp_le_set_adv_param {
+ __le16 min_interval;
+ __le16 max_interval;
+ __u8 type;
+ __u8 own_address_type;
+ __u8 direct_addr_type;
+ bdaddr_t direct_addr;
+ __u8 channel_map;
+ __u8 filter_policy;
+} __packed;
+
+#define HCI_OP_LE_READ_ADV_TX_POWER 0x2007
+struct hci_rp_le_read_adv_tx_power {
+ __u8 status;
+ __s8 tx_power;
+} __packed;
+
+#define HCI_MAX_AD_LENGTH 31
+
+#define HCI_OP_LE_SET_ADV_DATA 0x2008
+struct hci_cp_le_set_adv_data {
+ __u8 length;
+ __u8 data[HCI_MAX_AD_LENGTH];
+} __packed;
+
+#define HCI_OP_LE_SET_SCAN_RSP_DATA 0x2009
+struct hci_cp_le_set_scan_rsp_data {
+ __u8 length;
+ __u8 data[HCI_MAX_AD_LENGTH];
+} __packed;
+
+#define HCI_OP_LE_SET_ADV_ENABLE 0x200a
+
+#define LE_SCAN_PASSIVE 0x00
+#define LE_SCAN_ACTIVE 0x01
+
+#define HCI_OP_LE_SET_SCAN_PARAM 0x200b
+struct hci_cp_le_set_scan_param {
+ __u8 type;
+ __le16 interval;
+ __le16 window;
+ __u8 own_address_type;
+ __u8 filter_policy;
+} __packed;
+
+#define LE_SCAN_DISABLE 0x00
+#define LE_SCAN_ENABLE 0x01
+#define LE_SCAN_FILTER_DUP_DISABLE 0x00
+#define LE_SCAN_FILTER_DUP_ENABLE 0x01
+
+#define HCI_OP_LE_SET_SCAN_ENABLE 0x200c
+struct hci_cp_le_set_scan_enable {
+ __u8 enable;
+ __u8 filter_dup;
+} __packed;
+
+#define HCI_LE_USE_PEER_ADDR 0x00
+#define HCI_LE_USE_ACCEPT_LIST 0x01
+
+#define HCI_OP_LE_CREATE_CONN 0x200d
+struct hci_cp_le_create_conn {
+ __le16 scan_interval;
+ __le16 scan_window;
+ __u8 filter_policy;
+ __u8 peer_addr_type;
+ bdaddr_t peer_addr;
+ __u8 own_address_type;
+ __le16 conn_interval_min;
+ __le16 conn_interval_max;
+ __le16 conn_latency;
+ __le16 supervision_timeout;
+ __le16 min_ce_len;
+ __le16 max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_CREATE_CONN_CANCEL 0x200e
+
+#define HCI_OP_LE_READ_ACCEPT_LIST_SIZE 0x200f
+struct hci_rp_le_read_accept_list_size {
+ __u8 status;
+ __u8 size;
+} __packed;
+
+#define HCI_OP_LE_CLEAR_ACCEPT_LIST 0x2010
+
+#define HCI_OP_LE_ADD_TO_ACCEPT_LIST 0x2011
+struct hci_cp_le_add_to_accept_list {
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_LE_DEL_FROM_ACCEPT_LIST 0x2012
+struct hci_cp_le_del_from_accept_list {
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_LE_CONN_UPDATE 0x2013
+struct hci_cp_le_conn_update {
+ __le16 handle;
+ __le16 conn_interval_min;
+ __le16 conn_interval_max;
+ __le16 conn_latency;
+ __le16 supervision_timeout;
+ __le16 min_ce_len;
+ __le16 max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_READ_REMOTE_FEATURES 0x2016
+struct hci_cp_le_read_remote_features {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_START_ENC 0x2019
+struct hci_cp_le_start_enc {
+ __le16 handle;
+ __le64 rand;
+ __le16 ediv;
+ __u8 ltk[16];
+} __packed;
+
+#define HCI_OP_LE_LTK_REPLY 0x201a
+struct hci_cp_le_ltk_reply {
+ __le16 handle;
+ __u8 ltk[16];
+} __packed;
+struct hci_rp_le_ltk_reply {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_LTK_NEG_REPLY 0x201b
+struct hci_cp_le_ltk_neg_reply {
+ __le16 handle;
+} __packed;
+struct hci_rp_le_ltk_neg_reply {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_READ_SUPPORTED_STATES 0x201c
+struct hci_rp_le_read_supported_states {
+ __u8 status;
+ __u8 le_states[8];
+} __packed;
+
+#define HCI_OP_LE_CONN_PARAM_REQ_REPLY 0x2020
+struct hci_cp_le_conn_param_req_reply {
+ __le16 handle;
+ __le16 interval_min;
+ __le16 interval_max;
+ __le16 latency;
+ __le16 timeout;
+ __le16 min_ce_len;
+ __le16 max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY 0x2021
+struct hci_cp_le_conn_param_req_neg_reply {
+ __le16 handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_OP_LE_SET_DATA_LEN 0x2022
+struct hci_cp_le_set_data_len {
+ __le16 handle;
+ __le16 tx_len;
+ __le16 tx_time;
+} __packed;
+struct hci_rp_le_set_data_len {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_READ_DEF_DATA_LEN 0x2023
+struct hci_rp_le_read_def_data_len {
+ __u8 status;
+ __le16 tx_len;
+ __le16 tx_time;
+} __packed;
+
+#define HCI_OP_LE_WRITE_DEF_DATA_LEN 0x2024
+struct hci_cp_le_write_def_data_len {
+ __le16 tx_len;
+ __le16 tx_time;
+} __packed;
+
+#define HCI_OP_LE_ADD_TO_RESOLV_LIST 0x2027
+struct hci_cp_le_add_to_resolv_list {
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 peer_irk[16];
+ __u8 local_irk[16];
+} __packed;
+
+#define HCI_OP_LE_DEL_FROM_RESOLV_LIST 0x2028
+struct hci_cp_le_del_from_resolv_list {
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_LE_CLEAR_RESOLV_LIST 0x2029
+
+#define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a
+struct hci_rp_le_read_resolv_list_size {
+ __u8 status;
+ __u8 size;
+} __packed;
+
+#define HCI_OP_LE_SET_ADDR_RESOLV_ENABLE 0x202d
+
+#define HCI_OP_LE_SET_RPA_TIMEOUT 0x202e
+
+#define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f
+struct hci_rp_le_read_max_data_len {
+ __u8 status;
+ __le16 tx_len;
+ __le16 tx_time;
+ __le16 rx_len;
+ __le16 rx_time;
+} __packed;
+
+#define HCI_OP_LE_SET_DEFAULT_PHY 0x2031
+struct hci_cp_le_set_default_phy {
+ __u8 all_phys;
+ __u8 tx_phys;
+ __u8 rx_phys;
+} __packed;
+
+#define HCI_LE_SET_PHY_1M 0x01
+#define HCI_LE_SET_PHY_2M 0x02
+#define HCI_LE_SET_PHY_CODED 0x04
+
+#define HCI_OP_LE_SET_EXT_SCAN_PARAMS 0x2041
+struct hci_cp_le_set_ext_scan_params {
+ __u8 own_addr_type;
+ __u8 filter_policy;
+ __u8 scanning_phys;
+ __u8 data[];
+} __packed;
+
+#define LE_SCAN_PHY_1M 0x01
+#define LE_SCAN_PHY_2M 0x02
+#define LE_SCAN_PHY_CODED 0x04
+
+struct hci_cp_le_scan_phy_params {
+ __u8 type;
+ __le16 interval;
+ __le16 window;
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_SCAN_ENABLE 0x2042
+struct hci_cp_le_set_ext_scan_enable {
+ __u8 enable;
+ __u8 filter_dup;
+ __le16 duration;
+ __le16 period;
+} __packed;
+
+#define HCI_OP_LE_EXT_CREATE_CONN 0x2043
+struct hci_cp_le_ext_create_conn {
+ __u8 filter_policy;
+ __u8 own_addr_type;
+ __u8 peer_addr_type;
+ bdaddr_t peer_addr;
+ __u8 phys;
+ __u8 data[];
+} __packed;
+
+struct hci_cp_le_ext_conn_param {
+ __le16 scan_interval;
+ __le16 scan_window;
+ __le16 conn_interval_min;
+ __le16 conn_interval_max;
+ __le16 conn_latency;
+ __le16 supervision_timeout;
+ __le16 min_ce_len;
+ __le16 max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_PA_CREATE_SYNC 0x2044
+struct hci_cp_le_pa_create_sync {
+ __u8 options;
+ __u8 sid;
+ __u8 addr_type;
+ bdaddr_t addr;
+ __le16 skip;
+ __le16 sync_timeout;
+ __u8 sync_cte_type;
+} __packed;
+
+#define HCI_OP_LE_PA_TERM_SYNC 0x2046
+struct hci_cp_le_pa_term_sync {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS 0x203b
+struct hci_rp_le_read_num_supported_adv_sets {
+ __u8 status;
+ __u8 num_of_sets;
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_ADV_PARAMS 0x2036
+struct hci_cp_le_set_ext_adv_params {
+ __u8 handle;
+ __le16 evt_properties;
+ __u8 min_interval[3];
+ __u8 max_interval[3];
+ __u8 channel_map;
+ __u8 own_addr_type;
+ __u8 peer_addr_type;
+ bdaddr_t peer_addr;
+ __u8 filter_policy;
+ __u8 tx_power;
+ __u8 primary_phy;
+ __u8 secondary_max_skip;
+ __u8 secondary_phy;
+ __u8 sid;
+ __u8 notif_enable;
+} __packed;
+
+#define HCI_ADV_PHY_1M 0x01
+#define HCI_ADV_PHY_2M 0x02
+#define HCI_ADV_PHY_CODED 0x03
+
+struct hci_rp_le_set_ext_adv_params {
+ __u8 status;
+ __u8 tx_power;
+} __packed;
+
+struct hci_cp_ext_adv_set {
+ __u8 handle;
+ __le16 duration;
+ __u8 max_events;
+} __packed;
+
+#define HCI_MAX_EXT_AD_LENGTH 251
+
+#define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037
+struct hci_cp_le_set_ext_adv_data {
+ __u8 handle;
+ __u8 operation;
+ __u8 frag_pref;
+ __u8 length;
+ __u8 data[];
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038
+struct hci_cp_le_set_ext_scan_rsp_data {
+ __u8 handle;
+ __u8 operation;
+ __u8 frag_pref;
+ __u8 length;
+ __u8 data[];
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039
+struct hci_cp_le_set_ext_adv_enable {
+ __u8 enable;
+ __u8 num_of_sets;
+ __u8 data[];
+} __packed;
+
+#define HCI_OP_LE_SET_PER_ADV_PARAMS 0x203e
+struct hci_cp_le_set_per_adv_params {
+ __u8 handle;
+ __le16 min_interval;
+ __le16 max_interval;
+ __le16 periodic_properties;
+} __packed;
+
+#define HCI_MAX_PER_AD_LENGTH 252
+
+#define HCI_OP_LE_SET_PER_ADV_DATA 0x203f
+struct hci_cp_le_set_per_adv_data {
+ __u8 handle;
+ __u8 operation;
+ __u8 length;
+ __u8 data[];
+} __packed;
+
+#define HCI_OP_LE_SET_PER_ADV_ENABLE 0x2040
+struct hci_cp_le_set_per_adv_enable {
+ __u8 enable;
+ __u8 handle;
+} __packed;
+
+#define LE_SET_ADV_DATA_OP_COMPLETE 0x03
+
+#define LE_SET_ADV_DATA_NO_FRAG 0x01
+
+#define HCI_OP_LE_REMOVE_ADV_SET 0x203c
+
+#define HCI_OP_LE_CLEAR_ADV_SETS 0x203d
+
+#define HCI_OP_LE_SET_ADV_SET_RAND_ADDR 0x2035
+struct hci_cp_le_set_adv_set_rand_addr {
+ __u8 handle;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_LE_READ_TRANSMIT_POWER 0x204b
+struct hci_rp_le_read_transmit_power {
+ __u8 status;
+ __s8 min_le_tx_power;
+ __s8 max_le_tx_power;
+} __packed;
+
+#define HCI_NETWORK_PRIVACY 0x00
+#define HCI_DEVICE_PRIVACY 0x01
+
+#define HCI_OP_LE_SET_PRIVACY_MODE 0x204e
+struct hci_cp_le_set_privacy_mode {
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 mode;
+} __packed;
+
+#define HCI_OP_LE_READ_BUFFER_SIZE_V2 0x2060
+struct hci_rp_le_read_buffer_size_v2 {
+ __u8 status;
+ __le16 acl_mtu;
+ __u8 acl_max_pkt;
+ __le16 iso_mtu;
+ __u8 iso_max_pkt;
+} __packed;
+
+#define HCI_OP_LE_READ_ISO_TX_SYNC 0x2061
+struct hci_cp_le_read_iso_tx_sync {
+ __le16 handle;
+} __packed;
+
+struct hci_rp_le_read_iso_tx_sync {
+ __u8 status;
+ __le16 handle;
+ __le16 seq;
+ __le32 timestamp;
+ __u8 offset[3];
+} __packed;
+
+#define HCI_OP_LE_SET_CIG_PARAMS 0x2062
+struct hci_cis_params {
+ __u8 cis_id;
+ __le16 c_sdu;
+ __le16 p_sdu;
+ __u8 c_phy;
+ __u8 p_phy;
+ __u8 c_rtn;
+ __u8 p_rtn;
+} __packed;
+
+struct hci_cp_le_set_cig_params {
+ __u8 cig_id;
+ __u8 c_interval[3];
+ __u8 p_interval[3];
+ __u8 sca;
+ __u8 packing;
+ __u8 framing;
+ __le16 c_latency;
+ __le16 p_latency;
+ __u8 num_cis;
+ struct hci_cis_params cis[];
+} __packed;
+
+struct hci_rp_le_set_cig_params {
+ __u8 status;
+ __u8 cig_id;
+ __u8 num_handles;
+ __le16 handle[];
+} __packed;
+
+#define HCI_OP_LE_CREATE_CIS 0x2064
+struct hci_cis {
+ __le16 cis_handle;
+ __le16 acl_handle;
+} __packed;
+
+struct hci_cp_le_create_cis {
+ __u8 num_cis;
+ struct hci_cis cis[];
+} __packed;
+
+#define HCI_OP_LE_REMOVE_CIG 0x2065
+struct hci_cp_le_remove_cig {
+ __u8 cig_id;
+} __packed;
+
+#define HCI_OP_LE_ACCEPT_CIS 0x2066
+struct hci_cp_le_accept_cis {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_REJECT_CIS 0x2067
+struct hci_cp_le_reject_cis {
+ __le16 handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_OP_LE_CREATE_BIG 0x2068
+struct hci_bis {
+ __u8 sdu_interval[3];
+ __le16 sdu;
+ __le16 latency;
+ __u8 rtn;
+ __u8 phy;
+ __u8 packing;
+ __u8 framing;
+ __u8 encryption;
+ __u8 bcode[16];
+} __packed;
+
+struct hci_cp_le_create_big {
+ __u8 handle;
+ __u8 adv_handle;
+ __u8 num_bis;
+ struct hci_bis bis;
+} __packed;
+
+#define HCI_OP_LE_TERM_BIG 0x206a
+struct hci_cp_le_term_big {
+ __u8 handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_OP_LE_BIG_CREATE_SYNC 0x206b
+struct hci_cp_le_big_create_sync {
+ __u8 handle;
+ __le16 sync_handle;
+ __u8 encryption;
+ __u8 bcode[16];
+ __u8 mse;
+ __le16 timeout;
+ __u8 num_bis;
+ __u8 bis[];
+} __packed;
+
+#define HCI_OP_LE_BIG_TERM_SYNC 0x206c
+struct hci_cp_le_big_term_sync {
+ __u8 handle;
+} __packed;
+
+#define HCI_OP_LE_SETUP_ISO_PATH 0x206e
+struct hci_cp_le_setup_iso_path {
+ __le16 handle;
+ __u8 direction;
+ __u8 path;
+ __u8 codec;
+ __le16 codec_cid;
+ __le16 codec_vid;
+ __u8 delay[3];
+ __u8 codec_cfg_len;
+ __u8 codec_cfg[];
+} __packed;
+
+struct hci_rp_le_setup_iso_path {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_SET_HOST_FEATURE 0x2074
+struct hci_cp_le_set_host_feature {
+ __u8 bit_number;
+ __u8 bit_value;
+} __packed;
+
+/* ---- HCI Events ---- */
+struct hci_ev_status {
+ __u8 status;
+} __packed;
+
+#define HCI_EV_INQUIRY_COMPLETE 0x01
+
+#define HCI_EV_INQUIRY_RESULT 0x02
+struct inquiry_info {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+ __u8 pscan_period_mode;
+ __u8 pscan_mode;
+ __u8 dev_class[3];
+ __le16 clock_offset;
+} __packed;
+
+struct hci_ev_inquiry_result {
+ __u8 num;
+ struct inquiry_info info[];
+} __packed;
+
+#define HCI_EV_CONN_COMPLETE 0x03
+struct hci_ev_conn_complete {
+ __u8 status;
+ __le16 handle;
+ bdaddr_t bdaddr;
+ __u8 link_type;
+ __u8 encr_mode;
+} __packed;
+
+#define HCI_EV_CONN_REQUEST 0x04
+struct hci_ev_conn_request {
+ bdaddr_t bdaddr;
+ __u8 dev_class[3];
+ __u8 link_type;
+} __packed;
+
+#define HCI_EV_DISCONN_COMPLETE 0x05
+struct hci_ev_disconn_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_EV_AUTH_COMPLETE 0x06
+struct hci_ev_auth_complete {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_EV_REMOTE_NAME 0x07
+struct hci_ev_remote_name {
+ __u8 status;
+ bdaddr_t bdaddr;
+ __u8 name[HCI_MAX_NAME_LENGTH];
+} __packed;
+
+#define HCI_EV_ENCRYPT_CHANGE 0x08
+struct hci_ev_encrypt_change {
+ __u8 status;
+ __le16 handle;
+ __u8 encrypt;
+} __packed;
+
+#define HCI_EV_CHANGE_LINK_KEY_COMPLETE 0x09
+struct hci_ev_change_link_key_complete {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_EV_REMOTE_FEATURES 0x0b
+struct hci_ev_remote_features {
+ __u8 status;
+ __le16 handle;
+ __u8 features[8];
+} __packed;
+
+#define HCI_EV_REMOTE_VERSION 0x0c
+struct hci_ev_remote_version {
+ __u8 status;
+ __le16 handle;
+ __u8 lmp_ver;
+ __le16 manufacturer;
+ __le16 lmp_subver;
+} __packed;
+
+#define HCI_EV_QOS_SETUP_COMPLETE 0x0d
+struct hci_qos {
+ __u8 service_type;
+ __u32 token_rate;
+ __u32 peak_bandwidth;
+ __u32 latency;
+ __u32 delay_variation;
+} __packed;
+struct hci_ev_qos_setup_complete {
+ __u8 status;
+ __le16 handle;
+ struct hci_qos qos;
+} __packed;
+
+#define HCI_EV_CMD_COMPLETE 0x0e
+struct hci_ev_cmd_complete {
+ __u8 ncmd;
+ __le16 opcode;
+} __packed;
+
+#define HCI_EV_CMD_STATUS 0x0f
+struct hci_ev_cmd_status {
+ __u8 status;
+ __u8 ncmd;
+ __le16 opcode;
+} __packed;
+
+#define HCI_EV_HARDWARE_ERROR 0x10
+struct hci_ev_hardware_error {
+ __u8 code;
+} __packed;
+
+#define HCI_EV_ROLE_CHANGE 0x12
+struct hci_ev_role_change {
+ __u8 status;
+ bdaddr_t bdaddr;
+ __u8 role;
+} __packed;
+
+#define HCI_EV_NUM_COMP_PKTS 0x13
+struct hci_comp_pkts_info {
+ __le16 handle;
+ __le16 count;
+} __packed;
+
+struct hci_ev_num_comp_pkts {
+ __u8 num;
+ struct hci_comp_pkts_info handles[];
+} __packed;
+
+#define HCI_EV_MODE_CHANGE 0x14
+struct hci_ev_mode_change {
+ __u8 status;
+ __le16 handle;
+ __u8 mode;
+ __le16 interval;
+} __packed;
+
+#define HCI_EV_PIN_CODE_REQ 0x16
+struct hci_ev_pin_code_req {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_EV_LINK_KEY_REQ 0x17
+struct hci_ev_link_key_req {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_EV_LINK_KEY_NOTIFY 0x18
+struct hci_ev_link_key_notify {
+ bdaddr_t bdaddr;
+ __u8 link_key[HCI_LINK_KEY_SIZE];
+ __u8 key_type;
+} __packed;
+
+#define HCI_EV_CLOCK_OFFSET 0x1c
+struct hci_ev_clock_offset {
+ __u8 status;
+ __le16 handle;
+ __le16 clock_offset;
+} __packed;
+
+#define HCI_EV_PKT_TYPE_CHANGE 0x1d
+struct hci_ev_pkt_type_change {
+ __u8 status;
+ __le16 handle;
+ __le16 pkt_type;
+} __packed;
+
+#define HCI_EV_PSCAN_REP_MODE 0x20
+struct hci_ev_pscan_rep_mode {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+} __packed;
+
+#define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22
+struct inquiry_info_rssi {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+ __u8 pscan_period_mode;
+ __u8 dev_class[3];
+ __le16 clock_offset;
+ __s8 rssi;
+} __packed;
+struct inquiry_info_rssi_pscan {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+ __u8 pscan_period_mode;
+ __u8 pscan_mode;
+ __u8 dev_class[3];
+ __le16 clock_offset;
+ __s8 rssi;
+} __packed;
+struct hci_ev_inquiry_result_rssi {
+ __u8 num;
+ __u8 data[];
+} __packed;
+
+#define HCI_EV_REMOTE_EXT_FEATURES 0x23
+struct hci_ev_remote_ext_features {
+ __u8 status;
+ __le16 handle;
+ __u8 page;
+ __u8 max_page;
+ __u8 features[8];
+} __packed;
+
+#define HCI_EV_SYNC_CONN_COMPLETE 0x2c
+struct hci_ev_sync_conn_complete {
+ __u8 status;
+ __le16 handle;
+ bdaddr_t bdaddr;
+ __u8 link_type;
+ __u8 tx_interval;
+ __u8 retrans_window;
+ __le16 rx_pkt_len;
+ __le16 tx_pkt_len;
+ __u8 air_mode;
+} __packed;
+
+#define HCI_EV_SYNC_CONN_CHANGED 0x2d
+struct hci_ev_sync_conn_changed {
+ __u8 status;
+ __le16 handle;
+ __u8 tx_interval;
+ __u8 retrans_window;
+ __le16 rx_pkt_len;
+ __le16 tx_pkt_len;
+} __packed;
+
+#define HCI_EV_SNIFF_SUBRATE 0x2e
+struct hci_ev_sniff_subrate {
+ __u8 status;
+ __le16 handle;
+ __le16 max_tx_latency;
+ __le16 max_rx_latency;
+ __le16 max_remote_timeout;
+ __le16 max_local_timeout;
+} __packed;
+
+#define HCI_EV_EXTENDED_INQUIRY_RESULT 0x2f
+struct extended_inquiry_info {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+ __u8 pscan_period_mode;
+ __u8 dev_class[3];
+ __le16 clock_offset;
+ __s8 rssi;
+ __u8 data[240];
+} __packed;
+
+struct hci_ev_ext_inquiry_result {
+ __u8 num;
+ struct extended_inquiry_info info[];
+} __packed;
+
+#define HCI_EV_KEY_REFRESH_COMPLETE 0x30
+struct hci_ev_key_refresh_complete {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_EV_IO_CAPA_REQUEST 0x31
+struct hci_ev_io_capa_request {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_EV_IO_CAPA_REPLY 0x32
+struct hci_ev_io_capa_reply {
+ bdaddr_t bdaddr;
+ __u8 capability;
+ __u8 oob_data;
+ __u8 authentication;
+} __packed;
+
+#define HCI_EV_USER_CONFIRM_REQUEST 0x33
+struct hci_ev_user_confirm_req {
+ bdaddr_t bdaddr;
+ __le32 passkey;
+} __packed;
+
+#define HCI_EV_USER_PASSKEY_REQUEST 0x34
+struct hci_ev_user_passkey_req {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_EV_REMOTE_OOB_DATA_REQUEST 0x35
+struct hci_ev_remote_oob_data_request {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_EV_SIMPLE_PAIR_COMPLETE 0x36
+struct hci_ev_simple_pair_complete {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_EV_USER_PASSKEY_NOTIFY 0x3b
+struct hci_ev_user_passkey_notify {
+ bdaddr_t bdaddr;
+ __le32 passkey;
+} __packed;
+
+#define HCI_KEYPRESS_STARTED 0
+#define HCI_KEYPRESS_ENTERED 1
+#define HCI_KEYPRESS_ERASED 2
+#define HCI_KEYPRESS_CLEARED 3
+#define HCI_KEYPRESS_COMPLETED 4
+
+#define HCI_EV_KEYPRESS_NOTIFY 0x3c
+struct hci_ev_keypress_notify {
+ bdaddr_t bdaddr;
+ __u8 type;
+} __packed;
+
+#define HCI_EV_REMOTE_HOST_FEATURES 0x3d
+struct hci_ev_remote_host_features {
+ bdaddr_t bdaddr;
+ __u8 features[8];
+} __packed;
+
+#define HCI_EV_LE_META 0x3e
+struct hci_ev_le_meta {
+ __u8 subevent;
+} __packed;
+
+#define HCI_EV_PHY_LINK_COMPLETE 0x40
+struct hci_ev_phy_link_complete {
+ __u8 status;
+ __u8 phy_handle;
+} __packed;
+
+#define HCI_EV_CHANNEL_SELECTED 0x41
+struct hci_ev_channel_selected {
+ __u8 phy_handle;
+} __packed;
+
+#define HCI_EV_DISCONN_PHY_LINK_COMPLETE 0x42
+struct hci_ev_disconn_phy_link_complete {
+ __u8 status;
+ __u8 phy_handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_EV_LOGICAL_LINK_COMPLETE 0x45
+struct hci_ev_logical_link_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 phy_handle;
+ __u8 flow_spec_id;
+} __packed;
+
+#define HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE 0x46
+struct hci_ev_disconn_logical_link_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_EV_NUM_COMP_BLOCKS 0x48
+struct hci_comp_blocks_info {
+ __le16 handle;
+ __le16 pkts;
+ __le16 blocks;
+} __packed;
+
+struct hci_ev_num_comp_blocks {
+ __le16 num_blocks;
+ __u8 num_hndl;
+ struct hci_comp_blocks_info handles[];
+} __packed;
+
+#define HCI_EV_SYNC_TRAIN_COMPLETE 0x4F
+struct hci_ev_sync_train_complete {
+ __u8 status;
+} __packed;
+
+#define HCI_EV_PERIPHERAL_PAGE_RESP_TIMEOUT 0x54
+
+#define HCI_EV_LE_CONN_COMPLETE 0x01
+struct hci_ev_le_conn_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 role;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __le16 interval;
+ __le16 latency;
+ __le16 supervision_timeout;
+ __u8 clk_accuracy;
+} __packed;
+
+/* Advertising report event types */
+#define LE_ADV_IND 0x00
+#define LE_ADV_DIRECT_IND 0x01
+#define LE_ADV_SCAN_IND 0x02
+#define LE_ADV_NONCONN_IND 0x03
+#define LE_ADV_SCAN_RSP 0x04
+#define LE_ADV_INVALID 0x05
+
+/* Legacy event types in extended adv report */
+#define LE_LEGACY_ADV_IND 0x0013
+#define LE_LEGACY_ADV_DIRECT_IND 0x0015
+#define LE_LEGACY_ADV_SCAN_IND 0x0012
+#define LE_LEGACY_NONCONN_IND 0x0010
+#define LE_LEGACY_SCAN_RSP_ADV 0x001b
+#define LE_LEGACY_SCAN_RSP_ADV_SCAN 0x001a
+
+/* Extended Advertising event types */
+#define LE_EXT_ADV_NON_CONN_IND 0x0000
+#define LE_EXT_ADV_CONN_IND 0x0001
+#define LE_EXT_ADV_SCAN_IND 0x0002
+#define LE_EXT_ADV_DIRECT_IND 0x0004
+#define LE_EXT_ADV_SCAN_RSP 0x0008
+#define LE_EXT_ADV_LEGACY_PDU 0x0010
+
+#define ADDR_LE_DEV_PUBLIC 0x00
+#define ADDR_LE_DEV_RANDOM 0x01
+#define ADDR_LE_DEV_PUBLIC_RESOLVED 0x02
+#define ADDR_LE_DEV_RANDOM_RESOLVED 0x03
+
+#define HCI_EV_LE_ADVERTISING_REPORT 0x02
+struct hci_ev_le_advertising_info {
+ __u8 type;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 length;
+ __u8 data[];
+} __packed;
+
+struct hci_ev_le_advertising_report {
+ __u8 num;
+ struct hci_ev_le_advertising_info info[];
+} __packed;
+
+#define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03
+struct hci_ev_le_conn_update_complete {
+ __u8 status;
+ __le16 handle;
+ __le16 interval;
+ __le16 latency;
+ __le16 supervision_timeout;
+} __packed;
+
+#define HCI_EV_LE_REMOTE_FEAT_COMPLETE 0x04
+struct hci_ev_le_remote_feat_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 features[8];
+} __packed;
+
+#define HCI_EV_LE_LTK_REQ 0x05
+struct hci_ev_le_ltk_req {
+ __le16 handle;
+ __le64 rand;
+ __le16 ediv;
+} __packed;
+
+#define HCI_EV_LE_REMOTE_CONN_PARAM_REQ 0x06
+struct hci_ev_le_remote_conn_param_req {
+ __le16 handle;
+ __le16 interval_min;
+ __le16 interval_max;
+ __le16 latency;
+ __le16 timeout;
+} __packed;
+
+#define HCI_EV_LE_DATA_LEN_CHANGE 0x07
+struct hci_ev_le_data_len_change {
+ __le16 handle;
+ __le16 tx_len;
+ __le16 tx_time;
+ __le16 rx_len;
+ __le16 rx_time;
+} __packed;
+
+#define HCI_EV_LE_DIRECT_ADV_REPORT 0x0B
+struct hci_ev_le_direct_adv_info {
+ __u8 type;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 direct_addr_type;
+ bdaddr_t direct_addr;
+ __s8 rssi;
+} __packed;
+
+struct hci_ev_le_direct_adv_report {
+ __u8 num;
+ struct hci_ev_le_direct_adv_info info[];
+} __packed;
+
+#define HCI_EV_LE_PHY_UPDATE_COMPLETE 0x0c
+struct hci_ev_le_phy_update_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 tx_phy;
+ __u8 rx_phy;
+} __packed;
+
+#define HCI_EV_LE_EXT_ADV_REPORT 0x0d
+struct hci_ev_le_ext_adv_info {
+ __le16 type;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 primary_phy;
+ __u8 secondary_phy;
+ __u8 sid;
+ __u8 tx_power;
+ __s8 rssi;
+ __le16 interval;
+ __u8 direct_addr_type;
+ bdaddr_t direct_addr;
+ __u8 length;
+ __u8 data[];
+} __packed;
+
+struct hci_ev_le_ext_adv_report {
+ __u8 num;
+ struct hci_ev_le_ext_adv_info info[];
+} __packed;
+
+#define HCI_EV_LE_PA_SYNC_ESTABLISHED 0x0e
+struct hci_ev_le_pa_sync_established {
+ __u8 status;
+ __le16 handle;
+ __u8 sid;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 phy;
+ __le16 interval;
+ __u8 clock_accuracy;
+} __packed;
+
+#define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a
+struct hci_ev_le_enh_conn_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 role;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ bdaddr_t local_rpa;
+ bdaddr_t peer_rpa;
+ __le16 interval;
+ __le16 latency;
+ __le16 supervision_timeout;
+ __u8 clk_accuracy;
+} __packed;
+
+#define HCI_EV_LE_EXT_ADV_SET_TERM 0x12
+struct hci_evt_le_ext_adv_set_term {
+ __u8 status;
+ __u8 handle;
+ __le16 conn_handle;
+ __u8 num_evts;
+} __packed;
+
+#define HCI_EVT_LE_CIS_ESTABLISHED 0x19
+struct hci_evt_le_cis_established {
+ __u8 status;
+ __le16 handle;
+ __u8 cig_sync_delay[3];
+ __u8 cis_sync_delay[3];
+ __u8 c_latency[3];
+ __u8 p_latency[3];
+ __u8 c_phy;
+ __u8 p_phy;
+ __u8 nse;
+ __u8 c_bn;
+ __u8 p_bn;
+ __u8 c_ft;
+ __u8 p_ft;
+ __le16 c_mtu;
+ __le16 p_mtu;
+ __le16 interval;
+} __packed;
+
+#define HCI_EVT_LE_CIS_REQ 0x1a
+struct hci_evt_le_cis_req {
+ __le16 acl_handle;
+ __le16 cis_handle;
+ __u8 cig_id;
+ __u8 cis_id;
+} __packed;
+
+#define HCI_EVT_LE_CREATE_BIG_COMPLETE 0x1b
+struct hci_evt_le_create_big_complete {
+ __u8 status;
+ __u8 handle;
+ __u8 sync_delay[3];
+ __u8 transport_delay[3];
+ __u8 phy;
+ __u8 nse;
+ __u8 bn;
+ __u8 pto;
+ __u8 irc;
+ __le16 max_pdu;
+ __le16 interval;
+ __u8 num_bis;
+ __le16 bis_handle[];
+} __packed;
+
+#define HCI_EVT_LE_BIG_SYNC_ESTABLISHED 0x1d
+struct hci_evt_le_big_sync_established {
+ __u8 status;
+ __u8 handle;
+ __u8 latency[3];
+ __u8 nse;
+ __u8 bn;
+ __u8 pto;
+ __u8 irc;
+ __le16 max_pdu;
+ __le16 interval;
+ __u8 num_bis;
+ __le16 bis[];
+} __packed;
+
+#define HCI_EVT_LE_BIG_INFO_ADV_REPORT 0x22
+struct hci_evt_le_big_info_adv_report {
+ __le16 sync_handle;
+ __u8 num_bis;
+ __u8 nse;
+ __le16 iso_interval;
+ __u8 bn;
+ __u8 pto;
+ __u8 irc;
+ __le16 max_pdu;
+ __u8 sdu_interval[3];
+ __le16 max_sdu;
+ __u8 phy;
+ __u8 framing;
+ __u8 encryption;
+} __packed;
+
+#define HCI_EV_VENDOR 0xff
+
+/* Internal events generated by Bluetooth stack */
+#define HCI_EV_STACK_INTERNAL 0xfd
+struct hci_ev_stack_internal {
+ __u16 type;
+ __u8 data[];
+} __packed;
+
+#define HCI_EV_SI_DEVICE 0x01
+struct hci_ev_si_device {
+ __u16 event;
+ __u16 dev_id;
+} __packed;
+
+#define HCI_EV_SI_SECURITY 0x02
+struct hci_ev_si_security {
+ __u16 event;
+ __u16 proto;
+ __u16 subproto;
+ __u8 incoming;
+} __packed;
+
+/* ---- HCI Packet structures ---- */
+#define HCI_COMMAND_HDR_SIZE 3
+#define HCI_EVENT_HDR_SIZE 2
+#define HCI_ACL_HDR_SIZE 4
+#define HCI_SCO_HDR_SIZE 3
+#define HCI_ISO_HDR_SIZE 4
+
+struct hci_command_hdr {
+ __le16 opcode; /* OCF & OGF */
+ __u8 plen;
+} __packed;
+
+struct hci_event_hdr {
+ __u8 evt;
+ __u8 plen;
+} __packed;
+
+struct hci_acl_hdr {
+ __le16 handle; /* Handle & Flags(PB, BC) */
+ __le16 dlen;
+} __packed;
+
+struct hci_sco_hdr {
+ __le16 handle;
+ __u8 dlen;
+} __packed;
+
+struct hci_iso_hdr {
+ __le16 handle;
+ __le16 dlen;
+ __u8 data[];
+} __packed;
+
+/* ISO data packet status flags */
+#define HCI_ISO_STATUS_VALID 0x00
+#define HCI_ISO_STATUS_INVALID 0x01
+#define HCI_ISO_STATUS_NOP 0x02
+
+#define HCI_ISO_DATA_HDR_SIZE 4
+struct hci_iso_data_hdr {
+ __le16 sn;
+ __le16 slen;
+};
+
+#define HCI_ISO_TS_DATA_HDR_SIZE 8
+struct hci_iso_ts_data_hdr {
+ __le32 ts;
+ __le16 sn;
+ __le16 slen;
+};
+
+static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
+{
+ return (struct hci_event_hdr *) skb->data;
+}
+
+static inline struct hci_acl_hdr *hci_acl_hdr(const struct sk_buff *skb)
+{
+ return (struct hci_acl_hdr *) skb->data;
+}
+
+static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
+{
+ return (struct hci_sco_hdr *) skb->data;
+}
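+
+/* For symmetry with the accessors above, an ISO variant would look like
+ * the sketch below. The function is illustrative and not part of the
+ * original header; only the struct of that name is defined above.
+ */
+static inline struct hci_iso_hdr *hci_iso_hdr(const struct sk_buff *skb)
+{
+	return (struct hci_iso_hdr *) skb->data;
+}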
+
+/* Command opcode pack/unpack */
+#define hci_opcode_pack(ogf, ocf) ((__u16) ((ocf & 0x03ff)|(ogf << 10)))
+#define hci_opcode_ogf(op) (op >> 10)
+#define hci_opcode_ocf(op) (op & 0x03ff)
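+
+/* Example (illustrative): OGF 0x03 (Controller & Baseband) with OCF 0x0003
+ * packs to HCI_OP_RESET as defined above:
+ *
+ *	hci_opcode_pack(0x03, 0x0003) == 0x0c03
+ *	hci_opcode_ogf(0x0c03) == 0x03
+ *	hci_opcode_ocf(0x0c03) == 0x0003
+ */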
+
+/* ACL handle and flags pack/unpack */
+#define hci_handle_pack(h, f) ((__u16) ((h & 0x0fff)|(f << 12)))
+#define hci_handle(h) (h & 0x0fff)
+#define hci_flags(h) (h >> 12)
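+
+/* Example (illustrative): a 12-bit connection handle of 0x0001 with
+ * packet-boundary/broadcast flags 0x2 packs the flags into the top bits:
+ *
+ *	hci_handle_pack(0x0001, 0x2) == 0x2001
+ *	hci_handle(0x2001) == 0x0001, hci_flags(0x2001) == 0x2
+ */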
+
+/* ISO handle and flags pack/unpack */
+#define hci_iso_flags_pb(f) (f & 0x0003)
+#define hci_iso_flags_ts(f) ((f >> 2) & 0x0001)
+#define hci_iso_flags_pack(pb, ts) ((pb & 0x03) | ((ts & 0x01) << 2))
+
+/* ISO data length and flags pack/unpack */
+#define hci_iso_data_len_pack(h, f) ((__u16) ((h) | ((f) << 14)))
+#define hci_iso_data_len(h) ((h) & 0x3fff)
+#define hci_iso_data_flags(h) ((h) >> 14)
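+
+/* Example (illustrative): a 14-bit SDU length of 0x002a tagged with
+ * HCI_ISO_STATUS_INVALID (0x01) in the top two bits:
+ *
+ *	hci_iso_data_len_pack(0x002a, 0x01) == 0x402a
+ *	hci_iso_data_len(0x402a) == 0x002a
+ *	hci_iso_data_flags(0x402a) == 0x01
+ */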
+
+/* codec transport types */
+#define HCI_TRANSPORT_SCO_ESCO 0x01
+
+/* le24 support */
+static inline void hci_cpu_to_le24(__u32 val, __u8 dst[3])
+{
+ dst[0] = val & 0xff;
+ dst[1] = (val & 0xff00) >> 8;
+ dst[2] = (val & 0xff0000) >> 16;
+}
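+
+/* Inverse of hci_cpu_to_le24() — a minimal sketch; this helper is
+ * illustrative and not part of the original header.
+ */
+static inline __u32 hci_le24_to_cpu(const __u8 src[3])
+{
+	return src[0] | (src[1] << 8) | (src[2] << 16);
+}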
+
+#endif /* __HCI_H */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
new file mode 100644
index 000000000..09c978f3d
--- /dev/null
+++ b/include/net/bluetooth/hci_core.h
@@ -0,0 +1,2124 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __HCI_CORE_H
+#define __HCI_CORE_H
+
+#include <linux/idr.h>
+#include <linux/leds.h>
+#include <linux/rculist.h>
+
+#include <net/bluetooth/hci.h>
+#include <net/bluetooth/hci_sync.h>
+#include <net/bluetooth/hci_sock.h>
+
+/* HCI priority */
+#define HCI_PRIO_MAX 7
+
+/* HCI maximum id value */
+#define HCI_MAX_ID 10000
+
+/* HCI Core structures */
+struct inquiry_data {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+ __u8 pscan_period_mode;
+ __u8 pscan_mode;
+ __u8 dev_class[3];
+ __le16 clock_offset;
+ __s8 rssi;
+ __u8 ssp_mode;
+};
+
+struct inquiry_entry {
+ struct list_head all; /* inq_cache.all */
+ struct list_head list; /* unknown or resolve */
+ enum {
+ NAME_NOT_KNOWN,
+ NAME_NEEDED,
+ NAME_PENDING,
+ NAME_KNOWN,
+ } name_state;
+ __u32 timestamp;
+ struct inquiry_data data;
+};
+
+struct discovery_state {
+ int type;
+ enum {
+ DISCOVERY_STOPPED,
+ DISCOVERY_STARTING,
+ DISCOVERY_FINDING,
+ DISCOVERY_RESOLVING,
+ DISCOVERY_STOPPING,
+ } state;
+ struct list_head all; /* All devices found during inquiry */
+ struct list_head unknown; /* Name state not known */
+ struct list_head resolve; /* Name needs to be resolved */
+ __u32 timestamp;
+ bdaddr_t last_adv_addr;
+ u8 last_adv_addr_type;
+ s8 last_adv_rssi;
+ u32 last_adv_flags;
+ u8 last_adv_data[HCI_MAX_AD_LENGTH];
+ u8 last_adv_data_len;
+ bool report_invalid_rssi;
+ bool result_filtering;
+ bool limited;
+ s8 rssi;
+ u16 uuid_count;
+ u8 (*uuids)[16];
+ unsigned long scan_start;
+ unsigned long scan_duration;
+ unsigned long name_resolve_timeout;
+};
+
+#define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+
+enum suspend_tasks {
+ SUSPEND_PAUSE_DISCOVERY,
+ SUSPEND_UNPAUSE_DISCOVERY,
+
+ SUSPEND_PAUSE_ADVERTISING,
+ SUSPEND_UNPAUSE_ADVERTISING,
+
+ SUSPEND_SCAN_DISABLE,
+ SUSPEND_SCAN_ENABLE,
+ SUSPEND_DISCONNECTING,
+
+ SUSPEND_POWERING_DOWN,
+
+ SUSPEND_PREPARE_NOTIFIER,
+
+ SUSPEND_SET_ADV_FILTER,
+ __SUSPEND_NUM_TASKS
+};
+
+enum suspended_state {
+ BT_RUNNING = 0,
+ BT_SUSPEND_DISCONNECT,
+ BT_SUSPEND_CONFIGURE_WAKE,
+};
+
+struct hci_conn_hash {
+ struct list_head list;
+ unsigned int acl_num;
+ unsigned int amp_num;
+ unsigned int sco_num;
+ unsigned int iso_num;
+ unsigned int le_num;
+ unsigned int le_num_peripheral;
+};
+
+struct bdaddr_list {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+};
+
+struct codec_list {
+ struct list_head list;
+ u8 id;
+ __u16 cid;
+ __u16 vid;
+ u8 transport;
+ u8 num_caps;
+ u32 len;
+ struct hci_codec_caps caps[];
+};
+
+struct bdaddr_list_with_irk {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 peer_irk[16];
+ u8 local_irk[16];
+};
+
+/* Bitmask of connection flags */
+enum hci_conn_flags {
+ HCI_CONN_FLAG_REMOTE_WAKEUP = 1,
+ HCI_CONN_FLAG_DEVICE_PRIVACY = 2,
+};
+typedef u8 hci_conn_flags_t;
+
+struct bdaddr_list_with_flags {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ hci_conn_flags_t flags;
+};
+
+struct bt_uuid {
+ struct list_head list;
+ u8 uuid[16];
+ u8 size;
+ u8 svc_hint;
+};
+
+struct blocked_key {
+ struct list_head list;
+ struct rcu_head rcu;
+ u8 type;
+ u8 val[16];
+};
+
+struct smp_csrk {
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 link_type;
+ u8 type;
+ u8 val[16];
+};
+
+struct smp_ltk {
+ struct list_head list;
+ struct rcu_head rcu;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 link_type;
+ u8 authenticated;
+ u8 type;
+ u8 enc_size;
+ __le16 ediv;
+ __le64 rand;
+ u8 val[16];
+};
+
+struct smp_irk {
+ struct list_head list;
+ struct rcu_head rcu;
+ bdaddr_t rpa;
+ bdaddr_t bdaddr;
+ u8 addr_type;
+ u8 link_type;
+ u8 val[16];
+};
+
+struct link_key {
+ struct list_head list;
+ struct rcu_head rcu;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 link_type;
+ u8 type;
+ u8 val[HCI_LINK_KEY_SIZE];
+ u8 pin_len;
+};
+
+struct oob_data {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 present;
+ u8 hash192[16];
+ u8 rand192[16];
+ u8 hash256[16];
+ u8 rand256[16];
+};
+
+struct adv_info {
+ struct list_head list;
+ bool enabled;
+ bool pending;
+ bool periodic;
+ __u8 mesh;
+ __u8 instance;
+ __u32 flags;
+ __u16 timeout;
+ __u16 remaining_time;
+ __u16 duration;
+ __u16 adv_data_len;
+ __u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
+ bool adv_data_changed;
+ __u16 scan_rsp_len;
+ __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
+ bool scan_rsp_changed;
+ __u16 per_adv_data_len;
+ __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH];
+ __s8 tx_power;
+ __u32 min_interval;
+ __u32 max_interval;
+ bdaddr_t random_addr;
+ bool rpa_expired;
+ struct delayed_work rpa_expired_cb;
+};
+
+#define HCI_MAX_ADV_INSTANCES 5
+#define HCI_DEFAULT_ADV_DURATION 2
+
+#define HCI_ADV_TX_POWER_NO_PREFERENCE 0x7F
+
+#define DATA_CMP(_d1, _l1, _d2, _l2) \
+ (_l1 == _l2 ? memcmp(_d1, _d2, _l1) : _l1 - _l2)
+
+#define ADV_DATA_CMP(_adv, _data, _len) \
+ DATA_CMP((_adv)->adv_data, (_adv)->adv_data_len, _data, _len)
+
+#define SCAN_RSP_CMP(_adv, _data, _len) \
+ DATA_CMP((_adv)->scan_rsp_data, (_adv)->scan_rsp_len, _data, _len)
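+
+/* Usage sketch (illustrative; adv_data_matches() is hypothetical and not
+ * part of this header): the helpers return 0 only on an exact length and
+ * content match, so a boolean wrapper is a natural use:
+ *
+ *	static bool adv_data_matches(struct adv_info *adv,
+ *				     const u8 *data, u16 len)
+ *	{
+ *		return ADV_DATA_CMP(adv, data, len) == 0;
+ *	}
+ */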
+
+struct monitored_device {
+ struct list_head list;
+
+ bdaddr_t bdaddr;
+ __u8 addr_type;
+ __u16 handle;
+ bool notified;
+};
+
+struct adv_pattern {
+ struct list_head list;
+ __u8 ad_type;
+ __u8 offset;
+ __u8 length;
+ __u8 value[HCI_MAX_AD_LENGTH];
+};
+
+struct adv_rssi_thresholds {
+ __s8 low_threshold;
+ __s8 high_threshold;
+ __u16 low_threshold_timeout;
+ __u16 high_threshold_timeout;
+ __u8 sampling_period;
+};
+
+struct adv_monitor {
+ struct list_head patterns;
+ struct adv_rssi_thresholds rssi;
+ __u16 handle;
+
+ enum {
+ ADV_MONITOR_STATE_NOT_REGISTERED,
+ ADV_MONITOR_STATE_REGISTERED,
+ ADV_MONITOR_STATE_OFFLOADED
+ } state;
+};
+
+#define HCI_MIN_ADV_MONITOR_HANDLE 1
+#define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32
+#define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16
+#define HCI_ADV_MONITOR_EXT_NONE 1
+#define HCI_ADV_MONITOR_EXT_MSFT 2
+
+#define HCI_MAX_SHORT_NAME_LENGTH 10
+
+#define HCI_CONN_HANDLE_UNSET 0xffff
+#define HCI_CONN_HANDLE_MAX 0x0eff
+
+/* Min encryption key size to match with SMP */
+#define HCI_MIN_ENC_KEY_SIZE 7
+
+/* Default LE RPA expiry time, 15 minutes */
+#define HCI_DEFAULT_RPA_TIMEOUT (15 * 60)
+
+/* Default min/max age of connection information (1s/3s) */
+#define DEFAULT_CONN_INFO_MIN_AGE 1000
+#define DEFAULT_CONN_INFO_MAX_AGE 3000
+/* Default authenticated payload timeout 30s */
+#define DEFAULT_AUTH_PAYLOAD_TIMEOUT 0x0bb8
+
+struct amp_assoc {
+ __u16 len;
+ __u16 offset;
+ __u16 rem_len;
+ __u16 len_so_far;
+ __u8 data[HCI_MAX_AMP_ASSOC_SIZE];
+};
+
+#define HCI_MAX_PAGES 3
+
+struct hci_dev {
+ struct list_head list;
+ struct mutex lock;
+
+ const char *name;
+ unsigned long flags;
+ __u16 id;
+ __u8 bus;
+ __u8 dev_type;
+ bdaddr_t bdaddr;
+ bdaddr_t setup_addr;
+ bdaddr_t public_addr;
+ bdaddr_t random_addr;
+ bdaddr_t static_addr;
+ __u8 adv_addr_type;
+ __u8 dev_name[HCI_MAX_NAME_LENGTH];
+ __u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
+ __u8 eir[HCI_MAX_EIR_LENGTH];
+ __u16 appearance;
+ __u8 dev_class[3];
+ __u8 major_class;
+ __u8 minor_class;
+ __u8 max_page;
+ __u8 features[HCI_MAX_PAGES][8];
+ __u8 le_features[8];
+ __u8 le_accept_list_size;
+ __u8 le_resolv_list_size;
+ __u8 le_num_of_adv_sets;
+ __u8 le_states[8];
+ __u8 mesh_ad_types[16];
+ __u8 mesh_send_ref;
+ __u8 commands[64];
+ __u8 hci_ver;
+ __u16 hci_rev;
+ __u8 lmp_ver;
+ __u16 manufacturer;
+ __u16 lmp_subver;
+ __u16 voice_setting;
+ __u8 num_iac;
+ __u16 stored_max_keys;
+ __u16 stored_num_keys;
+ __u8 io_capability;
+ __s8 inq_tx_power;
+ __u8 err_data_reporting;
+ __u16 page_scan_interval;
+ __u16 page_scan_window;
+ __u8 page_scan_type;
+ __u8 le_adv_channel_map;
+ __u16 le_adv_min_interval;
+ __u16 le_adv_max_interval;
+ __u8 le_scan_type;
+ __u16 le_scan_interval;
+ __u16 le_scan_window;
+ __u16 le_scan_int_suspend;
+ __u16 le_scan_window_suspend;
+ __u16 le_scan_int_discovery;
+ __u16 le_scan_window_discovery;
+ __u16 le_scan_int_adv_monitor;
+ __u16 le_scan_window_adv_monitor;
+ __u16 le_scan_int_connect;
+ __u16 le_scan_window_connect;
+ __u16 le_conn_min_interval;
+ __u16 le_conn_max_interval;
+ __u16 le_conn_latency;
+ __u16 le_supv_timeout;
+ __u16 le_def_tx_len;
+ __u16 le_def_tx_time;
+ __u16 le_max_tx_len;
+ __u16 le_max_tx_time;
+ __u16 le_max_rx_len;
+ __u16 le_max_rx_time;
+ __u8 le_max_key_size;
+ __u8 le_min_key_size;
+ __u16 discov_interleaved_timeout;
+ __u16 conn_info_min_age;
+ __u16 conn_info_max_age;
+ __u16 auth_payload_timeout;
+ __u8 min_enc_key_size;
+ __u8 max_enc_key_size;
+ __u8 pairing_opts;
+ __u8 ssp_debug_mode;
+ __u8 hw_error_code;
+ __u32 clock;
+ __u16 advmon_allowlist_duration;
+ __u16 advmon_no_filter_duration;
+ __u8 enable_advmon_interleave_scan;
+
+ __u16 devid_source;
+ __u16 devid_vendor;
+ __u16 devid_product;
+ __u16 devid_version;
+
+ __u8 def_page_scan_type;
+ __u16 def_page_scan_int;
+ __u16 def_page_scan_window;
+ __u8 def_inq_scan_type;
+ __u16 def_inq_scan_int;
+ __u16 def_inq_scan_window;
+ __u16 def_br_lsto;
+ __u16 def_page_timeout;
+ __u16 def_multi_adv_rotation_duration;
+ __u16 def_le_autoconnect_timeout;
+ __s8 min_le_tx_power;
+ __s8 max_le_tx_power;
+
+ __u16 pkt_type;
+ __u16 esco_type;
+ __u16 link_policy;
+ __u16 link_mode;
+
+ __u32 idle_timeout;
+ __u16 sniff_min_interval;
+ __u16 sniff_max_interval;
+
+ __u8 amp_status;
+ __u32 amp_total_bw;
+ __u32 amp_max_bw;
+ __u32 amp_min_latency;
+ __u32 amp_max_pdu;
+ __u8 amp_type;
+ __u16 amp_pal_cap;
+ __u16 amp_assoc_size;
+ __u32 amp_max_flush_to;
+ __u32 amp_be_flush_to;
+
+ struct amp_assoc loc_assoc;
+
+ __u8 flow_ctl_mode;
+
+ unsigned int auto_accept_delay;
+
+ unsigned long quirks;
+
+ atomic_t cmd_cnt;
+ unsigned int acl_cnt;
+ unsigned int sco_cnt;
+ unsigned int le_cnt;
+ unsigned int iso_cnt;
+
+ unsigned int acl_mtu;
+ unsigned int sco_mtu;
+ unsigned int le_mtu;
+ unsigned int iso_mtu;
+ unsigned int acl_pkts;
+ unsigned int sco_pkts;
+ unsigned int le_pkts;
+ unsigned int iso_pkts;
+
+ __u16 block_len;
+ __u16 block_mtu;
+ __u16 num_blocks;
+ __u16 block_cnt;
+
+ unsigned long acl_last_tx;
+ unsigned long sco_last_tx;
+ unsigned long le_last_tx;
+
+ __u8 le_tx_def_phys;
+ __u8 le_rx_def_phys;
+
+ struct workqueue_struct *workqueue;
+ struct workqueue_struct *req_workqueue;
+
+ struct work_struct power_on;
+ struct delayed_work power_off;
+ struct work_struct error_reset;
+ struct work_struct cmd_sync_work;
+ struct list_head cmd_sync_work_list;
+ struct mutex cmd_sync_work_lock;
+ struct mutex unregister_lock;
+ struct work_struct cmd_sync_cancel_work;
+ struct work_struct reenable_adv_work;
+
+ __u16 discov_timeout;
+ struct delayed_work discov_off;
+
+ struct delayed_work service_cache;
+
+ struct delayed_work cmd_timer;
+ struct delayed_work ncmd_timer;
+
+ struct work_struct rx_work;
+ struct work_struct cmd_work;
+ struct work_struct tx_work;
+
+ struct delayed_work le_scan_disable;
+ struct delayed_work le_scan_restart;
+
+ struct sk_buff_head rx_q;
+ struct sk_buff_head raw_q;
+ struct sk_buff_head cmd_q;
+
+ struct sk_buff *sent_cmd;
+ struct sk_buff *recv_event;
+
+ struct mutex req_lock;
+ wait_queue_head_t req_wait_q;
+ __u32 req_status;
+ __u32 req_result;
+ struct sk_buff *req_skb;
+
+ void *smp_data;
+ void *smp_bredr_data;
+
+ struct discovery_state discovery;
+
+ int discovery_old_state;
+ bool discovery_paused;
+ int advertising_old_state;
+ bool advertising_paused;
+
+ struct notifier_block suspend_notifier;
+ enum suspended_state suspend_state_next;
+ enum suspended_state suspend_state;
+ bool scanning_paused;
+ bool suspended;
+ u8 wake_reason;
+ bdaddr_t wake_addr;
+ u8 wake_addr_type;
+
+ struct hci_conn_hash conn_hash;
+
+ struct list_head mesh_pending;
+ struct list_head mgmt_pending;
+ struct list_head reject_list;
+ struct list_head accept_list;
+ struct list_head uuids;
+ struct list_head link_keys;
+ struct list_head long_term_keys;
+ struct list_head identity_resolving_keys;
+ struct list_head remote_oob_data;
+ struct list_head le_accept_list;
+ struct list_head le_resolv_list;
+ struct list_head le_conn_params;
+ struct list_head pend_le_conns;
+ struct list_head pend_le_reports;
+ struct list_head blocked_keys;
+ struct list_head local_codecs;
+
+ struct hci_dev_stats stat;
+
+ atomic_t promisc;
+
+ const char *hw_info;
+ const char *fw_info;
+ struct dentry *debugfs;
+
+ struct device dev;
+
+ struct rfkill *rfkill;
+
+ DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
+ hci_conn_flags_t conn_flags;
+
+ __s8 adv_tx_power;
+ __u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
+ __u8 adv_data_len;
+ __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
+ __u8 scan_rsp_data_len;
+ __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH];
+ __u8 per_adv_data_len;
+
+ struct list_head adv_instances;
+ unsigned int adv_instance_cnt;
+ __u8 cur_adv_instance;
+ __u16 adv_instance_timeout;
+ struct delayed_work adv_instance_expire;
+
+ struct idr adv_monitors_idr;
+ unsigned int adv_monitors_cnt;
+
+ __u8 irk[16];
+ __u32 rpa_timeout;
+ struct delayed_work rpa_expired;
+ bdaddr_t rpa;
+
+ struct delayed_work mesh_send_done;
+
+ enum {
+ INTERLEAVE_SCAN_NONE,
+ INTERLEAVE_SCAN_NO_FILTER,
+ INTERLEAVE_SCAN_ALLOWLIST
+ } interleave_scan_state;
+
+ struct delayed_work interleave_scan;
+
+ struct list_head monitored_devices;
+ bool advmon_pend_notify;
+
+#if IS_ENABLED(CONFIG_BT_LEDS)
+ struct led_trigger *power_led;
+#endif
+
+#if IS_ENABLED(CONFIG_BT_MSFTEXT)
+ __u16 msft_opcode;
+ void *msft_data;
+ bool msft_curve_validity;
+#endif
+
+#if IS_ENABLED(CONFIG_BT_AOSPEXT)
+ bool aosp_capable;
+ bool aosp_quality_report;
+#endif
+
+ int (*open)(struct hci_dev *hdev);
+ int (*close)(struct hci_dev *hdev);
+ int (*flush)(struct hci_dev *hdev);
+ int (*setup)(struct hci_dev *hdev);
+ int (*shutdown)(struct hci_dev *hdev);
+ int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
+ void (*notify)(struct hci_dev *hdev, unsigned int evt);
+ void (*hw_error)(struct hci_dev *hdev, u8 code);
+ int (*post_init)(struct hci_dev *hdev);
+ int (*set_diag)(struct hci_dev *hdev, bool enable);
+ int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+ void (*cmd_timeout)(struct hci_dev *hdev);
+ bool (*wakeup)(struct hci_dev *hdev);
+ int (*set_quality_report)(struct hci_dev *hdev, bool enable);
+ int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path);
+ int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type,
+ struct bt_codec *codec, __u8 *vnd_len,
+ __u8 **vnd_data);
+};
+
+#define HCI_PHY_HANDLE(handle) (handle & 0xff)
+
+enum conn_reasons {
+ CONN_REASON_PAIR_DEVICE,
+ CONN_REASON_L2CAP_CHAN,
+ CONN_REASON_SCO_CONNECT,
+ CONN_REASON_ISO_CONNECT,
+};
+
+struct hci_conn {
+ struct list_head list;
+
+ atomic_t refcnt;
+
+ bdaddr_t dst;
+ __u8 dst_type;
+ bdaddr_t src;
+ __u8 src_type;
+ bdaddr_t init_addr;
+ __u8 init_addr_type;
+ bdaddr_t resp_addr;
+ __u8 resp_addr_type;
+ __u8 adv_instance;
+ __u16 handle;
+ __u16 sync_handle;
+ __u16 state;
+ __u8 mode;
+ __u8 type;
+ __u8 role;
+ bool out;
+ __u8 attempt;
+ __u8 dev_class[3];
+ __u8 features[HCI_MAX_PAGES][8];
+ __u16 pkt_type;
+ __u16 link_policy;
+ __u8 key_type;
+ __u8 auth_type;
+ __u8 sec_level;
+ __u8 pending_sec_level;
+ __u8 pin_length;
+ __u8 enc_key_size;
+ __u8 io_capability;
+ __u32 passkey_notify;
+ __u8 passkey_entered;
+ __u16 disc_timeout;
+ __u16 conn_timeout;
+ __u16 setting;
+ __u16 auth_payload_timeout;
+ __u16 le_conn_min_interval;
+ __u16 le_conn_max_interval;
+ __u16 le_conn_interval;
+ __u16 le_conn_latency;
+ __u16 le_supv_timeout;
+ __u8 le_adv_data[HCI_MAX_AD_LENGTH];
+ __u8 le_adv_data_len;
+ __u8 le_per_adv_data[HCI_MAX_PER_AD_LENGTH];
+ __u8 le_per_adv_data_len;
+ __u8 le_tx_phy;
+ __u8 le_rx_phy;
+ __s8 rssi;
+ __s8 tx_power;
+ __s8 max_tx_power;
+ struct bt_iso_qos iso_qos;
+ unsigned long flags;
+
+ enum conn_reasons conn_reason;
+
+ __u32 clock;
+ __u16 clock_accuracy;
+
+ unsigned long conn_info_timestamp;
+
+ __u8 remote_cap;
+ __u8 remote_auth;
+ __u8 remote_id;
+
+ unsigned int sent;
+
+ struct sk_buff_head data_q;
+ struct list_head chan_list;
+
+ struct delayed_work disc_work;
+ struct delayed_work auto_accept_work;
+ struct delayed_work idle_work;
+ struct delayed_work le_conn_timeout;
+ struct work_struct le_scan_cleanup;
+
+ struct device dev;
+ struct dentry *debugfs;
+
+ struct hci_dev *hdev;
+ void *l2cap_data;
+ void *sco_data;
+ void *iso_data;
+ struct amp_mgr *amp_mgr;
+
+ struct hci_conn *link;
+ struct bt_codec codec;
+
+ void (*connect_cfm_cb) (struct hci_conn *conn, u8 status);
+ void (*security_cfm_cb) (struct hci_conn *conn, u8 status);
+ void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason);
+
+ void (*cleanup)(struct hci_conn *conn);
+};
+
+struct hci_chan {
+ struct list_head list;
+ __u16 handle;
+ struct hci_conn *conn;
+ struct sk_buff_head data_q;
+ unsigned int sent;
+ __u8 state;
+ bool amp;
+};
+
+struct hci_conn_params {
+ struct list_head list;
+ struct list_head action;
+
+ bdaddr_t addr;
+ u8 addr_type;
+
+ u16 conn_min_interval;
+ u16 conn_max_interval;
+ u16 conn_latency;
+ u16 supervision_timeout;
+
+ enum {
+ HCI_AUTO_CONN_DISABLED,
+ HCI_AUTO_CONN_REPORT,
+ HCI_AUTO_CONN_DIRECT,
+ HCI_AUTO_CONN_ALWAYS,
+ HCI_AUTO_CONN_LINK_LOSS,
+ HCI_AUTO_CONN_EXPLICIT,
+ } auto_connect;
+
+ struct hci_conn *conn;
+ bool explicit_connect;
+ /* Accessed without hdev->lock: */
+ hci_conn_flags_t flags;
+ u8 privacy_mode;
+};
+
+extern struct list_head hci_dev_list;
+extern struct list_head hci_cb_list;
+extern rwlock_t hci_dev_list_lock;
+extern struct mutex hci_cb_list_lock;
+
+#define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags)
+#define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags)
+#define hci_dev_change_flag(hdev, nr) change_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_flag(hdev, nr) test_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_set_flag(hdev, nr) test_and_set_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags)
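+
+/* A minimal usage sketch (illustrative only, not part of this header):
+ * the flag helpers above wrap atomic bitops, so one-shot state
+ * transitions are typically guarded like this:
+ *
+ *	if (hci_dev_test_and_set_flag(hdev, HCI_LE_SCAN))
+ *		return 0;
+ *	... start LE scanning, then later ...
+ *	hci_dev_clear_flag(hdev, HCI_LE_SCAN);
+ */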
+
+#define hci_dev_clear_volatile_flags(hdev) \
+ do { \
+ hci_dev_clear_flag(hdev, HCI_LE_SCAN); \
+ hci_dev_clear_flag(hdev, HCI_LE_ADV); \
+ hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\
+ hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \
+ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \
+ } while (0)
+
+#define hci_dev_le_state_simultaneous(hdev) \
+ (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) && \
+ (hdev->le_states[4] & 0x08) && /* Central */ \
+ (hdev->le_states[4] & 0x40) && /* Peripheral */ \
+ (hdev->le_states[3] & 0x10)) /* Simultaneous */
+
+/* ----- HCI interface to upper protocols ----- */
+int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int l2cap_disconn_ind(struct hci_conn *hcon);
+void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+
+#if IS_ENABLED(CONFIG_BT_BREDR)
+int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
+void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+#else
+static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ __u8 *flags)
+{
+ return 0;
+}
+
+static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_BT_LE)
+int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
+void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+#else
+static inline int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ __u8 *flags)
+{
+ return 0;
+}
+static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb,
+ u16 flags)
+{
+}
+#endif
+
+/* ----- Inquiry cache ----- */
+#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */
+#define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */
+
+static inline void discovery_init(struct hci_dev *hdev)
+{
+ hdev->discovery.state = DISCOVERY_STOPPED;
+ INIT_LIST_HEAD(&hdev->discovery.all);
+ INIT_LIST_HEAD(&hdev->discovery.unknown);
+ INIT_LIST_HEAD(&hdev->discovery.resolve);
+ hdev->discovery.report_invalid_rssi = true;
+ hdev->discovery.rssi = HCI_RSSI_INVALID;
+}
+
+static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
+{
+ hdev->discovery.result_filtering = false;
+ hdev->discovery.report_invalid_rssi = true;
+ hdev->discovery.rssi = HCI_RSSI_INVALID;
+ hdev->discovery.uuid_count = 0;
+ kfree(hdev->discovery.uuids);
+ hdev->discovery.uuids = NULL;
+ hdev->discovery.scan_start = 0;
+ hdev->discovery.scan_duration = 0;
+}
+
+bool hci_discovery_active(struct hci_dev *hdev);
+
+void hci_discovery_set_state(struct hci_dev *hdev, int state);
+
+static inline int inquiry_cache_empty(struct hci_dev *hdev)
+{
+ return list_empty(&hdev->discovery.all);
+}
+
+static inline long inquiry_cache_age(struct hci_dev *hdev)
+{
+ struct discovery_state *c = &hdev->discovery;
+ return jiffies - c->timestamp;
+}
+
+static inline long inquiry_entry_age(struct inquiry_entry *e)
+{
+ return jiffies - e->timestamp;
+}
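+
+/* Illustrative sketch (assumed caller, not part of this header): the age
+ * helpers above are compared against the *_AGE_MAX limits to decide
+ * whether cached results are still fresh:
+ *
+ *	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX)
+ *		hci_inquiry_cache_flush(hdev);
+ */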
+
+struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
+ bdaddr_t *bdaddr);
+struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
+ bdaddr_t *bdaddr);
+struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
+ bdaddr_t *bdaddr,
+ int state);
+void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
+ struct inquiry_entry *ie);
+u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
+ bool name_known);
+void hci_inquiry_cache_flush(struct hci_dev *hdev);
+
+/* ----- HCI Connections ----- */
+enum {
+ HCI_CONN_AUTH_PEND,
+ HCI_CONN_ENCRYPT_PEND,
+ HCI_CONN_RSWITCH_PEND,
+ HCI_CONN_MODE_CHANGE_PEND,
+ HCI_CONN_SCO_SETUP_PEND,
+ HCI_CONN_MGMT_CONNECTED,
+ HCI_CONN_SSP_ENABLED,
+ HCI_CONN_SC_ENABLED,
+ HCI_CONN_AES_CCM,
+ HCI_CONN_POWER_SAVE,
+ HCI_CONN_FLUSH_KEY,
+ HCI_CONN_ENCRYPT,
+ HCI_CONN_AUTH,
+ HCI_CONN_SECURE,
+ HCI_CONN_FIPS,
+ HCI_CONN_STK_ENCRYPT,
+ HCI_CONN_AUTH_INITIATOR,
+ HCI_CONN_DROP,
+ HCI_CONN_CANCEL,
+ HCI_CONN_PARAM_REMOVAL_PEND,
+ HCI_CONN_NEW_LINK_KEY,
+ HCI_CONN_SCANNING,
+ HCI_CONN_AUTH_FAILURE,
+ HCI_CONN_PER_ADV,
+};
+
+static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
+ test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
+}
+
+static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ return hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
+ test_bit(HCI_CONN_SC_ENABLED, &conn->flags);
+}
+
+static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ list_add_rcu(&c->list, &h->list);
+ switch (c->type) {
+ case ACL_LINK:
+ h->acl_num++;
+ break;
+ case AMP_LINK:
+ h->amp_num++;
+ break;
+ case LE_LINK:
+ h->le_num++;
+ if (c->role == HCI_ROLE_SLAVE)
+ h->le_num_peripheral++;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ h->sco_num++;
+ break;
+ case ISO_LINK:
+ h->iso_num++;
+ break;
+ }
+}
+
+static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+
+ list_del_rcu(&c->list);
+ synchronize_rcu();
+
+ switch (c->type) {
+ case ACL_LINK:
+ h->acl_num--;
+ break;
+ case AMP_LINK:
+ h->amp_num--;
+ break;
+ case LE_LINK:
+ h->le_num--;
+ if (c->role == HCI_ROLE_SLAVE)
+ h->le_num_peripheral--;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ h->sco_num--;
+ break;
+ case ISO_LINK:
+ h->iso_num--;
+ break;
+ }
+}
+
+static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ switch (type) {
+ case ACL_LINK:
+ return h->acl_num;
+ case AMP_LINK:
+ return h->amp_num;
+ case LE_LINK:
+ return h->le_num;
+ case SCO_LINK:
+ case ESCO_LINK:
+ return h->sco_num;
+ case ISO_LINK:
+ return h->iso_num;
+ default:
+ return 0;
+ }
+}
+
+static inline unsigned int hci_conn_count(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *c = &hdev->conn_hash;
+
+ return c->acl_num + c->amp_num + c->sco_num + c->le_num + c->iso_num;
+}
+
+static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+ __u8 type = INVALID_LINK;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->handle == handle) {
+ type = c->type;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return type;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
+ bdaddr_t *ba,
+ __u8 big, __u8 bis)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (bacmp(&c->dst, ba) || c->type != ISO_LINK)
+ continue;
+
+ if (c->iso_qos.big == big && c->iso_qos.bis == bis) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
+ __u16 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->handle == handle) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
+ __u8 type, bdaddr_t *ba)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && !bacmp(&c->dst, ba)) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
+ bdaddr_t *ba,
+ __u8 ba_type)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != LE_LINK)
+ continue;
+
+ if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev,
+ bdaddr_t *ba,
+ __u8 ba_type)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != ISO_LINK)
+ continue;
+
+ if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev,
+ __u8 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != ISO_LINK)
+ continue;
+
+ if (handle == c->iso_qos.cig) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
+ __u8 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK)
+ continue;
+
+ if (handle == c->iso_qos.big) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
+ __u8 type, __u16 state)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && c->state == state) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+typedef void (*hci_conn_func_t)(struct hci_conn *conn, void *data);
+static inline void hci_conn_hash_list_state(struct hci_dev *hdev,
+ hci_conn_func_t func, __u8 type,
+ __u16 state, void *data)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ if (!func)
+ return;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && c->state == state)
+ func(c, data);
+ }
+
+ rcu_read_unlock();
+}
+
+static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == LE_LINK && c->state == BT_CONNECT &&
+ !test_bit(HCI_CONN_SCANNING, &c->flags)) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+int hci_disconnect(struct hci_conn *conn, __u8 reason);
+bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
+void hci_sco_setup(struct hci_conn *conn, __u8 status);
+bool hci_iso_setup_path(struct hci_conn *conn);
+int hci_le_create_cis(struct hci_conn *conn);
+
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ u8 role);
+int hci_conn_del(struct hci_conn *conn);
+void hci_conn_hash_flush(struct hci_dev *hdev);
+void hci_conn_check_pending(struct hci_dev *hdev);
+
+struct hci_chan *hci_chan_create(struct hci_conn *conn);
+void hci_chan_del(struct hci_chan *chan);
+void hci_chan_list_flush(struct hci_conn *conn);
+struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
+
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 dst_type, u8 sec_level,
+ u16 conn_timeout,
+ enum conn_reasons conn_reason);
+struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 dst_type, bool dst_resolved, u8 sec_level,
+ u16 conn_timeout, u8 role);
+struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 sec_level, u8 auth_type,
+ enum conn_reasons conn_reason);
+struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ __u16 setting, struct bt_codec *codec);
+struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, struct bt_iso_qos *qos);
+struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, struct bt_iso_qos *qos);
+struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, struct bt_iso_qos *qos,
+ __u8 data_len, __u8 *data);
+int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
+ __u8 sid);
+int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
+ __u16 sync_handle, __u8 num_bis, __u8 bis[]);
+int hci_conn_check_link_mode(struct hci_conn *conn);
+int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
+int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
+ bool initiator);
+int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
+
+void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
+
+void hci_conn_failed(struct hci_conn *conn, u8 status);
+
+/*
+ * hci_conn_get() and hci_conn_put() control the life-time of an hci_conn
+ * object. They do not guarantee that the hci_conn object is running,
+ * working or anything else; they only guarantee that the object is
+ * available and can be dereferenced, so its locks, local variables and
+ * any other constant data can be used safely.
+ * Before accessing runtime data, you _must_ lock the object and then
+ * check that it is still running. As soon as you release the locks, the
+ * connection might get dropped, though.
+ *
+ * hci_conn_hold() and hci_conn_drop(), on the other hand, control how
+ * long the underlying connection is held. Every channel that runs on the
+ * hci_conn object calls these to prevent the connection from
+ * disappearing. As long as you hold a device, you must also guarantee
+ * that you have a valid reference to it via hci_conn_get() (or the
+ * initial reference from hci_conn_add()).
+ * The hold()/drop() ref-count is known to drop below 0 sometimes, which
+ * doesn't break anything because nobody cares about that. But it means
+ * we cannot use _get()/_drop() inside them and instead require the
+ * caller to hold a valid ref (FIXME).
+ */
+
+static inline struct hci_conn *hci_conn_get(struct hci_conn *conn)
+{
+ get_device(&conn->dev);
+ return conn;
+}
+
+static inline void hci_conn_put(struct hci_conn *conn)
+{
+ put_device(&conn->dev);
+}
+
+static inline void hci_conn_hold(struct hci_conn *conn)
+{
+ BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
+
+ atomic_inc(&conn->refcnt);
+ cancel_delayed_work(&conn->disc_work);
+}
+
+static inline void hci_conn_drop(struct hci_conn *conn)
+{
+ BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
+
+ if (atomic_dec_and_test(&conn->refcnt)) {
+ unsigned long timeo;
+
+ switch (conn->type) {
+ case ACL_LINK:
+ case LE_LINK:
+ cancel_delayed_work(&conn->idle_work);
+ if (conn->state == BT_CONNECTED) {
+ timeo = conn->disc_timeout;
+ if (!conn->out)
+ timeo *= 2;
+ } else {
+ timeo = 0;
+ }
+ break;
+
+ case AMP_LINK:
+ timeo = conn->disc_timeout;
+ break;
+
+ default:
+ timeo = 0;
+ break;
+ }
+
+ cancel_delayed_work(&conn->disc_work);
+ queue_delayed_work(conn->hdev->workqueue,
+ &conn->disc_work, timeo);
+ }
+}
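+
+/* Usage sketch following the life-time rules documented above (the
+ * surrounding code is hypothetical, shown only to pair the calls):
+ *
+ *	hci_conn_hold(conn);
+ *	... run a channel on the connection ...
+ *	hci_conn_drop(conn);
+ *
+ * where the caller already owns a reference from hci_conn_get() or
+ * hci_conn_add(), as required above.
+ */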
+
+/* ----- HCI Devices ----- */
+static inline void hci_dev_put(struct hci_dev *d)
+{
+ BT_DBG("%s orig refcnt %d", d->name,
+ kref_read(&d->dev.kobj.kref));
+
+ put_device(&d->dev);
+}
+
+static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
+{
+ BT_DBG("%s orig refcnt %d", d->name,
+ kref_read(&d->dev.kobj.kref));
+
+ get_device(&d->dev);
+ return d;
+}
+
+#define hci_dev_lock(d) mutex_lock(&d->lock)
+#define hci_dev_unlock(d) mutex_unlock(&d->lock)
+
+#define to_hci_dev(d) container_of(d, struct hci_dev, dev)
+#define to_hci_conn(c) container_of(c, struct hci_conn, dev)
+
+static inline void *hci_get_drvdata(struct hci_dev *hdev)
+{
+ return dev_get_drvdata(&hdev->dev);
+}
+
+static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
+{
+ dev_set_drvdata(&hdev->dev, data);
+}
+
+static inline void *hci_get_priv(struct hci_dev *hdev)
+{
+ return (char *)hdev + sizeof(*hdev);
+}
+
+struct hci_dev *hci_dev_get(int index);
+struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type);
+
+struct hci_dev *hci_alloc_dev_priv(int sizeof_priv);
+
+static inline struct hci_dev *hci_alloc_dev(void)
+{
+ return hci_alloc_dev_priv(0);
+}
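+
+/* Driver-side sketch (hypothetical probe path, error handling trimmed)
+ * showing the alloc/register/free pairing:
+ *
+ *	hdev = hci_alloc_dev();
+ *	if (!hdev)
+ *		return -ENOMEM;
+ *	SET_HCIDEV_DEV(hdev, &pdev->dev);
+ *	hci_set_drvdata(hdev, priv);
+ *	err = hci_register_dev(hdev);
+ *	if (err < 0)
+ *		hci_free_dev(hdev);
+ */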
+
+void hci_free_dev(struct hci_dev *hdev);
+int hci_register_dev(struct hci_dev *hdev);
+void hci_unregister_dev(struct hci_dev *hdev);
+void hci_release_dev(struct hci_dev *hdev);
+int hci_register_suspend_notifier(struct hci_dev *hdev);
+int hci_unregister_suspend_notifier(struct hci_dev *hdev);
+int hci_suspend_dev(struct hci_dev *hdev);
+int hci_resume_dev(struct hci_dev *hdev);
+int hci_reset_dev(struct hci_dev *hdev);
+int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
+int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
+__printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
+__printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
+
+static inline void hci_set_msft_opcode(struct hci_dev *hdev, __u16 opcode)
+{
+#if IS_ENABLED(CONFIG_BT_MSFTEXT)
+ hdev->msft_opcode = opcode;
+#endif
+}
+
+static inline void hci_set_aosp_capable(struct hci_dev *hdev)
+{
+#if IS_ENABLED(CONFIG_BT_AOSPEXT)
+ hdev->aosp_capable = true;
+#endif
+}
+
+int hci_dev_open(__u16 dev);
+int hci_dev_close(__u16 dev);
+int hci_dev_do_close(struct hci_dev *hdev);
+int hci_dev_reset(__u16 dev);
+int hci_dev_reset_stat(__u16 dev);
+int hci_dev_cmd(unsigned int cmd, void __user *arg);
+int hci_get_dev_list(void __user *arg);
+int hci_get_dev_info(void __user *arg);
+int hci_get_conn_list(void __user *arg);
+int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
+int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
+int hci_inquiry(void __user *arg);
+
+struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list,
+ bdaddr_t *bdaddr, u8 type);
+struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
+ struct list_head *list, bdaddr_t *bdaddr,
+ u8 type);
+struct bdaddr_list_with_flags *
+hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type);
+int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type);
+int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type, u8 *peer_irk, u8 *local_irk);
+int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type, u32 flags);
+int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
+int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type);
+int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type);
+void hci_bdaddr_list_clear(struct list_head *list);
+
+struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
+ bdaddr_t *addr, u8 addr_type);
+struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
+ bdaddr_t *addr, u8 addr_type);
+void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
+void hci_conn_params_clear_disabled(struct hci_dev *hdev);
+void hci_conn_params_free(struct hci_conn_params *param);
+
+void hci_pend_le_list_del_init(struct hci_conn_params *param);
+void hci_pend_le_list_add(struct hci_conn_params *param,
+ struct list_head *list);
+struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
+ bdaddr_t *addr,
+ u8 addr_type);
+
+void hci_uuids_clear(struct hci_dev *hdev);
+
+void hci_link_keys_clear(struct hci_dev *hdev);
+struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
+struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
+ bdaddr_t *bdaddr, u8 *val, u8 type,
+ u8 pin_len, bool *persistent);
+struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, u8 type, u8 authenticated,
+ u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand);
+struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, u8 role);
+int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type);
+void hci_smp_ltks_clear(struct hci_dev *hdev);
+int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
+
+struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa);
+struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type);
+struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, u8 val[16], bdaddr_t *rpa);
+void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
+bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]);
+void hci_blocked_keys_clear(struct hci_dev *hdev);
+void hci_smp_irks_clear(struct hci_dev *hdev);
+
+bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+
+void hci_remote_oob_data_clear(struct hci_dev *hdev);
+struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 bdaddr_type);
+int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 bdaddr_type, u8 *hash192, u8 *rand192,
+ u8 *hash256, u8 *rand256);
+int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 bdaddr_type);
+
+void hci_adv_instances_clear(struct hci_dev *hdev);
+struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance);
+struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance);
+struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
+ u32 flags, u16 adv_data_len, u8 *adv_data,
+ u16 scan_rsp_len, u8 *scan_rsp_data,
+ u16 timeout, u16 duration, s8 tx_power,
+ u32 min_interval, u32 max_interval,
+ u8 mesh_handle);
+struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
+ u32 flags, u8 data_len, u8 *data,
+ u32 min_interval, u32 max_interval);
+int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
+ u16 adv_data_len, u8 *adv_data,
+ u16 scan_rsp_len, u8 *scan_rsp_data);
+int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
+void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
+u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance);
+bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance);
+
+void hci_adv_monitors_clear(struct hci_dev *hdev);
+void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
+int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
+int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle);
+int hci_remove_all_adv_monitor(struct hci_dev *hdev);
+bool hci_is_adv_monitoring(struct hci_dev *hdev);
+int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev);
+
+void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
+
+void hci_init_sysfs(struct hci_dev *hdev);
+void hci_conn_init_sysfs(struct hci_conn *conn);
+void hci_conn_add_sysfs(struct hci_conn *conn);
+void hci_conn_del_sysfs(struct hci_conn *conn);
+
+#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
+
+/* ----- LMP capabilities ----- */
+#define lmp_encrypt_capable(dev) ((dev)->features[0][0] & LMP_ENCRYPT)
+#define lmp_rswitch_capable(dev) ((dev)->features[0][0] & LMP_RSWITCH)
+#define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD)
+#define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF)
+#define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK)
+#define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ)
+#define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO)
+#define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR))
+#define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE)
+#define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
+#define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
+#define lmp_esco_2m_capable(dev) ((dev)->features[0][5] & LMP_EDR_ESCO_2M)
+#define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ)
+#define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
+#define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
+#define lmp_no_flush_capable(dev) ((dev)->features[0][6] & LMP_NO_FLUSH)
+#define lmp_lsto_capable(dev) ((dev)->features[0][7] & LMP_LSTO)
+#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
+#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
+#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT)
+#define lmp_edr_2m_capable(dev) ((dev)->features[0][3] & LMP_EDR_2M)
+#define lmp_edr_3m_capable(dev) ((dev)->features[0][3] & LMP_EDR_3M)
+#define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT)
+#define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT)
+
+/* ----- Extended LMP capabilities ----- */
+#define lmp_cpb_central_capable(dev) ((dev)->features[2][0] & LMP_CPB_CENTRAL)
+#define lmp_cpb_peripheral_capable(dev) ((dev)->features[2][0] & LMP_CPB_PERIPHERAL)
+#define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN)
+#define lmp_sync_scan_capable(dev) ((dev)->features[2][0] & LMP_SYNC_SCAN)
+#define lmp_sc_capable(dev) ((dev)->features[2][1] & LMP_SC)
+#define lmp_ping_capable(dev) ((dev)->features[2][1] & LMP_PING)
+
+/* ----- Host capabilities ----- */
+#define lmp_host_ssp_capable(dev) ((dev)->features[1][0] & LMP_HOST_SSP)
+#define lmp_host_sc_capable(dev) ((dev)->features[1][0] & LMP_HOST_SC)
+#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
+#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
+
+#define hdev_is_powered(dev) (test_bit(HCI_UP, &(dev)->flags) && \
+ !hci_dev_test_flag(dev, HCI_AUTO_OFF))
+#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
+ hci_dev_test_flag(dev, HCI_SC_ENABLED))
+#define rpa_valid(dev) (bacmp(&dev->rpa, BDADDR_ANY) && \
+ !hci_dev_test_flag(dev, HCI_RPA_EXPIRED))
+#define adv_rpa_valid(adv) (bacmp(&adv->random_addr, BDADDR_ANY) && \
+ !adv->rpa_expired)
+
+#define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
+ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))
+
+#define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \
+ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M))
+
+#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
+ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
+
+#define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
+
+/* Use LL Privacy based address resolution if supported */
+#define use_ll_privacy(dev) (ll_privacy_capable(dev) && \
+ hci_dev_test_flag(dev, HCI_ENABLE_LL_PRIVACY))
+
+#define privacy_mode_capable(dev) (use_ll_privacy(dev) && \
+				   ((dev)->commands[39] & 0x04))
+
+/* Use the enhanced synchronous connection command if it is supported and
+ * the quirk disabling it has not been set.
+ */
+#define enhanced_sync_conn_capable(dev) \
+	(((dev)->commands[29] & 0x08) && \
+	 !test_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &(dev)->quirks))
+
+/* Use extended scanning if both the Set Extended Scan Parameters and the
+ * Set Extended Scan Enable commands are supported
+ */
+#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
+			   ((dev)->commands[37] & 0x40) && \
+			   !test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks))
+
+/* Use the Extended Create Connection command if it is supported */
+#define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
+
+/* Extended advertising support */
+#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
+
+/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789:
+ *
+ * C24: Mandatory if the LE Controller supports Connection State and either
+ * LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported
+ */
+#define use_enhanced_conn_complete(dev) (ll_privacy_capable(dev) || \
+ ext_adv_capable(dev))
+
+/* Periodic advertising support */
+#define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV))
+
+/* CIS Master/Slave and BIS support */
+#define iso_capable(dev) (cis_capable(dev) || bis_capable(dev))
+#define cis_capable(dev) \
+ (cis_central_capable(dev) || cis_peripheral_capable(dev))
+#define cis_central_capable(dev) \
+ ((dev)->le_features[3] & HCI_LE_CIS_CENTRAL)
+#define cis_peripheral_capable(dev) \
+ ((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL)
+#define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER)
+
+#define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \
+ (!test_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &(dev)->quirks)))
+
+/* ----- HCI protocols ----- */
+#define HCI_PROTO_DEFER 0x01
+
+static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ __u8 type, __u8 *flags)
+{
+ switch (type) {
+ case ACL_LINK:
+ return l2cap_connect_ind(hdev, bdaddr);
+
+ case SCO_LINK:
+ case ESCO_LINK:
+ return sco_connect_ind(hdev, bdaddr, flags);
+
+ case ISO_LINK:
+ return iso_connect_ind(hdev, bdaddr, flags);
+
+ default:
+ BT_ERR("unknown link type %d", type);
+ return -EINVAL;
+ }
+}
+
+static inline int hci_proto_disconn_ind(struct hci_conn *conn)
+{
+ if (conn->type != ACL_LINK && conn->type != LE_LINK)
+ return HCI_ERROR_REMOTE_USER_TERM;
+
+ return l2cap_disconn_ind(conn);
+}
+
+/* ----- HCI callbacks ----- */
+struct hci_cb {
+ struct list_head list;
+
+ char *name;
+
+ void (*connect_cfm) (struct hci_conn *conn, __u8 status);
+ void (*disconn_cfm) (struct hci_conn *conn, __u8 status);
+ void (*security_cfm) (struct hci_conn *conn, __u8 status,
+ __u8 encrypt);
+ void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
+ void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
+};
+
+static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
+{
+ struct hci_cb *cb;
+
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->connect_cfm)
+ cb->connect_cfm(conn, status);
+ }
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->connect_cfm_cb)
+ conn->connect_cfm_cb(conn, status);
+}
+
+static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
+{
+ struct hci_cb *cb;
+
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->disconn_cfm)
+ cb->disconn_cfm(conn, reason);
+ }
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->disconn_cfm_cb)
+ conn->disconn_cfm_cb(conn, reason);
+}
+
+static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
+{
+ struct hci_cb *cb;
+ __u8 encrypt;
+
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
+ return;
+
+ encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
+
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->security_cfm)
+ cb->security_cfm(conn, status, encrypt);
+ }
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
+}
+
+static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
+{
+ struct hci_cb *cb;
+ __u8 encrypt;
+
+ if (conn->state == BT_CONFIG) {
+ if (!status)
+ conn->state = BT_CONNECTED;
+
+ hci_connect_cfm(conn, status);
+ hci_conn_drop(conn);
+ return;
+ }
+
+ if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
+ encrypt = 0x00;
+ else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
+ encrypt = 0x02;
+ else
+ encrypt = 0x01;
+
+ if (!status) {
+ if (conn->sec_level == BT_SECURITY_SDP)
+ conn->sec_level = BT_SECURITY_LOW;
+
+ if (conn->pending_sec_level > conn->sec_level)
+ conn->sec_level = conn->pending_sec_level;
+ }
+
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->security_cfm)
+ cb->security_cfm(conn, status, encrypt);
+ }
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
+}
+
+static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
+{
+ struct hci_cb *cb;
+
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->key_change_cfm)
+ cb->key_change_cfm(conn, status);
+ }
+ mutex_unlock(&hci_cb_list_lock);
+}
+
+static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
+ __u8 role)
+{
+ struct hci_cb *cb;
+
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->role_switch_cfm)
+ cb->role_switch_cfm(conn, status, role);
+ }
+ mutex_unlock(&hci_cb_list_lock);
+}
+
+static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
+{
+ if (addr_type != ADDR_LE_DEV_RANDOM)
+ return false;
+
+ if ((bdaddr->b[5] & 0xc0) == 0x40)
+ return true;
+
+ return false;
+}
+
+static inline bool hci_is_identity_address(bdaddr_t *addr, u8 addr_type)
+{
+ if (addr_type == ADDR_LE_DEV_PUBLIC)
+ return true;
+
+ /* Check for Random Static address type */
+ if ((addr->b[5] & 0xc0) == 0xc0)
+ return true;
+
+ return false;
+}
+
+static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 addr_type)
+{
+ if (!hci_bdaddr_is_rpa(bdaddr, addr_type))
+ return NULL;
+
+ return hci_find_irk_by_rpa(hdev, bdaddr);
+}
+
+static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
+ u16 to_multiplier)
+{
+ u16 max_latency;
+
+ if (min > max || min < 6 || max > 3200)
+ return -EINVAL;
+
+ if (to_multiplier < 10 || to_multiplier > 3200)
+ return -EINVAL;
+
+ if (max >= to_multiplier * 8)
+ return -EINVAL;
+
+ max_latency = (to_multiplier * 4 / max) - 1;
+ if (latency > 499 || latency > max_latency)
+ return -EINVAL;
+
+ return 0;
+}
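+
+/* Worked example for the check above (values chosen for illustration):
+ * with max = 40 (40 * 1.25 ms = 50 ms) and to_multiplier = 42
+ * (42 * 10 ms = 420 ms), max_latency = (42 * 4 / 40) - 1 = 3; a latency
+ * of 4 would let (1 + latency) * max * 2 exceed the supervision timeout,
+ * so it is rejected.
+ */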
+
+int hci_register_cb(struct hci_cb *hcb);
+int hci_unregister_cb(struct hci_cb *hcb);
+
+int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param);
+
+int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
+ const void *param);
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
+void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
+void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb);
+
+void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
+void *hci_recv_event_data(struct hci_dev *hdev, __u8 event);
+
+u32 hci_conn_get_phy(struct hci_conn *conn);
+
+/* ----- HCI Sockets ----- */
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
+void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
+ int flag, struct sock *skip_sk);
+void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
+void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
+ void *data, u16 data_len, ktime_t tstamp,
+ int flag, struct sock *skip_sk);
+
+void hci_sock_dev_event(struct hci_dev *hdev, int event);
+
+#define HCI_MGMT_VAR_LEN BIT(0)
+#define HCI_MGMT_NO_HDEV BIT(1)
+#define HCI_MGMT_UNTRUSTED BIT(2)
+#define HCI_MGMT_UNCONFIGURED BIT(3)
+#define HCI_MGMT_HDEV_OPTIONAL BIT(4)
+
+struct hci_mgmt_handler {
+ int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len);
+ size_t data_len;
+ unsigned long flags;
+};
+
+struct hci_mgmt_chan {
+ struct list_head list;
+ unsigned short channel;
+ size_t handler_count;
+ const struct hci_mgmt_handler *handlers;
+ void (*hdev_init) (struct sock *sk, struct hci_dev *hdev);
+};
+
+int hci_mgmt_chan_register(struct hci_mgmt_chan *c);
+void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
+
+/* Management interface */
+#define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR))
+#define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \
+ BIT(BDADDR_LE_RANDOM))
+#define DISCOV_TYPE_INTERLEAVED (BIT(BDADDR_BREDR) | \
+ BIT(BDADDR_LE_PUBLIC) | \
+ BIT(BDADDR_LE_RANDOM))
+
+/* These LE scan and inquiry parameters were chosen according to the LE
+ * General Discovery Procedure specification.
+ */
+#define DISCOV_LE_SCAN_WIN 0x12
+#define DISCOV_LE_SCAN_INT 0x12
+#define DISCOV_LE_TIMEOUT 10240 /* msec */
+#define DISCOV_INTERLEAVED_TIMEOUT 5120 /* msec */
+#define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
+#define DISCOV_BREDR_INQUIRY_LEN 0x08
+#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
+#define DISCOV_LE_FAST_ADV_INT_MIN 0x00A0 /* 100 msec */
+#define DISCOV_LE_FAST_ADV_INT_MAX 0x00F0 /* 150 msec */
+#define DISCOV_LE_PER_ADV_INT_MIN 0x00A0 /* 200 msec */
+#define DISCOV_LE_PER_ADV_INT_MAX 0x00A0 /* 200 msec */
+#define DISCOV_LE_ADV_MESH_MIN 0x00A0 /* 100 msec */
+#define DISCOV_LE_ADV_MESH_MAX 0x00A0 /* 100 msec */
+#define INTERVAL_TO_MS(x) (((x) * 10) / 0x10)
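+
+/* INTERVAL_TO_MS assumes 0.625 ms units, e.g.
+ * INTERVAL_TO_MS(0x00A0) = (160 * 10) / 16 = 100 msec.
+ */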
+
+#define NAME_RESOLVE_DURATION msecs_to_jiffies(10240) /* 10.24 sec */
+
+void mgmt_fill_version_info(void *ver);
+int mgmt_new_settings(struct hci_dev *hdev);
+void mgmt_index_added(struct hci_dev *hdev);
+void mgmt_index_removed(struct hci_dev *hdev);
+void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
+void mgmt_power_on(struct hci_dev *hdev, int err);
+void __mgmt_power_off(struct hci_dev *hdev);
+void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ bool persistent);
+void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 *name, u8 name_len);
+void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 reason,
+ bool mgmt_connected);
+void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status);
+void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
+void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status);
+void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status);
+int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u32 value,
+ u8 confirm_hint);
+int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type);
+int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u32 passkey,
+ u8 entered);
+void mgmt_auth_failed(struct hci_conn *conn, u8 status);
+void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
+void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+ u8 status);
+void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
+void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status);
+void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status);
+void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
+ u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
+ u64 instant);
+void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, s8 rssi, u8 *name, u8 name_len);
+void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+void mgmt_suspending(struct hci_dev *hdev, u8 state);
+void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
+ u8 addr_type);
+bool mgmt_powering_down(struct hci_dev *hdev);
+void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
+void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
+void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
+ bool persistent);
+void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 bdaddr_type, u8 store_hint, u16 min_interval,
+ u16 max_interval, u16 latency, u16 timeout);
+void mgmt_smp_complete(struct hci_conn *conn, bool complete);
+bool mgmt_get_connectable(struct hci_dev *hdev);
+u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev);
+void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
+ u8 instance);
+void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
+ u8 instance);
+void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle);
+int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
+void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
+ bdaddr_t *bdaddr, u8 addr_type);
+
+int hci_abort_conn(struct hci_conn *conn, u8 reason);
+u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
+ u16 to_multiplier);
+void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
+ __u8 ltk[16], __u8 key_size);
+
+void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 *bdaddr_type);
+
+#define SCO_AIRMODE_MASK 0x0003
+#define SCO_AIRMODE_CVSD 0x0000
+#define SCO_AIRMODE_TRANSP 0x0003
+
+#define LOCAL_CODEC_ACL_MASK BIT(0)
+#define LOCAL_CODEC_SCO_MASK BIT(1)
+
+#define TRANSPORT_TYPE_MAX 0x04
+
+#endif /* __HCI_CORE_H */
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
new file mode 100644
index 000000000..082f89531
--- /dev/null
+++ b/include/net/bluetooth/hci_mon.h
@@ -0,0 +1,69 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+
+ Copyright (C) 2011-2012 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __HCI_MON_H
+#define __HCI_MON_H
+
+struct hci_mon_hdr {
+ __le16 opcode;
+ __le16 index;
+ __le16 len;
+} __packed;
+#define HCI_MON_HDR_SIZE 6
+
+#define HCI_MON_NEW_INDEX 0
+#define HCI_MON_DEL_INDEX 1
+#define HCI_MON_COMMAND_PKT 2
+#define HCI_MON_EVENT_PKT 3
+#define HCI_MON_ACL_TX_PKT 4
+#define HCI_MON_ACL_RX_PKT 5
+#define HCI_MON_SCO_TX_PKT 6
+#define HCI_MON_SCO_RX_PKT 7
+#define HCI_MON_OPEN_INDEX 8
+#define HCI_MON_CLOSE_INDEX 9
+#define HCI_MON_INDEX_INFO 10
+#define HCI_MON_VENDOR_DIAG 11
+#define HCI_MON_SYSTEM_NOTE 12
+#define HCI_MON_USER_LOGGING 13
+#define HCI_MON_CTRL_OPEN 14
+#define HCI_MON_CTRL_CLOSE 15
+#define HCI_MON_CTRL_COMMAND 16
+#define HCI_MON_CTRL_EVENT 17
+#define HCI_MON_ISO_TX_PKT 18
+#define HCI_MON_ISO_RX_PKT 19
+
+struct hci_mon_new_index {
+ __u8 type;
+ __u8 bus;
+ bdaddr_t bdaddr;
+ char name[8] __nonstring;
+} __packed;
+#define HCI_MON_NEW_INDEX_SIZE 16
+
+struct hci_mon_index_info {
+ bdaddr_t bdaddr;
+ __le16 manufacturer;
+} __packed;
+#define HCI_MON_INDEX_INFO_SIZE 8
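+
+/* Reader-side sketch (hypothetical userspace consumer; assumes a socket
+ * bound to HCI_CHANNEL_MONITOR): every frame starts with hci_mon_hdr,
+ * followed by hdr.len bytes of payload:
+ *
+ *	unsigned char buf[2048];
+ *	struct hci_mon_hdr hdr;
+ *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
+ *	memcpy(&hdr, buf, HCI_MON_HDR_SIZE);
+ *	switch (le16_to_cpu(hdr.opcode)) {
+ *	case HCI_MON_EVENT_PKT:
+ *		... parse buf + HCI_MON_HDR_SIZE ...
+ *	}
+ */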
+
+#endif /* __HCI_MON_H */
diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h
new file mode 100644
index 000000000..9949870f7
--- /dev/null
+++ b/include/net/bluetooth/hci_sock.h
@@ -0,0 +1,176 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __HCI_SOCK_H
+#define __HCI_SOCK_H
+
+/* Socket options */
+#define HCI_DATA_DIR 1
+#define HCI_FILTER 2
+#define HCI_TIME_STAMP 3
+
+/* CMSG flags */
+#define HCI_CMSG_DIR 0x01
+#define HCI_CMSG_TSTAMP 0x02
+
+struct sockaddr_hci {
+ sa_family_t hci_family;
+ unsigned short hci_dev;
+ unsigned short hci_channel;
+};
+#define HCI_DEV_NONE 0xffff
+
+#define HCI_CHANNEL_RAW 0
+#define HCI_CHANNEL_USER 1
+#define HCI_CHANNEL_MONITOR 2
+#define HCI_CHANNEL_CONTROL 3
+#define HCI_CHANNEL_LOGGING 4
+
+struct hci_filter {
+ unsigned long type_mask;
+ unsigned long event_mask[2];
+ __le16 opcode;
+};
+
+struct hci_ufilter {
+ __u32 type_mask;
+ __u32 event_mask[2];
+ __le16 opcode;
+};
+
+#define HCI_FLT_TYPE_BITS 31
+#define HCI_FLT_EVENT_BITS 63
+#define HCI_FLT_OGF_BITS 63
+#define HCI_FLT_OCF_BITS 127
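+
+/* Userspace sketch (illustrative only): a raw HCI socket installs a
+ * packet/event filter via the HCI_FILTER option; SOL_HCI and
+ * HCI_EVENT_PKT come from the userspace bluetooth headers:
+ *
+ *	struct hci_filter flt;
+ *	memset(&flt, 0, sizeof(flt));
+ *	flt.type_mask = 1 << HCI_EVENT_PKT;
+ *	flt.event_mask[0] = 0xffffffff;
+ *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
+ */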
+
+/* Ioctl defines */
+#define HCIDEVUP _IOW('H', 201, int)
+#define HCIDEVDOWN _IOW('H', 202, int)
+#define HCIDEVRESET _IOW('H', 203, int)
+#define HCIDEVRESTAT _IOW('H', 204, int)
+
+#define HCIGETDEVLIST _IOR('H', 210, int)
+#define HCIGETDEVINFO _IOR('H', 211, int)
+#define HCIGETCONNLIST _IOR('H', 212, int)
+#define HCIGETCONNINFO _IOR('H', 213, int)
+#define HCIGETAUTHINFO _IOR('H', 215, int)
+
+#define HCISETRAW _IOW('H', 220, int)
+#define HCISETSCAN _IOW('H', 221, int)
+#define HCISETAUTH _IOW('H', 222, int)
+#define HCISETENCRYPT _IOW('H', 223, int)
+#define HCISETPTYPE _IOW('H', 224, int)
+#define HCISETLINKPOL _IOW('H', 225, int)
+#define HCISETLINKMODE _IOW('H', 226, int)
+#define HCISETACLMTU _IOW('H', 227, int)
+#define HCISETSCOMTU _IOW('H', 228, int)
+
+#define HCIBLOCKADDR _IOW('H', 230, int)
+#define HCIUNBLOCKADDR _IOW('H', 231, int)
+
+#define HCIINQUIRY _IOR('H', 240, int)
+
+/* Ioctl requests structures */
+struct hci_dev_stats {
+ __u32 err_rx;
+ __u32 err_tx;
+ __u32 cmd_tx;
+ __u32 evt_rx;
+ __u32 acl_tx;
+ __u32 acl_rx;
+ __u32 sco_tx;
+ __u32 sco_rx;
+ __u32 byte_rx;
+ __u32 byte_tx;
+};
+
+struct hci_dev_info {
+ __u16 dev_id;
+ char name[8];
+
+ bdaddr_t bdaddr;
+
+ __u32 flags;
+ __u8 type;
+
+ __u8 features[8];
+
+ __u32 pkt_type;
+ __u32 link_policy;
+ __u32 link_mode;
+
+ __u16 acl_mtu;
+ __u16 acl_pkts;
+ __u16 sco_mtu;
+ __u16 sco_pkts;
+
+ struct hci_dev_stats stat;
+};
+
+struct hci_conn_info {
+ __u16 handle;
+ bdaddr_t bdaddr;
+ __u8 type;
+ __u8 out;
+ __u16 state;
+ __u32 link_mode;
+};
+
+struct hci_dev_req {
+ __u16 dev_id;
+ __u32 dev_opt;
+};
+
+struct hci_dev_list_req {
+ __u16 dev_num;
+ struct hci_dev_req dev_req[]; /* hci_dev_req structures */
+};
+
+struct hci_conn_list_req {
+ __u16 dev_id;
+ __u16 conn_num;
+ struct hci_conn_info conn_info[];
+};
+
+struct hci_conn_info_req {
+ bdaddr_t bdaddr;
+ __u8 type;
+ struct hci_conn_info conn_info[];
+};
+
+struct hci_auth_info_req {
+ bdaddr_t bdaddr;
+ __u8 type;
+};
+
+struct hci_inquiry_req {
+ __u16 dev_id;
+ __u16 flags;
+ __u8 lap[3];
+ __u8 length;
+ __u8 num_rsp;
+};
+#define IREQ_CACHE_FLUSH 0x0001
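+
+/* Userspace sketch (illustrative only): HCIINQUIRY takes a buffer that
+ * holds a struct hci_inquiry_req followed by room for num_rsp inquiry
+ * responses; the LAP below is the General Inquiry Access Code:
+ *
+ *	struct hci_inquiry_req *ir = (void *)buf;
+ *	ir->dev_id  = dev_id;
+ *	ir->flags   = IREQ_CACHE_FLUSH;
+ *	ir->lap[0]  = 0x33; ir->lap[1] = 0x8b; ir->lap[2] = 0x9e;
+ *	ir->length  = 8;
+ *	ir->num_rsp = 255;
+ *	ioctl(sock, HCIINQUIRY, ir);
+ */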
+
+#endif /* __HCI_SOCK_H */
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
new file mode 100644
index 000000000..17f5a4c32
--- /dev/null
+++ b/include/net/bluetooth/hci_sync.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BlueZ - Bluetooth protocol stack for Linux
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+typedef int (*hci_cmd_sync_work_func_t)(struct hci_dev *hdev, void *data);
+typedef void (*hci_cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data,
+ int err);
+
+struct hci_cmd_sync_work_entry {
+ struct list_head list;
+ hci_cmd_sync_work_func_t func;
+ void *data;
+ hci_cmd_sync_work_destroy_t destroy;
+};
+
+struct adv_info;
+/* Functions with the _sync suffix shall not be called with hdev->lock
+ * held, as they wait for the command to complete; in the meantime an
+ * event could be received which attempts to acquire hdev->lock, causing
+ * a deadlock.
+ */
+struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout);
+struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout,
+ struct sock *sk);
+int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout,
+ struct sock *sk);
+
+void hci_cmd_sync_init(struct hci_dev *hdev);
+void hci_cmd_sync_clear(struct hci_dev *hdev);
+void hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
+void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
+
+int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+
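+/* Usage sketch (hypothetical callback, illustrative only): work is
+ * queued with a function that runs on the cmd_sync workqueue, plus an
+ * optional destroy callback that receives the final result:
+ *
+ *	static int set_name_sync(struct hci_dev *hdev, void *data)
+ *	{
+ *		return hci_update_name_sync(hdev);
+ *	}
+ *
+ *	hci_cmd_sync_queue(hdev, set_name_sync, NULL, NULL);
+ */
+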
+int hci_update_eir_sync(struct hci_dev *hdev);
+int hci_update_class_sync(struct hci_dev *hdev);
+int hci_update_name_sync(struct hci_dev *hdev);
+int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode);
+
+int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
+ bool use_rpa, struct adv_info *adv_instance,
+ u8 *own_addr_type, bdaddr_t *rand_addr);
+
+int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
+ bool rpa, u8 *own_addr_type);
+
+int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance);
+int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance);
+int hci_update_adv_data(struct hci_dev *hdev, u8 instance);
+int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
+ bool force);
+
+int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance);
+int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance);
+int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance);
+int hci_enable_advertising_sync(struct hci_dev *hdev);
+int hci_enable_advertising(struct hci_dev *hdev);
+
+int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
+ u8 *data, u32 flags, u16 min_interval,
+ u16 max_interval, u16 sync_interval);
+
+int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
+ u8 instance, bool force);
+int hci_disable_advertising_sync(struct hci_dev *hdev);
+int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
+ u8 instance, bool force);
+int hci_update_passive_scan_sync(struct hci_dev *hdev);
+int hci_update_passive_scan(struct hci_dev *hdev);
+int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle);
+int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type);
+int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val);
+int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp);
+
+int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable);
+int hci_update_scan_sync(struct hci_dev *hdev);
+int hci_update_scan(struct hci_dev *hdev);
+
+int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul);
+int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
+ struct sock *sk);
+int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance);
+struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, bool ext,
+ struct sock *sk);
+
+int hci_reset_sync(struct hci_dev *hdev);
+int hci_dev_open_sync(struct hci_dev *hdev);
+int hci_dev_close_sync(struct hci_dev *hdev);
+
+int hci_powered_update_sync(struct hci_dev *hdev);
+int hci_set_powered_sync(struct hci_dev *hdev, u8 val);
+
+int hci_update_discoverable_sync(struct hci_dev *hdev);
+int hci_update_discoverable(struct hci_dev *hdev);
+
+int hci_update_connectable_sync(struct hci_dev *hdev);
+
+int hci_start_discovery_sync(struct hci_dev *hdev);
+int hci_stop_discovery_sync(struct hci_dev *hdev);
+
+int hci_suspend_sync(struct hci_dev *hdev);
+int hci_resume_sync(struct hci_dev *hdev);
+
+struct hci_conn;
+
+int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
+
+int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn);
+
+int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle);
+
+int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason);
+
+int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle);
+
+int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle);
diff --git a/include/net/bluetooth/iso.h b/include/net/bluetooth/iso.h
new file mode 100644
index 000000000..3f4fe8b78
--- /dev/null
+++ b/include/net/bluetooth/iso.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BlueZ - Bluetooth protocol stack for Linux
+ *
+ * Copyright (C) 2022 Intel Corporation
+ */
+
+#ifndef __ISO_H
+#define __ISO_H
+
+/* ISO defaults */
+#define ISO_DEFAULT_MTU 251
+#define ISO_MAX_NUM_BIS 0x1f
+
+/* ISO socket broadcast address */
+struct sockaddr_iso_bc {
+ bdaddr_t bc_bdaddr;
+ __u8 bc_bdaddr_type;
+ __u8 bc_sid;
+ __u8 bc_num_bis;
+ __u8 bc_bis[ISO_MAX_NUM_BIS];
+};
+
+/* ISO socket address */
+struct sockaddr_iso {
+ sa_family_t iso_family;
+ bdaddr_t iso_bdaddr;
+ __u8 iso_bdaddr_type;
+ struct sockaddr_iso_bc iso_bc[];
+};
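+
+/* Userspace sketch (illustrative only): a unicast ISO socket binds with
+ * just the fixed part of sockaddr_iso; the flexible iso_bc member is
+ * appended only for broadcast:
+ *
+ *	struct sockaddr_iso addr;
+ *	memset(&addr, 0, sizeof(addr));
+ *	addr.iso_family = AF_BLUETOOTH;
+ *	bacpy(&addr.iso_bdaddr, BDADDR_ANY);
+ *	addr.iso_bdaddr_type = BDADDR_LE_PUBLIC;
+ *	bind(sk, (struct sockaddr *)&addr, sizeof(addr));
+ */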
+
+#endif /* __ISO_H */
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
new file mode 100644
index 000000000..2f766e343
--- /dev/null
+++ b/include/net/bluetooth/l2cap.h
@@ -0,0 +1,1009 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
+ Copyright (C) 2010 Google Inc.
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __L2CAP_H
+#define __L2CAP_H
+
+#include <asm/unaligned.h>
+#include <linux/atomic.h>
+
+/* L2CAP defaults */
+#define L2CAP_DEFAULT_MTU 672
+#define L2CAP_DEFAULT_MIN_MTU 48
+#define L2CAP_DEFAULT_FLUSH_TO 0xFFFF
+#define L2CAP_EFS_DEFAULT_FLUSH_TO 0xFFFFFFFF
+#define L2CAP_DEFAULT_TX_WINDOW 63
+#define L2CAP_DEFAULT_EXT_WINDOW 0x3FFF
+#define L2CAP_DEFAULT_MAX_TX 3
+#define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */
+#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
+#define L2CAP_DEFAULT_MAX_PDU_SIZE 1492 /* Sized for AMP packet */
+#define L2CAP_DEFAULT_ACK_TO 200
+#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF
+#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF
+#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
+#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */
+#define L2CAP_LE_MIN_MTU 23
+#define L2CAP_ECRED_CONN_SCID_MAX 5
+
+#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100)
+#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000)
+#define L2CAP_ENC_TIMEOUT msecs_to_jiffies(5000)
+#define L2CAP_CONN_TIMEOUT msecs_to_jiffies(40000)
+#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000)
+#define L2CAP_MOVE_TIMEOUT msecs_to_jiffies(4000)
+#define L2CAP_MOVE_ERTX_TIMEOUT msecs_to_jiffies(60000)
+#define L2CAP_WAIT_ACK_POLL_PERIOD msecs_to_jiffies(200)
+#define L2CAP_WAIT_ACK_TIMEOUT msecs_to_jiffies(10000)
+
+#define L2CAP_A2MP_DEFAULT_MTU 670
+
+/* L2CAP socket address */
+struct sockaddr_l2 {
+ sa_family_t l2_family;
+ __le16 l2_psm;
+ bdaddr_t l2_bdaddr;
+ __le16 l2_cid;
+ __u8 l2_bdaddr_type;
+};
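+
+/* Userspace sketch: a PSM-based connect fills l2_psm (little endian,
+ * htobs() in libbluetooth) and leaves l2_cid zero, while a fixed-channel
+ * connect (e.g. ATT) does the opposite; with dst and sk set up elsewhere:
+ *
+ *     struct sockaddr_l2 addr = { .l2_family = AF_BLUETOOTH };
+ *
+ *     bacpy(&addr.l2_bdaddr, &dst);
+ *     addr.l2_psm = htobs(L2CAP_PSM_RFCOMM);
+ *     connect(sk, (struct sockaddr *)&addr, sizeof(addr));
+ */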
+
+/* L2CAP socket options */
+#define L2CAP_OPTIONS 0x01
+struct l2cap_options {
+ __u16 omtu;
+ __u16 imtu;
+ __u16 flush_to;
+ __u8 mode;
+ __u8 fcs;
+ __u8 max_tx;
+ __u16 txwin_size;
+};
+
+#define L2CAP_CONNINFO 0x02
+struct l2cap_conninfo {
+ __u16 hci_handle;
+ __u8 dev_class[3];
+};
+
+#define L2CAP_LM 0x03
+#define L2CAP_LM_MASTER 0x0001
+#define L2CAP_LM_AUTH 0x0002
+#define L2CAP_LM_ENCRYPT 0x0004
+#define L2CAP_LM_TRUSTED 0x0008
+#define L2CAP_LM_RELIABLE 0x0010
+#define L2CAP_LM_SECURE 0x0020
+#define L2CAP_LM_FIPS 0x0040
+
+/* L2CAP command codes */
+#define L2CAP_COMMAND_REJ 0x01
+#define L2CAP_CONN_REQ 0x02
+#define L2CAP_CONN_RSP 0x03
+#define L2CAP_CONF_REQ 0x04
+#define L2CAP_CONF_RSP 0x05
+#define L2CAP_DISCONN_REQ 0x06
+#define L2CAP_DISCONN_RSP 0x07
+#define L2CAP_ECHO_REQ 0x08
+#define L2CAP_ECHO_RSP 0x09
+#define L2CAP_INFO_REQ 0x0a
+#define L2CAP_INFO_RSP 0x0b
+#define L2CAP_CREATE_CHAN_REQ 0x0c
+#define L2CAP_CREATE_CHAN_RSP 0x0d
+#define L2CAP_MOVE_CHAN_REQ 0x0e
+#define L2CAP_MOVE_CHAN_RSP 0x0f
+#define L2CAP_MOVE_CHAN_CFM 0x10
+#define L2CAP_MOVE_CHAN_CFM_RSP 0x11
+#define L2CAP_CONN_PARAM_UPDATE_REQ 0x12
+#define L2CAP_CONN_PARAM_UPDATE_RSP 0x13
+#define L2CAP_LE_CONN_REQ 0x14
+#define L2CAP_LE_CONN_RSP 0x15
+#define L2CAP_LE_CREDITS 0x16
+#define L2CAP_ECRED_CONN_REQ 0x17
+#define L2CAP_ECRED_CONN_RSP 0x18
+#define L2CAP_ECRED_RECONF_REQ 0x19
+#define L2CAP_ECRED_RECONF_RSP 0x1a
+
+/* L2CAP extended feature mask */
+#define L2CAP_FEAT_FLOWCTL 0x00000001
+#define L2CAP_FEAT_RETRANS 0x00000002
+#define L2CAP_FEAT_BIDIR_QOS 0x00000004
+#define L2CAP_FEAT_ERTM 0x00000008
+#define L2CAP_FEAT_STREAMING 0x00000010
+#define L2CAP_FEAT_FCS 0x00000020
+#define L2CAP_FEAT_EXT_FLOW 0x00000040
+#define L2CAP_FEAT_FIXED_CHAN 0x00000080
+#define L2CAP_FEAT_EXT_WINDOW 0x00000100
+#define L2CAP_FEAT_UCD 0x00000200
+
+/* L2CAP checksum option */
+#define L2CAP_FCS_NONE 0x00
+#define L2CAP_FCS_CRC16 0x01
+
+/* L2CAP fixed channels */
+#define L2CAP_FC_SIG_BREDR 0x02
+#define L2CAP_FC_CONNLESS 0x04
+#define L2CAP_FC_A2MP 0x08
+#define L2CAP_FC_ATT 0x10
+#define L2CAP_FC_SIG_LE 0x20
+#define L2CAP_FC_SMP_LE 0x40
+#define L2CAP_FC_SMP_BREDR 0x80
+
+/* L2CAP Control Field bit masks */
+#define L2CAP_CTRL_SAR 0xC000
+#define L2CAP_CTRL_REQSEQ 0x3F00
+#define L2CAP_CTRL_TXSEQ 0x007E
+#define L2CAP_CTRL_SUPERVISE 0x000C
+
+#define L2CAP_CTRL_RETRANS 0x0080
+#define L2CAP_CTRL_FINAL 0x0080
+#define L2CAP_CTRL_POLL 0x0010
+#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */
+
+#define L2CAP_CTRL_TXSEQ_SHIFT 1
+#define L2CAP_CTRL_SUPER_SHIFT 2
+#define L2CAP_CTRL_POLL_SHIFT 4
+#define L2CAP_CTRL_FINAL_SHIFT 7
+#define L2CAP_CTRL_REQSEQ_SHIFT 8
+#define L2CAP_CTRL_SAR_SHIFT 14
+
+/* L2CAP Extended Control Field bit mask */
+#define L2CAP_EXT_CTRL_TXSEQ 0xFFFC0000
+#define L2CAP_EXT_CTRL_SAR 0x00030000
+#define L2CAP_EXT_CTRL_SUPERVISE 0x00030000
+#define L2CAP_EXT_CTRL_REQSEQ 0x0000FFFC
+
+#define L2CAP_EXT_CTRL_POLL 0x00040000
+#define L2CAP_EXT_CTRL_FINAL 0x00000002
+#define L2CAP_EXT_CTRL_FRAME_TYPE 0x00000001 /* I- or S-Frame */
+
+#define L2CAP_EXT_CTRL_FINAL_SHIFT 1
+#define L2CAP_EXT_CTRL_REQSEQ_SHIFT 2
+#define L2CAP_EXT_CTRL_SAR_SHIFT 16
+#define L2CAP_EXT_CTRL_SUPER_SHIFT 16
+#define L2CAP_EXT_CTRL_POLL_SHIFT 18
+#define L2CAP_EXT_CTRL_TXSEQ_SHIFT 18
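+
+/* The masks and shifts above are applied pairwise; for example, a
+ * receiver pulls the fields out of an enhanced control field with:
+ *
+ *     u16 ctrl = get_unaligned_le16(skb->data);
+ *     u16 txseq = (ctrl & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
+ *     u16 reqseq = (ctrl & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
+ *     bool final = ctrl & L2CAP_CTRL_FINAL;
+ *
+ * and the 32-bit extended control field works the same way with the
+ * L2CAP_EXT_CTRL_* constants.
+ */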
+
+/* L2CAP Supervisory Function */
+#define L2CAP_SUPER_RR 0x00
+#define L2CAP_SUPER_REJ 0x01
+#define L2CAP_SUPER_RNR 0x02
+#define L2CAP_SUPER_SREJ 0x03
+
+/* L2CAP Segmentation and Reassembly */
+#define L2CAP_SAR_UNSEGMENTED 0x00
+#define L2CAP_SAR_START 0x01
+#define L2CAP_SAR_END 0x02
+#define L2CAP_SAR_CONTINUE 0x03
+
+/* L2CAP Command rej. reasons */
+#define L2CAP_REJ_NOT_UNDERSTOOD 0x0000
+#define L2CAP_REJ_MTU_EXCEEDED 0x0001
+#define L2CAP_REJ_INVALID_CID 0x0002
+
+/* L2CAP structures */
+struct l2cap_hdr {
+ __le16 len;
+ __le16 cid;
+} __packed;
+#define L2CAP_LEN_SIZE 2
+#define L2CAP_HDR_SIZE 4
+#define L2CAP_ENH_HDR_SIZE 6
+#define L2CAP_EXT_HDR_SIZE 8
+
+#define L2CAP_FCS_SIZE 2
+#define L2CAP_SDULEN_SIZE 2
+#define L2CAP_PSMLEN_SIZE 2
+#define L2CAP_ENH_CTRL_SIZE 2
+#define L2CAP_EXT_CTRL_SIZE 4
+
+struct l2cap_cmd_hdr {
+ __u8 code;
+ __u8 ident;
+ __le16 len;
+} __packed;
+#define L2CAP_CMD_HDR_SIZE 4
+
+struct l2cap_cmd_rej_unk {
+ __le16 reason;
+} __packed;
+
+struct l2cap_cmd_rej_mtu {
+ __le16 reason;
+ __le16 max_mtu;
+} __packed;
+
+struct l2cap_cmd_rej_cid {
+ __le16 reason;
+ __le16 scid;
+ __le16 dcid;
+} __packed;
+
+struct l2cap_conn_req {
+ __le16 psm;
+ __le16 scid;
+} __packed;
+
+struct l2cap_conn_rsp {
+ __le16 dcid;
+ __le16 scid;
+ __le16 result;
+ __le16 status;
+} __packed;
+
+/* protocol/service multiplexer (PSM) */
+#define L2CAP_PSM_SDP 0x0001
+#define L2CAP_PSM_RFCOMM 0x0003
+#define L2CAP_PSM_3DSP 0x0021
+#define L2CAP_PSM_IPSP 0x0023 /* 6LoWPAN */
+
+#define L2CAP_PSM_DYN_START 0x1001
+#define L2CAP_PSM_DYN_END 0xffff
+#define L2CAP_PSM_AUTO_END 0x10ff
+#define L2CAP_PSM_LE_DYN_START 0x0080
+#define L2CAP_PSM_LE_DYN_END 0x00ff
+
+/* channel identifier */
+#define L2CAP_CID_SIGNALING 0x0001
+#define L2CAP_CID_CONN_LESS 0x0002
+#define L2CAP_CID_A2MP 0x0003
+#define L2CAP_CID_ATT 0x0004
+#define L2CAP_CID_LE_SIGNALING 0x0005
+#define L2CAP_CID_SMP 0x0006
+#define L2CAP_CID_SMP_BREDR 0x0007
+#define L2CAP_CID_DYN_START 0x0040
+#define L2CAP_CID_DYN_END 0xffff
+#define L2CAP_CID_LE_DYN_END 0x007f
+
+/* connect/create channel results */
+#define L2CAP_CR_SUCCESS 0x0000
+#define L2CAP_CR_PEND 0x0001
+#define L2CAP_CR_BAD_PSM 0x0002
+#define L2CAP_CR_SEC_BLOCK 0x0003
+#define L2CAP_CR_NO_MEM 0x0004
+#define L2CAP_CR_BAD_AMP 0x0005
+#define L2CAP_CR_INVALID_SCID 0x0006
+#define L2CAP_CR_SCID_IN_USE 0x0007
+
+/* credit based connect results */
+#define L2CAP_CR_LE_SUCCESS 0x0000
+#define L2CAP_CR_LE_BAD_PSM 0x0002
+#define L2CAP_CR_LE_NO_MEM 0x0004
+#define L2CAP_CR_LE_AUTHENTICATION 0x0005
+#define L2CAP_CR_LE_AUTHORIZATION 0x0006
+#define L2CAP_CR_LE_BAD_KEY_SIZE 0x0007
+#define L2CAP_CR_LE_ENCRYPTION 0x0008
+#define L2CAP_CR_LE_INVALID_SCID 0x0009
+#define L2CAP_CR_LE_SCID_IN_USE 0x000A
+#define L2CAP_CR_LE_UNACCEPT_PARAMS 0x000B
+#define L2CAP_CR_LE_INVALID_PARAMS 0x000C
+
+/* connect/create channel status */
+#define L2CAP_CS_NO_INFO 0x0000
+#define L2CAP_CS_AUTHEN_PEND 0x0001
+#define L2CAP_CS_AUTHOR_PEND 0x0002
+
+struct l2cap_conf_req {
+ __le16 dcid;
+ __le16 flags;
+ __u8 data[];
+} __packed;
+
+struct l2cap_conf_rsp {
+ __le16 scid;
+ __le16 flags;
+ __le16 result;
+ __u8 data[];
+} __packed;
+
+#define L2CAP_CONF_SUCCESS 0x0000
+#define L2CAP_CONF_UNACCEPT 0x0001
+#define L2CAP_CONF_REJECT 0x0002
+#define L2CAP_CONF_UNKNOWN 0x0003
+#define L2CAP_CONF_PENDING 0x0004
+#define L2CAP_CONF_EFS_REJECT 0x0005
+
+/* configuration req/rsp continuation flag */
+#define L2CAP_CONF_FLAG_CONTINUATION 0x0001
+
+struct l2cap_conf_opt {
+ __u8 type;
+ __u8 len;
+ __u8 val[];
+} __packed;
+#define L2CAP_CONF_OPT_SIZE 2
+
+#define L2CAP_CONF_HINT 0x80
+#define L2CAP_CONF_MASK 0x7f
+
+#define L2CAP_CONF_MTU 0x01
+#define L2CAP_CONF_FLUSH_TO 0x02
+#define L2CAP_CONF_QOS 0x03
+#define L2CAP_CONF_RFC 0x04
+#define L2CAP_CONF_FCS 0x05
+#define L2CAP_CONF_EFS 0x06
+#define L2CAP_CONF_EWS 0x07
+
+#define L2CAP_CONF_MAX_SIZE 22
+
+struct l2cap_conf_rfc {
+ __u8 mode;
+ __u8 txwin_size;
+ __u8 max_transmit;
+ __le16 retrans_timeout;
+ __le16 monitor_timeout;
+ __le16 max_pdu_size;
+} __packed;
+
+#define L2CAP_MODE_BASIC 0x00
+#define L2CAP_MODE_RETRANS 0x01
+#define L2CAP_MODE_FLOWCTL 0x02
+#define L2CAP_MODE_ERTM 0x03
+#define L2CAP_MODE_STREAMING 0x04
+
+/* Unlike the above, these two don't actually map to anything that would
+ * ever be sent over the air. Therefore, use values that are unlikely to
+ * ever be used in the BR/EDR configuration phase.
+ */
+#define L2CAP_MODE_LE_FLOWCTL 0x80
+#define L2CAP_MODE_EXT_FLOWCTL 0x81
+
+struct l2cap_conf_efs {
+ __u8 id;
+ __u8 stype;
+ __le16 msdu;
+ __le32 sdu_itime;
+ __le32 acc_lat;
+ __le32 flush_to;
+} __packed;
+
+#define L2CAP_SERV_NOTRAFIC 0x00
+#define L2CAP_SERV_BESTEFFORT 0x01
+#define L2CAP_SERV_GUARANTEED 0x02
+
+#define L2CAP_BESTEFFORT_ID 0x01
+
+struct l2cap_disconn_req {
+ __le16 dcid;
+ __le16 scid;
+} __packed;
+
+struct l2cap_disconn_rsp {
+ __le16 dcid;
+ __le16 scid;
+} __packed;
+
+struct l2cap_info_req {
+ __le16 type;
+} __packed;
+
+struct l2cap_info_rsp {
+ __le16 type;
+ __le16 result;
+ __u8 data[];
+} __packed;
+
+struct l2cap_create_chan_req {
+ __le16 psm;
+ __le16 scid;
+ __u8 amp_id;
+} __packed;
+
+struct l2cap_create_chan_rsp {
+ __le16 dcid;
+ __le16 scid;
+ __le16 result;
+ __le16 status;
+} __packed;
+
+struct l2cap_move_chan_req {
+ __le16 icid;
+ __u8 dest_amp_id;
+} __packed;
+
+struct l2cap_move_chan_rsp {
+ __le16 icid;
+ __le16 result;
+} __packed;
+
+#define L2CAP_MR_SUCCESS 0x0000
+#define L2CAP_MR_PEND 0x0001
+#define L2CAP_MR_BAD_ID 0x0002
+#define L2CAP_MR_SAME_ID 0x0003
+#define L2CAP_MR_NOT_SUPP 0x0004
+#define L2CAP_MR_COLLISION 0x0005
+#define L2CAP_MR_NOT_ALLOWED 0x0006
+
+struct l2cap_move_chan_cfm {
+ __le16 icid;
+ __le16 result;
+} __packed;
+
+#define L2CAP_MC_CONFIRMED 0x0000
+#define L2CAP_MC_UNCONFIRMED 0x0001
+
+struct l2cap_move_chan_cfm_rsp {
+ __le16 icid;
+} __packed;
+
+/* info type */
+#define L2CAP_IT_CL_MTU 0x0001
+#define L2CAP_IT_FEAT_MASK 0x0002
+#define L2CAP_IT_FIXED_CHAN 0x0003
+
+/* info result */
+#define L2CAP_IR_SUCCESS 0x0000
+#define L2CAP_IR_NOTSUPP 0x0001
+
+struct l2cap_conn_param_update_req {
+ __le16 min;
+ __le16 max;
+ __le16 latency;
+ __le16 to_multiplier;
+} __packed;
+
+struct l2cap_conn_param_update_rsp {
+ __le16 result;
+} __packed;
+
+/* Connection Parameters result */
+#define L2CAP_CONN_PARAM_ACCEPTED 0x0000
+#define L2CAP_CONN_PARAM_REJECTED 0x0001
+
+struct l2cap_le_conn_req {
+ __le16 psm;
+ __le16 scid;
+ __le16 mtu;
+ __le16 mps;
+ __le16 credits;
+} __packed;
+
+struct l2cap_le_conn_rsp {
+ __le16 dcid;
+ __le16 mtu;
+ __le16 mps;
+ __le16 credits;
+ __le16 result;
+} __packed;
+
+struct l2cap_le_credits {
+ __le16 cid;
+ __le16 credits;
+} __packed;
+
+#define L2CAP_ECRED_MIN_MTU 64
+#define L2CAP_ECRED_MIN_MPS 64
+#define L2CAP_ECRED_MAX_CID 5
+
+struct l2cap_ecred_conn_req {
+ __le16 psm;
+ __le16 mtu;
+ __le16 mps;
+ __le16 credits;
+ __le16 scid[];
+} __packed;
+
+struct l2cap_ecred_conn_rsp {
+ __le16 mtu;
+ __le16 mps;
+ __le16 credits;
+ __le16 result;
+ __le16 dcid[];
+} __packed;
+
+struct l2cap_ecred_reconf_req {
+ __le16 mtu;
+ __le16 mps;
+ __le16 scid[];
+} __packed;
+
+#define L2CAP_RECONF_SUCCESS 0x0000
+#define L2CAP_RECONF_INVALID_MTU 0x0001
+#define L2CAP_RECONF_INVALID_MPS 0x0002
+
+struct l2cap_ecred_reconf_rsp {
+ __le16 result;
+} __packed;
+
+/* ----- L2CAP channels and connections ----- */
+struct l2cap_seq_list {
+ __u16 head;
+ __u16 tail;
+ __u16 mask;
+ __u16 *list;
+};
+
+#define L2CAP_SEQ_LIST_CLEAR 0xFFFF
+#define L2CAP_SEQ_LIST_TAIL 0x8000
+
+struct l2cap_chan {
+ struct l2cap_conn *conn;
+ struct hci_conn *hs_hcon;
+ struct hci_chan *hs_hchan;
+ struct kref kref;
+ atomic_t nesting;
+
+ __u8 state;
+
+ bdaddr_t dst;
+ __u8 dst_type;
+ bdaddr_t src;
+ __u8 src_type;
+ __le16 psm;
+ __le16 sport;
+ __u16 dcid;
+ __u16 scid;
+
+ __u16 imtu;
+ __u16 omtu;
+ __u16 flush_to;
+ __u8 mode;
+ __u8 chan_type;
+ __u8 chan_policy;
+
+ __u8 sec_level;
+
+ __u8 ident;
+
+ __u8 conf_req[64];
+ __u8 conf_len;
+ __u8 num_conf_req;
+ __u8 num_conf_rsp;
+
+ __u8 fcs;
+
+ __u16 tx_win;
+ __u16 tx_win_max;
+ __u16 ack_win;
+ __u8 max_tx;
+ __u16 retrans_timeout;
+ __u16 monitor_timeout;
+ __u16 mps;
+
+ __u16 tx_credits;
+ __u16 rx_credits;
+
+ __u8 tx_state;
+ __u8 rx_state;
+
+ unsigned long conf_state;
+ unsigned long conn_state;
+ unsigned long flags;
+
+ __u8 remote_amp_id;
+ __u8 local_amp_id;
+ __u8 move_id;
+ __u8 move_state;
+ __u8 move_role;
+
+ __u16 next_tx_seq;
+ __u16 expected_ack_seq;
+ __u16 expected_tx_seq;
+ __u16 buffer_seq;
+ __u16 srej_save_reqseq;
+ __u16 last_acked_seq;
+ __u16 frames_sent;
+ __u16 unacked_frames;
+ __u8 retry_count;
+ __u16 sdu_len;
+ struct sk_buff *sdu;
+ struct sk_buff *sdu_last_frag;
+
+ __u16 remote_tx_win;
+ __u8 remote_max_tx;
+ __u16 remote_mps;
+
+ __u8 local_id;
+ __u8 local_stype;
+ __u16 local_msdu;
+ __u32 local_sdu_itime;
+ __u32 local_acc_lat;
+ __u32 local_flush_to;
+
+ __u8 remote_id;
+ __u8 remote_stype;
+ __u16 remote_msdu;
+ __u32 remote_sdu_itime;
+ __u32 remote_acc_lat;
+ __u32 remote_flush_to;
+
+ struct delayed_work chan_timer;
+ struct delayed_work retrans_timer;
+ struct delayed_work monitor_timer;
+ struct delayed_work ack_timer;
+
+ struct sk_buff *tx_send_head;
+ struct sk_buff_head tx_q;
+ struct sk_buff_head srej_q;
+ struct l2cap_seq_list srej_list;
+ struct l2cap_seq_list retrans_list;
+
+ struct list_head list;
+ struct list_head global_l;
+
+ void *data;
+ const struct l2cap_ops *ops;
+ struct mutex lock;
+};
+
+struct l2cap_ops {
+ char *name;
+
+ struct l2cap_chan *(*new_connection) (struct l2cap_chan *chan);
+ int (*recv) (struct l2cap_chan *chan,
+ struct sk_buff *skb);
+ void (*teardown) (struct l2cap_chan *chan, int err);
+ void (*close) (struct l2cap_chan *chan);
+ void (*state_change) (struct l2cap_chan *chan,
+ int state, int err);
+ void (*ready) (struct l2cap_chan *chan);
+ void (*defer) (struct l2cap_chan *chan);
+ void (*resume) (struct l2cap_chan *chan);
+ void (*suspend) (struct l2cap_chan *chan);
+ void (*set_shutdown) (struct l2cap_chan *chan);
+ long (*get_sndtimeo) (struct l2cap_chan *chan);
+ struct pid *(*get_peer_pid) (struct l2cap_chan *chan);
+ struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
+ unsigned long hdr_len,
+ unsigned long len, int nb);
+ int (*filter) (struct l2cap_chan *chan,
+ struct sk_buff *skb);
+};
+
+struct l2cap_conn {
+ struct hci_conn *hcon;
+ struct hci_chan *hchan;
+
+ unsigned int mtu;
+
+ __u32 feat_mask;
+ __u8 remote_fixed_chan;
+ __u8 local_fixed_chan;
+
+ __u8 info_state;
+ __u8 info_ident;
+
+ struct delayed_work info_timer;
+
+ struct sk_buff *rx_skb;
+ __u32 rx_len;
+ __u8 tx_ident;
+ struct mutex ident_lock;
+
+ struct sk_buff_head pending_rx;
+ struct work_struct pending_rx_work;
+
+ struct work_struct id_addr_update_work;
+
+ __u8 disc_reason;
+
+ struct l2cap_chan *smp;
+
+ struct list_head chan_l;
+ struct mutex chan_lock;
+ struct kref ref;
+ struct list_head users;
+};
+
+struct l2cap_user {
+ struct list_head list;
+ int (*probe) (struct l2cap_conn *conn, struct l2cap_user *user);
+ void (*remove) (struct l2cap_conn *conn, struct l2cap_user *user);
+};
+
+#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
+#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x04
+#define L2CAP_INFO_FEAT_MASK_REQ_DONE 0x08
+
+#define L2CAP_CHAN_RAW 1
+#define L2CAP_CHAN_CONN_LESS 2
+#define L2CAP_CHAN_CONN_ORIENTED 3
+#define L2CAP_CHAN_FIXED 4
+
+/* ----- L2CAP socket info ----- */
+#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
+
+struct l2cap_pinfo {
+ struct bt_sock bt;
+ struct l2cap_chan *chan;
+ struct sk_buff *rx_busy_skb;
+};
+
+enum {
+ CONF_REQ_SENT,
+ CONF_INPUT_DONE,
+ CONF_OUTPUT_DONE,
+ CONF_MTU_DONE,
+ CONF_MODE_DONE,
+ CONF_CONNECT_PEND,
+ CONF_RECV_NO_FCS,
+ CONF_STATE2_DEVICE,
+ CONF_EWS_RECV,
+ CONF_LOC_CONF_PEND,
+ CONF_REM_CONF_PEND,
+ CONF_NOT_COMPLETE,
+};
+
+#define L2CAP_CONF_MAX_CONF_REQ 2
+#define L2CAP_CONF_MAX_CONF_RSP 2
+
+enum {
+ CONN_SREJ_SENT,
+ CONN_WAIT_F,
+ CONN_SREJ_ACT,
+ CONN_SEND_PBIT,
+ CONN_REMOTE_BUSY,
+ CONN_LOCAL_BUSY,
+ CONN_REJ_ACT,
+ CONN_SEND_FBIT,
+ CONN_RNR_SENT,
+};
+
+/* Definitions for flags in l2cap_chan */
+enum {
+ FLAG_ROLE_SWITCH,
+ FLAG_FORCE_ACTIVE,
+ FLAG_FORCE_RELIABLE,
+ FLAG_FLUSHABLE,
+ FLAG_EXT_CTRL,
+ FLAG_EFS_ENABLE,
+ FLAG_DEFER_SETUP,
+ FLAG_LE_CONN_REQ_SENT,
+ FLAG_ECRED_CONN_REQ_SENT,
+ FLAG_PENDING_SECURITY,
+ FLAG_HOLD_HCI_CONN,
+};
+
+/* Lock nesting levels for L2CAP channels. We need these because lockdep
+ * otherwise considers all channels equal and will e.g. complain about a
+ * connection oriented channel triggering SMP procedures or a listening
+ * channel creating and locking a child channel.
+ */
+enum {
+ L2CAP_NESTING_SMP,
+ L2CAP_NESTING_NORMAL,
+ L2CAP_NESTING_PARENT,
+};
+
+enum {
+ L2CAP_TX_STATE_XMIT,
+ L2CAP_TX_STATE_WAIT_F,
+};
+
+enum {
+ L2CAP_RX_STATE_RECV,
+ L2CAP_RX_STATE_SREJ_SENT,
+ L2CAP_RX_STATE_MOVE,
+ L2CAP_RX_STATE_WAIT_P,
+ L2CAP_RX_STATE_WAIT_F,
+};
+
+enum {
+ L2CAP_TXSEQ_EXPECTED,
+ L2CAP_TXSEQ_EXPECTED_SREJ,
+ L2CAP_TXSEQ_UNEXPECTED,
+ L2CAP_TXSEQ_UNEXPECTED_SREJ,
+ L2CAP_TXSEQ_DUPLICATE,
+ L2CAP_TXSEQ_DUPLICATE_SREJ,
+ L2CAP_TXSEQ_INVALID,
+ L2CAP_TXSEQ_INVALID_IGNORE,
+};
+
+enum {
+ L2CAP_EV_DATA_REQUEST,
+ L2CAP_EV_LOCAL_BUSY_DETECTED,
+ L2CAP_EV_LOCAL_BUSY_CLEAR,
+ L2CAP_EV_RECV_REQSEQ_AND_FBIT,
+ L2CAP_EV_RECV_FBIT,
+ L2CAP_EV_RETRANS_TO,
+ L2CAP_EV_MONITOR_TO,
+ L2CAP_EV_EXPLICIT_POLL,
+ L2CAP_EV_RECV_IFRAME,
+ L2CAP_EV_RECV_RR,
+ L2CAP_EV_RECV_REJ,
+ L2CAP_EV_RECV_RNR,
+ L2CAP_EV_RECV_SREJ,
+ L2CAP_EV_RECV_FRAME,
+};
+
+enum {
+ L2CAP_MOVE_ROLE_NONE,
+ L2CAP_MOVE_ROLE_INITIATOR,
+ L2CAP_MOVE_ROLE_RESPONDER,
+};
+
+enum {
+ L2CAP_MOVE_STABLE,
+ L2CAP_MOVE_WAIT_REQ,
+ L2CAP_MOVE_WAIT_RSP,
+ L2CAP_MOVE_WAIT_RSP_SUCCESS,
+ L2CAP_MOVE_WAIT_CONFIRM,
+ L2CAP_MOVE_WAIT_CONFIRM_RSP,
+ L2CAP_MOVE_WAIT_LOGICAL_COMP,
+ L2CAP_MOVE_WAIT_LOGICAL_CFM,
+ L2CAP_MOVE_WAIT_LOCAL_BUSY,
+ L2CAP_MOVE_WAIT_PREPARE,
+};
+
+void l2cap_chan_hold(struct l2cap_chan *c);
+struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c);
+void l2cap_chan_put(struct l2cap_chan *c);
+
+static inline void l2cap_chan_lock(struct l2cap_chan *chan)
+{
+ mutex_lock_nested(&chan->lock, atomic_read(&chan->nesting));
+}
+
+static inline void l2cap_chan_unlock(struct l2cap_chan *chan)
+{
+ mutex_unlock(&chan->lock);
+}
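+
+/* A channel owner picks its lockdep nesting level before using the lock;
+ * e.g. an ordinary connection-oriented channel would do:
+ *
+ *     atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
+ *     ...
+ *     l2cap_chan_lock(chan);
+ *     ... operate on the channel ...
+ *     l2cap_chan_unlock(chan);
+ */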
+
+static inline void l2cap_set_timer(struct l2cap_chan *chan,
+ struct delayed_work *work, long timeout)
+{
+ BT_DBG("chan %p state %s timeout %ld", chan,
+ state_to_string(chan->state), timeout);
+
+ /* If the delayed work was cancelled, do not hold(chan), since that
+ * was already done by the previous set_timer call.
+ */
+ if (!cancel_delayed_work(work))
+ l2cap_chan_hold(chan);
+
+ schedule_delayed_work(work, timeout);
+}
+
+static inline bool l2cap_clear_timer(struct l2cap_chan *chan,
+ struct delayed_work *work)
+{
+ bool ret;
+
+ /* put(chan) if the delayed work was cancelled; otherwise the put
+ * is done in the delayed work function.
+ */
+ ret = cancel_delayed_work(work);
+ if (ret)
+ l2cap_chan_put(chan);
+
+ return ret;
+}
+
+#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
+#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer)
+#define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer)
+#define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer)
+#define __set_ack_timer(c) l2cap_set_timer(c, &c->ack_timer, \
+ msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO))
+#define __clear_ack_timer(c) l2cap_clear_timer(c, &c->ack_timer)
+
+static inline int __seq_offset(struct l2cap_chan *chan, __u16 seq1, __u16 seq2)
+{
+ if (seq1 >= seq2)
+ return seq1 - seq2;
+ else
+ return chan->tx_win_max + 1 - seq2 + seq1;
+}
+
+static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq)
+{
+ return (seq + 1) % (chan->tx_win_max + 1);
+}
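+
+/* With the default ERTM window (tx_win_max == 63) sequence numbers wrap
+ * modulo 64, so e.g. __seq_offset(chan, 2, 60) == 64 - 60 + 2 == 6 and
+ * __next_seq(chan, 63) == 0.
+ */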
+
+static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan *chan)
+{
+ return NULL;
+}
+
+static inline int l2cap_chan_no_recv(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ return -ENOSYS;
+}
+
+static inline struct sk_buff *l2cap_chan_no_alloc_skb(struct l2cap_chan *chan,
+ unsigned long hdr_len,
+ unsigned long len, int nb)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err)
+{
+}
+
+static inline void l2cap_chan_no_close(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_ready(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_state_change(struct l2cap_chan *chan,
+ int state, int err)
+{
+}
+
+static inline void l2cap_chan_no_defer(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_suspend(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_resume(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_set_shutdown(struct l2cap_chan *chan)
+{
+}
+
+static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan)
+{
+ return 0;
+}
+
+extern bool disable_ertm;
+extern bool enable_ecred;
+
+int l2cap_init_sockets(void);
+void l2cap_cleanup_sockets(void);
+bool l2cap_is_socket(struct socket *sock);
+
+void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan);
+void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan);
+void __l2cap_connect_rsp_defer(struct l2cap_chan *chan);
+
+int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm);
+int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
+
+struct l2cap_chan *l2cap_chan_create(void);
+void l2cap_chan_close(struct l2cap_chan *chan, int reason);
+int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ bdaddr_t *dst, u8 dst_type);
+int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu);
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
+void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
+int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator);
+void l2cap_chan_set_defaults(struct l2cap_chan *chan);
+int l2cap_ertm_init(struct l2cap_chan *chan);
+void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
+void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
+typedef void (*l2cap_chan_func_t)(struct l2cap_chan *chan, void *data);
+void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
+ void *data);
+void l2cap_chan_del(struct l2cap_chan *chan, int err);
+void l2cap_send_conn_req(struct l2cap_chan *chan);
+void l2cap_move_start(struct l2cap_chan *chan);
+void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
+ u8 status);
+void __l2cap_physical_cfm(struct l2cap_chan *chan, int result);
+
+struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn);
+void l2cap_conn_put(struct l2cap_conn *conn);
+
+int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);
+void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user);
+
+#endif /* __L2CAP_H */
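
The channel API above is consumed by in-kernel L2CAP users (SMP, ATT,
6LoWPAN and friends). A sketch of a minimal client that only handles
received frames, pointing the callbacks it does not need at the
l2cap_chan_no_* stubs (all names other than the l2cap_* symbols are
hypothetical):

    static int my_recv(struct l2cap_chan *chan, struct sk_buff *skb)
    {
            /* consume the frame */
            kfree_skb(skb);
            return 0;
    }

    static const struct l2cap_ops my_chan_ops = {
            .name           = "my l2cap user",
            .recv           = my_recv,
            .new_connection = l2cap_chan_no_new_connection,
            .alloc_skb      = l2cap_chan_no_alloc_skb,
            .teardown       = l2cap_chan_no_teardown,
            .close          = l2cap_chan_no_close,
            .state_change   = l2cap_chan_no_state_change,
            .ready          = l2cap_chan_no_ready,
            .defer          = l2cap_chan_no_defer,
            .suspend        = l2cap_chan_no_suspend,
            .resume         = l2cap_chan_no_resume,
            .set_shutdown   = l2cap_chan_no_set_shutdown,
            .get_sndtimeo   = l2cap_chan_no_get_sndtimeo,
    };

    static struct l2cap_chan *my_chan_create(void)
    {
            struct l2cap_chan *chan = l2cap_chan_create();

            if (!chan)
                    return NULL;

            l2cap_chan_set_defaults(chan);
            chan->ops = &my_chan_ops;
            return chan;
    }
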
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
new file mode 100644
index 000000000..5e68b3dd4
--- /dev/null
+++ b/include/net/bluetooth/mgmt.h
@@ -0,0 +1,1177 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+
+ Copyright (C) 2010 Nokia Corporation
+ Copyright (C) 2011-2012 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#define MGMT_INDEX_NONE 0xFFFF
+
+#define MGMT_STATUS_SUCCESS 0x00
+#define MGMT_STATUS_UNKNOWN_COMMAND 0x01
+#define MGMT_STATUS_NOT_CONNECTED 0x02
+#define MGMT_STATUS_FAILED 0x03
+#define MGMT_STATUS_CONNECT_FAILED 0x04
+#define MGMT_STATUS_AUTH_FAILED 0x05
+#define MGMT_STATUS_NOT_PAIRED 0x06
+#define MGMT_STATUS_NO_RESOURCES 0x07
+#define MGMT_STATUS_TIMEOUT 0x08
+#define MGMT_STATUS_ALREADY_CONNECTED 0x09
+#define MGMT_STATUS_BUSY 0x0a
+#define MGMT_STATUS_REJECTED 0x0b
+#define MGMT_STATUS_NOT_SUPPORTED 0x0c
+#define MGMT_STATUS_INVALID_PARAMS 0x0d
+#define MGMT_STATUS_DISCONNECTED 0x0e
+#define MGMT_STATUS_NOT_POWERED 0x0f
+#define MGMT_STATUS_CANCELLED 0x10
+#define MGMT_STATUS_INVALID_INDEX 0x11
+#define MGMT_STATUS_RFKILLED 0x12
+#define MGMT_STATUS_ALREADY_PAIRED 0x13
+#define MGMT_STATUS_PERMISSION_DENIED 0x14
+
+struct mgmt_hdr {
+ __le16 opcode;
+ __le16 index;
+ __le16 len;
+} __packed;
+
+struct mgmt_tlv {
+ __le16 type;
+ __u8 length;
+ __u8 value[];
+} __packed;
+
+struct mgmt_addr_info {
+ bdaddr_t bdaddr;
+ __u8 type;
+} __packed;
+#define MGMT_ADDR_INFO_SIZE 7
+
+#define MGMT_OP_READ_VERSION 0x0001
+#define MGMT_READ_VERSION_SIZE 0
+struct mgmt_rp_read_version {
+ __u8 version;
+ __le16 revision;
+} __packed;
+
+#define MGMT_OP_READ_COMMANDS 0x0002
+#define MGMT_READ_COMMANDS_SIZE 0
+struct mgmt_rp_read_commands {
+ __le16 num_commands;
+ __le16 num_events;
+ __le16 opcodes[];
+} __packed;
+
+#define MGMT_OP_READ_INDEX_LIST 0x0003
+#define MGMT_READ_INDEX_LIST_SIZE 0
+struct mgmt_rp_read_index_list {
+ __le16 num_controllers;
+ __le16 index[];
+} __packed;
+
+/* Reserve one extra byte for names in management messages so that they
+ * are always guaranteed to be nul-terminated */
+#define MGMT_MAX_NAME_LENGTH (HCI_MAX_NAME_LENGTH + 1)
+#define MGMT_MAX_SHORT_NAME_LENGTH (HCI_MAX_SHORT_NAME_LENGTH + 1)
+
+#define MGMT_SETTING_POWERED BIT(0)
+#define MGMT_SETTING_CONNECTABLE BIT(1)
+#define MGMT_SETTING_FAST_CONNECTABLE BIT(2)
+#define MGMT_SETTING_DISCOVERABLE BIT(3)
+#define MGMT_SETTING_BONDABLE BIT(4)
+#define MGMT_SETTING_LINK_SECURITY BIT(5)
+#define MGMT_SETTING_SSP BIT(6)
+#define MGMT_SETTING_BREDR BIT(7)
+#define MGMT_SETTING_HS BIT(8)
+#define MGMT_SETTING_LE BIT(9)
+#define MGMT_SETTING_ADVERTISING BIT(10)
+#define MGMT_SETTING_SECURE_CONN BIT(11)
+#define MGMT_SETTING_DEBUG_KEYS BIT(12)
+#define MGMT_SETTING_PRIVACY BIT(13)
+#define MGMT_SETTING_CONFIGURATION BIT(14)
+#define MGMT_SETTING_STATIC_ADDRESS BIT(15)
+#define MGMT_SETTING_PHY_CONFIGURATION BIT(16)
+#define MGMT_SETTING_WIDEBAND_SPEECH BIT(17)
+#define MGMT_SETTING_CIS_CENTRAL BIT(18)
+#define MGMT_SETTING_CIS_PERIPHERAL BIT(19)
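+
+/* These bits populate the supported_settings and current_settings words
+ * of mgmt_rp_read_info below; a reply is tested with e.g.:
+ *
+ *     if (le32_to_cpu(rp->current_settings) & MGMT_SETTING_POWERED)
+ *             ...
+ */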
+
+#define MGMT_OP_READ_INFO 0x0004
+#define MGMT_READ_INFO_SIZE 0
+struct mgmt_rp_read_info {
+ bdaddr_t bdaddr;
+ __u8 version;
+ __le16 manufacturer;
+ __le32 supported_settings;
+ __le32 current_settings;
+ __u8 dev_class[3];
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
+} __packed;
+
+struct mgmt_mode {
+ __u8 val;
+} __packed;
+
+#define MGMT_SETTING_SIZE 1
+
+#define MGMT_OP_SET_POWERED 0x0005
+
+#define MGMT_OP_SET_DISCOVERABLE 0x0006
+struct mgmt_cp_set_discoverable {
+ __u8 val;
+ __le16 timeout;
+} __packed;
+#define MGMT_SET_DISCOVERABLE_SIZE 3
+
+#define MGMT_OP_SET_CONNECTABLE 0x0007
+
+#define MGMT_OP_SET_FAST_CONNECTABLE 0x0008
+
+#define MGMT_OP_SET_BONDABLE 0x0009
+
+#define MGMT_OP_SET_LINK_SECURITY 0x000A
+
+#define MGMT_OP_SET_SSP 0x000B
+
+#define MGMT_OP_SET_HS 0x000C
+
+#define MGMT_OP_SET_LE 0x000D
+#define MGMT_OP_SET_DEV_CLASS 0x000E
+struct mgmt_cp_set_dev_class {
+ __u8 major;
+ __u8 minor;
+} __packed;
+#define MGMT_SET_DEV_CLASS_SIZE 2
+
+#define MGMT_OP_SET_LOCAL_NAME 0x000F
+struct mgmt_cp_set_local_name {
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
+} __packed;
+#define MGMT_SET_LOCAL_NAME_SIZE 260
+
+#define MGMT_OP_ADD_UUID 0x0010
+struct mgmt_cp_add_uuid {
+ __u8 uuid[16];
+ __u8 svc_hint;
+} __packed;
+#define MGMT_ADD_UUID_SIZE 17
+
+#define MGMT_OP_REMOVE_UUID 0x0011
+struct mgmt_cp_remove_uuid {
+ __u8 uuid[16];
+} __packed;
+#define MGMT_REMOVE_UUID_SIZE 16
+
+struct mgmt_link_key_info {
+ struct mgmt_addr_info addr;
+ __u8 type;
+ __u8 val[16];
+ __u8 pin_len;
+} __packed;
+
+#define MGMT_OP_LOAD_LINK_KEYS 0x0012
+struct mgmt_cp_load_link_keys {
+ __u8 debug_keys;
+ __le16 key_count;
+ struct mgmt_link_key_info keys[];
+} __packed;
+#define MGMT_LOAD_LINK_KEYS_SIZE 3
+
+#define MGMT_LTK_UNAUTHENTICATED 0x00
+#define MGMT_LTK_AUTHENTICATED 0x01
+#define MGMT_LTK_P256_UNAUTH 0x02
+#define MGMT_LTK_P256_AUTH 0x03
+#define MGMT_LTK_P256_DEBUG 0x04
+
+struct mgmt_ltk_info {
+ struct mgmt_addr_info addr;
+ __u8 type;
+ __u8 initiator;
+ __u8 enc_size;
+ __le16 ediv;
+ __le64 rand;
+ __u8 val[16];
+} __packed;
+
+#define MGMT_OP_LOAD_LONG_TERM_KEYS 0x0013
+struct mgmt_cp_load_long_term_keys {
+ __le16 key_count;
+ struct mgmt_ltk_info keys[];
+} __packed;
+#define MGMT_LOAD_LONG_TERM_KEYS_SIZE 2
+
+#define MGMT_OP_DISCONNECT 0x0014
+struct mgmt_cp_disconnect {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_DISCONNECT_SIZE MGMT_ADDR_INFO_SIZE
+struct mgmt_rp_disconnect {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_GET_CONNECTIONS 0x0015
+#define MGMT_GET_CONNECTIONS_SIZE 0
+struct mgmt_rp_get_connections {
+ __le16 conn_count;
+ struct mgmt_addr_info addr[];
+} __packed;
+
+#define MGMT_OP_PIN_CODE_REPLY 0x0016
+struct mgmt_cp_pin_code_reply {
+ struct mgmt_addr_info addr;
+ __u8 pin_len;
+ __u8 pin_code[16];
+} __packed;
+#define MGMT_PIN_CODE_REPLY_SIZE (MGMT_ADDR_INFO_SIZE + 17)
+struct mgmt_rp_pin_code_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0017
+struct mgmt_cp_pin_code_neg_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_PIN_CODE_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_SET_IO_CAPABILITY 0x0018
+struct mgmt_cp_set_io_capability {
+ __u8 io_capability;
+} __packed;
+#define MGMT_SET_IO_CAPABILITY_SIZE 1
+
+#define MGMT_OP_PAIR_DEVICE 0x0019
+struct mgmt_cp_pair_device {
+ struct mgmt_addr_info addr;
+ __u8 io_cap;
+} __packed;
+#define MGMT_PAIR_DEVICE_SIZE (MGMT_ADDR_INFO_SIZE + 1)
+struct mgmt_rp_pair_device {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_CANCEL_PAIR_DEVICE 0x001A
+#define MGMT_CANCEL_PAIR_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_UNPAIR_DEVICE 0x001B
+struct mgmt_cp_unpair_device {
+ struct mgmt_addr_info addr;
+ __u8 disconnect;
+} __packed;
+#define MGMT_UNPAIR_DEVICE_SIZE (MGMT_ADDR_INFO_SIZE + 1)
+struct mgmt_rp_unpair_device {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_USER_CONFIRM_REPLY 0x001C
+struct mgmt_cp_user_confirm_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_USER_CONFIRM_REPLY_SIZE MGMT_ADDR_INFO_SIZE
+struct mgmt_rp_user_confirm_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x001D
+struct mgmt_cp_user_confirm_neg_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_USER_CONFIRM_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_USER_PASSKEY_REPLY 0x001E
+struct mgmt_cp_user_passkey_reply {
+ struct mgmt_addr_info addr;
+ __le32 passkey;
+} __packed;
+#define MGMT_USER_PASSKEY_REPLY_SIZE (MGMT_ADDR_INFO_SIZE + 4)
+struct mgmt_rp_user_passkey_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_USER_PASSKEY_NEG_REPLY 0x001F
+struct mgmt_cp_user_passkey_neg_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_USER_PASSKEY_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_READ_LOCAL_OOB_DATA 0x0020
+#define MGMT_READ_LOCAL_OOB_DATA_SIZE 0
+struct mgmt_rp_read_local_oob_data {
+ __u8 hash192[16];
+ __u8 rand192[16];
+ __u8 hash256[16];
+ __u8 rand256[16];
+} __packed;
+
+#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0021
+struct mgmt_cp_add_remote_oob_data {
+ struct mgmt_addr_info addr;
+ __u8 hash[16];
+ __u8 rand[16];
+} __packed;
+#define MGMT_ADD_REMOTE_OOB_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 32)
+struct mgmt_cp_add_remote_oob_ext_data {
+ struct mgmt_addr_info addr;
+ __u8 hash192[16];
+ __u8 rand192[16];
+ __u8 hash256[16];
+ __u8 rand256[16];
+} __packed;
+#define MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 64)
+
+#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x0022
+struct mgmt_cp_remove_remote_oob_data {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_REMOVE_REMOTE_OOB_DATA_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_START_DISCOVERY 0x0023
+struct mgmt_cp_start_discovery {
+ __u8 type;
+} __packed;
+#define MGMT_START_DISCOVERY_SIZE 1
+
+#define MGMT_OP_STOP_DISCOVERY 0x0024
+struct mgmt_cp_stop_discovery {
+ __u8 type;
+} __packed;
+#define MGMT_STOP_DISCOVERY_SIZE 1
+
+#define MGMT_OP_CONFIRM_NAME 0x0025
+struct mgmt_cp_confirm_name {
+ struct mgmt_addr_info addr;
+ __u8 name_known;
+} __packed;
+#define MGMT_CONFIRM_NAME_SIZE (MGMT_ADDR_INFO_SIZE + 1)
+struct mgmt_rp_confirm_name {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_BLOCK_DEVICE 0x0026
+struct mgmt_cp_block_device {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_BLOCK_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_UNBLOCK_DEVICE 0x0027
+struct mgmt_cp_unblock_device {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_UNBLOCK_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_SET_DEVICE_ID 0x0028
+struct mgmt_cp_set_device_id {
+ __le16 source;
+ __le16 vendor;
+ __le16 product;
+ __le16 version;
+} __packed;
+#define MGMT_SET_DEVICE_ID_SIZE 8
+
+#define MGMT_OP_SET_ADVERTISING 0x0029
+
+#define MGMT_OP_SET_BREDR 0x002A
+
+#define MGMT_OP_SET_STATIC_ADDRESS 0x002B
+struct mgmt_cp_set_static_address {
+ bdaddr_t bdaddr;
+} __packed;
+#define MGMT_SET_STATIC_ADDRESS_SIZE 6
+
+#define MGMT_OP_SET_SCAN_PARAMS 0x002C
+struct mgmt_cp_set_scan_params {
+ __le16 interval;
+ __le16 window;
+} __packed;
+#define MGMT_SET_SCAN_PARAMS_SIZE 4
+
+#define MGMT_OP_SET_SECURE_CONN 0x002D
+
+#define MGMT_OP_SET_DEBUG_KEYS 0x002E
+
+#define MGMT_OP_SET_PRIVACY 0x002F
+struct mgmt_cp_set_privacy {
+ __u8 privacy;
+ __u8 irk[16];
+} __packed;
+#define MGMT_SET_PRIVACY_SIZE 17
+
+struct mgmt_irk_info {
+ struct mgmt_addr_info addr;
+ __u8 val[16];
+} __packed;
+
+#define MGMT_OP_LOAD_IRKS 0x0030
+struct mgmt_cp_load_irks {
+ __le16 irk_count;
+ struct mgmt_irk_info irks[];
+} __packed;
+#define MGMT_LOAD_IRKS_SIZE 2
+
+#define MGMT_OP_GET_CONN_INFO 0x0031
+struct mgmt_cp_get_conn_info {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_GET_CONN_INFO_SIZE MGMT_ADDR_INFO_SIZE
+struct mgmt_rp_get_conn_info {
+ struct mgmt_addr_info addr;
+ __s8 rssi;
+ __s8 tx_power;
+ __s8 max_tx_power;
+} __packed;
+
+#define MGMT_OP_GET_CLOCK_INFO 0x0032
+struct mgmt_cp_get_clock_info {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_GET_CLOCK_INFO_SIZE MGMT_ADDR_INFO_SIZE
+struct mgmt_rp_get_clock_info {
+ struct mgmt_addr_info addr;
+ __le32 local_clock;
+ __le32 piconet_clock;
+ __le16 accuracy;
+} __packed;
+
+#define MGMT_OP_ADD_DEVICE 0x0033
+struct mgmt_cp_add_device {
+ struct mgmt_addr_info addr;
+ __u8 action;
+} __packed;
+#define MGMT_ADD_DEVICE_SIZE (MGMT_ADDR_INFO_SIZE + 1)
+
+#define MGMT_OP_REMOVE_DEVICE 0x0034
+struct mgmt_cp_remove_device {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_REMOVE_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
+
+struct mgmt_conn_param {
+ struct mgmt_addr_info addr;
+ __le16 min_interval;
+ __le16 max_interval;
+ __le16 latency;
+ __le16 timeout;
+} __packed;
+
+#define MGMT_OP_LOAD_CONN_PARAM 0x0035
+struct mgmt_cp_load_conn_param {
+ __le16 param_count;
+ struct mgmt_conn_param params[];
+} __packed;
+#define MGMT_LOAD_CONN_PARAM_SIZE 2
+
+#define MGMT_OP_READ_UNCONF_INDEX_LIST 0x0036
+#define MGMT_READ_UNCONF_INDEX_LIST_SIZE 0
+struct mgmt_rp_read_unconf_index_list {
+ __le16 num_controllers;
+ __le16 index[];
+} __packed;
+
+#define MGMT_OPTION_EXTERNAL_CONFIG 0x00000001
+#define MGMT_OPTION_PUBLIC_ADDRESS 0x00000002
+
+#define MGMT_OP_READ_CONFIG_INFO 0x0037
+#define MGMT_READ_CONFIG_INFO_SIZE 0
+struct mgmt_rp_read_config_info {
+ __le16 manufacturer;
+ __le32 supported_options;
+ __le32 missing_options;
+} __packed;
+
+#define MGMT_OP_SET_EXTERNAL_CONFIG 0x0038
+struct mgmt_cp_set_external_config {
+ __u8 config;
+} __packed;
+#define MGMT_SET_EXTERNAL_CONFIG_SIZE 1
+
+#define MGMT_OP_SET_PUBLIC_ADDRESS 0x0039
+struct mgmt_cp_set_public_address {
+ bdaddr_t bdaddr;
+} __packed;
+#define MGMT_SET_PUBLIC_ADDRESS_SIZE 6
+
+#define MGMT_OP_START_SERVICE_DISCOVERY 0x003A
+struct mgmt_cp_start_service_discovery {
+ __u8 type;
+ __s8 rssi;
+ __le16 uuid_count;
+ __u8 uuids[][16];
+} __packed;
+#define MGMT_START_SERVICE_DISCOVERY_SIZE 4
+
+#define MGMT_OP_READ_LOCAL_OOB_EXT_DATA 0x003B
+struct mgmt_cp_read_local_oob_ext_data {
+ __u8 type;
+} __packed;
+#define MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE 1
+struct mgmt_rp_read_local_oob_ext_data {
+ __u8 type;
+ __le16 eir_len;
+ __u8 eir[];
+} __packed;
+
+#define MGMT_OP_READ_EXT_INDEX_LIST 0x003C
+#define MGMT_READ_EXT_INDEX_LIST_SIZE 0
+struct mgmt_rp_read_ext_index_list {
+ __le16 num_controllers;
+ struct {
+ __le16 index;
+ __u8 type;
+ __u8 bus;
+ } entry[];
+} __packed;
+
+#define MGMT_OP_READ_ADV_FEATURES 0x003D
+#define MGMT_READ_ADV_FEATURES_SIZE 0
+struct mgmt_rp_read_adv_features {
+ __le32 supported_flags;
+ __u8 max_adv_data_len;
+ __u8 max_scan_rsp_len;
+ __u8 max_instances;
+ __u8 num_instances;
+ __u8 instance[];
+} __packed;
+
+#define MGMT_OP_ADD_ADVERTISING 0x003E
+struct mgmt_cp_add_advertising {
+ __u8 instance;
+ __le32 flags;
+ __le16 duration;
+ __le16 timeout;
+ __u8 adv_data_len;
+ __u8 scan_rsp_len;
+ __u8 data[];
+} __packed;
+#define MGMT_ADD_ADVERTISING_SIZE 11
+struct mgmt_rp_add_advertising {
+ __u8 instance;
+} __packed;
+
+#define MGMT_ADV_FLAG_CONNECTABLE BIT(0)
+#define MGMT_ADV_FLAG_DISCOV BIT(1)
+#define MGMT_ADV_FLAG_LIMITED_DISCOV BIT(2)
+#define MGMT_ADV_FLAG_MANAGED_FLAGS BIT(3)
+#define MGMT_ADV_FLAG_TX_POWER BIT(4)
+#define MGMT_ADV_FLAG_APPEARANCE BIT(5)
+#define MGMT_ADV_FLAG_LOCAL_NAME BIT(6)
+#define MGMT_ADV_FLAG_SEC_1M BIT(7)
+#define MGMT_ADV_FLAG_SEC_2M BIT(8)
+#define MGMT_ADV_FLAG_SEC_CODED BIT(9)
+#define MGMT_ADV_FLAG_CAN_SET_TX_POWER BIT(10)
+#define MGMT_ADV_FLAG_HW_OFFLOAD BIT(11)
+#define MGMT_ADV_PARAM_DURATION BIT(12)
+#define MGMT_ADV_PARAM_TIMEOUT BIT(13)
+#define MGMT_ADV_PARAM_INTERVALS BIT(14)
+#define MGMT_ADV_PARAM_TX_POWER BIT(15)
+#define MGMT_ADV_PARAM_SCAN_RSP BIT(16)
+
+#define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \
+ MGMT_ADV_FLAG_SEC_CODED)
+
+#define MGMT_OP_REMOVE_ADVERTISING 0x003F
+struct mgmt_cp_remove_advertising {
+ __u8 instance;
+} __packed;
+#define MGMT_REMOVE_ADVERTISING_SIZE 1
+struct mgmt_rp_remove_advertising {
+ __u8 instance;
+} __packed;
+
+#define MGMT_OP_GET_ADV_SIZE_INFO 0x0040
+struct mgmt_cp_get_adv_size_info {
+ __u8 instance;
+ __le32 flags;
+} __packed;
+#define MGMT_GET_ADV_SIZE_INFO_SIZE 5
+struct mgmt_rp_get_adv_size_info {
+ __u8 instance;
+ __le32 flags;
+ __u8 max_adv_data_len;
+ __u8 max_scan_rsp_len;
+} __packed;
+
+#define MGMT_OP_START_LIMITED_DISCOVERY 0x0041
+
+#define MGMT_OP_READ_EXT_INFO 0x0042
+#define MGMT_READ_EXT_INFO_SIZE 0
+struct mgmt_rp_read_ext_info {
+ bdaddr_t bdaddr;
+ __u8 version;
+ __le16 manufacturer;
+ __le32 supported_settings;
+ __le32 current_settings;
+ __le16 eir_len;
+ __u8 eir[];
+} __packed;
+
+#define MGMT_OP_SET_APPEARANCE 0x0043
+struct mgmt_cp_set_appearance {
+ __le16 appearance;
+} __packed;
+#define MGMT_SET_APPEARANCE_SIZE 2
+
+#define MGMT_OP_GET_PHY_CONFIGURATION 0x0044
+struct mgmt_rp_get_phy_configuration {
+ __le32 supported_phys;
+ __le32 configurable_phys;
+ __le32 selected_phys;
+} __packed;
+#define MGMT_GET_PHY_CONFIGURATION_SIZE 0
+
+#define MGMT_PHY_BR_1M_1SLOT BIT(0)
+#define MGMT_PHY_BR_1M_3SLOT BIT(1)
+#define MGMT_PHY_BR_1M_5SLOT BIT(2)
+#define MGMT_PHY_EDR_2M_1SLOT BIT(3)
+#define MGMT_PHY_EDR_2M_3SLOT BIT(4)
+#define MGMT_PHY_EDR_2M_5SLOT BIT(5)
+#define MGMT_PHY_EDR_3M_1SLOT BIT(6)
+#define MGMT_PHY_EDR_3M_3SLOT BIT(7)
+#define MGMT_PHY_EDR_3M_5SLOT BIT(8)
+#define MGMT_PHY_LE_1M_TX BIT(9)
+#define MGMT_PHY_LE_1M_RX BIT(10)
+#define MGMT_PHY_LE_2M_TX BIT(11)
+#define MGMT_PHY_LE_2M_RX BIT(12)
+#define MGMT_PHY_LE_CODED_TX BIT(13)
+#define MGMT_PHY_LE_CODED_RX BIT(14)
+
+#define MGMT_PHY_BREDR_MASK (MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_BR_1M_3SLOT | \
+ MGMT_PHY_BR_1M_5SLOT | MGMT_PHY_EDR_2M_1SLOT | \
+ MGMT_PHY_EDR_2M_3SLOT | MGMT_PHY_EDR_2M_5SLOT | \
+ MGMT_PHY_EDR_3M_1SLOT | MGMT_PHY_EDR_3M_3SLOT | \
+ MGMT_PHY_EDR_3M_5SLOT)
+#define MGMT_PHY_LE_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX | \
+ MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_2M_RX | \
+ MGMT_PHY_LE_CODED_TX | MGMT_PHY_LE_CODED_RX)
+#define MGMT_PHY_LE_TX_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_2M_TX | \
+ MGMT_PHY_LE_CODED_TX)
+#define MGMT_PHY_LE_RX_MASK (MGMT_PHY_LE_1M_RX | MGMT_PHY_LE_2M_RX | \
+ MGMT_PHY_LE_CODED_RX)
+
+#define MGMT_OP_SET_PHY_CONFIGURATION 0x0045
+struct mgmt_cp_set_phy_configuration {
+ __le32 selected_phys;
+} __packed;
+#define MGMT_SET_PHY_CONFIGURATION_SIZE 4
+
+#define MGMT_OP_SET_BLOCKED_KEYS 0x0046
+
+#define HCI_BLOCKED_KEY_TYPE_LINKKEY 0x00
+#define HCI_BLOCKED_KEY_TYPE_LTK 0x01
+#define HCI_BLOCKED_KEY_TYPE_IRK 0x02
+
+struct mgmt_blocked_key_info {
+ __u8 type;
+ __u8 val[16];
+} __packed;
+
+struct mgmt_cp_set_blocked_keys {
+ __le16 key_count;
+ struct mgmt_blocked_key_info keys[];
+} __packed;
+#define MGMT_OP_SET_BLOCKED_KEYS_SIZE 2
+
+#define MGMT_OP_SET_WIDEBAND_SPEECH 0x0047
+
+#define MGMT_CAP_SEC_FLAGS 0x01
+#define MGMT_CAP_MAX_ENC_KEY_SIZE 0x02
+#define MGMT_CAP_SMP_MAX_ENC_KEY_SIZE 0x03
+#define MGMT_CAP_LE_TX_PWR 0x04
+
+#define MGMT_OP_READ_CONTROLLER_CAP 0x0048
+#define MGMT_READ_CONTROLLER_CAP_SIZE 0
+struct mgmt_rp_read_controller_cap {
+ __le16 cap_len;
+ __u8 cap[];
+} __packed;
+
+#define MGMT_OP_READ_EXP_FEATURES_INFO 0x0049
+#define MGMT_READ_EXP_FEATURES_INFO_SIZE 0
+struct mgmt_rp_read_exp_features_info {
+ __le16 feature_count;
+ struct {
+ __u8 uuid[16];
+ __le32 flags;
+ } features[];
+} __packed;
+
+#define MGMT_OP_SET_EXP_FEATURE 0x004a
+struct mgmt_cp_set_exp_feature {
+ __u8 uuid[16];
+ __u8 param[];
+} __packed;
+#define MGMT_SET_EXP_FEATURE_SIZE 16
+struct mgmt_rp_set_exp_feature {
+ __u8 uuid[16];
+ __le32 flags;
+} __packed;
+
+#define MGMT_OP_READ_DEF_SYSTEM_CONFIG 0x004b
+#define MGMT_READ_DEF_SYSTEM_CONFIG_SIZE 0
+
+#define MGMT_OP_SET_DEF_SYSTEM_CONFIG 0x004c
+#define MGMT_SET_DEF_SYSTEM_CONFIG_SIZE 0
+
+#define MGMT_OP_READ_DEF_RUNTIME_CONFIG 0x004d
+#define MGMT_READ_DEF_RUNTIME_CONFIG_SIZE 0
+
+#define MGMT_OP_SET_DEF_RUNTIME_CONFIG 0x004e
+#define MGMT_SET_DEF_RUNTIME_CONFIG_SIZE 0
+
+#define MGMT_OP_GET_DEVICE_FLAGS 0x004F
+#define MGMT_GET_DEVICE_FLAGS_SIZE 7
+struct mgmt_cp_get_device_flags {
+ struct mgmt_addr_info addr;
+} __packed;
+struct mgmt_rp_get_device_flags {
+ struct mgmt_addr_info addr;
+ __le32 supported_flags;
+ __le32 current_flags;
+} __packed;
+
+#define MGMT_OP_SET_DEVICE_FLAGS 0x0050
+#define MGMT_SET_DEVICE_FLAGS_SIZE 11
+struct mgmt_cp_set_device_flags {
+ struct mgmt_addr_info addr;
+ __le32 current_flags;
+} __packed;
+struct mgmt_rp_set_device_flags {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS BIT(0)
+
+#define MGMT_OP_READ_ADV_MONITOR_FEATURES 0x0051
+#define MGMT_READ_ADV_MONITOR_FEATURES_SIZE 0
+struct mgmt_rp_read_adv_monitor_features {
+ __le32 supported_features;
+ __le32 enabled_features;
+ __le16 max_num_handles;
+ __u8 max_num_patterns;
+ __le16 num_handles;
+ __le16 handles[];
+} __packed;
+
+struct mgmt_adv_pattern {
+ __u8 ad_type;
+ __u8 offset;
+ __u8 length;
+ __u8 value[31];
+} __packed;
+
+#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR 0x0052
+struct mgmt_cp_add_adv_patterns_monitor {
+ __u8 pattern_count;
+ struct mgmt_adv_pattern patterns[];
+} __packed;
+#define MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE 1
+struct mgmt_rp_add_adv_patterns_monitor {
+ __le16 monitor_handle;
+} __packed;
+
+#define MGMT_OP_REMOVE_ADV_MONITOR 0x0053
+struct mgmt_cp_remove_adv_monitor {
+ __le16 monitor_handle;
+} __packed;
+#define MGMT_REMOVE_ADV_MONITOR_SIZE 2
+struct mgmt_rp_remove_adv_monitor {
+ __le16 monitor_handle;
+} __packed;
+
+#define MGMT_OP_ADD_EXT_ADV_PARAMS 0x0054
+struct mgmt_cp_add_ext_adv_params {
+ __u8 instance;
+ __le32 flags;
+ __le16 duration;
+ __le16 timeout;
+ __le32 min_interval;
+ __le32 max_interval;
+ __s8 tx_power;
+} __packed;
+#define MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE 18
+struct mgmt_rp_add_ext_adv_params {
+ __u8 instance;
+ __s8 tx_power;
+ __u8 max_adv_data_len;
+ __u8 max_scan_rsp_len;
+} __packed;
+
+#define MGMT_OP_ADD_EXT_ADV_DATA 0x0055
+struct mgmt_cp_add_ext_adv_data {
+ __u8 instance;
+ __u8 adv_data_len;
+ __u8 scan_rsp_len;
+ __u8 data[];
+} __packed;
+#define MGMT_ADD_EXT_ADV_DATA_SIZE 3
+struct mgmt_rp_add_ext_adv_data {
+ __u8 instance;
+} __packed;
+
+struct mgmt_adv_rssi_thresholds {
+ __s8 high_threshold;
+ __le16 high_threshold_timeout;
+ __s8 low_threshold;
+ __le16 low_threshold_timeout;
+ __u8 sampling_period;
+} __packed;
+
+#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI 0x0056
+struct mgmt_cp_add_adv_patterns_monitor_rssi {
+ struct mgmt_adv_rssi_thresholds rssi;
+ __u8 pattern_count;
+ struct mgmt_adv_pattern patterns[];
+} __packed;
+#define MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE 8
+
+#define MGMT_OP_SET_MESH_RECEIVER 0x0057
+struct mgmt_cp_set_mesh {
+ __u8 enable;
+ __le16 window;
+ __le16 period;
+ __u8 num_ad_types;
+ __u8 ad_types[];
+} __packed;
+#define MGMT_SET_MESH_RECEIVER_SIZE 6
+
+#define MGMT_OP_MESH_READ_FEATURES 0x0058
+#define MGMT_MESH_READ_FEATURES_SIZE 0
+#define MESH_HANDLES_MAX 3
+struct mgmt_rp_mesh_read_features {
+ __le16 index;
+ __u8 max_handles;
+ __u8 used_handles;
+ __u8 handles[MESH_HANDLES_MAX];
+} __packed;
+
+#define MGMT_OP_MESH_SEND 0x0059
+struct mgmt_cp_mesh_send {
+ struct mgmt_addr_info addr;
+ __le64 instant;
+ __le16 delay;
+ __u8 cnt;
+ __u8 adv_data_len;
+ __u8 adv_data[];
+} __packed;
+#define MGMT_MESH_SEND_SIZE 19
+
+#define MGMT_OP_MESH_SEND_CANCEL 0x005A
+struct mgmt_cp_mesh_send_cancel {
+ __u8 handle;
+} __packed;
+#define MGMT_MESH_SEND_CANCEL_SIZE 1
+
+#define MGMT_EV_CMD_COMPLETE 0x0001
+struct mgmt_ev_cmd_complete {
+ __le16 opcode;
+ __u8 status;
+ __u8 data[];
+} __packed;
+
+#define MGMT_EV_CMD_STATUS 0x0002
+struct mgmt_ev_cmd_status {
+ __le16 opcode;
+ __u8 status;
+} __packed;
+
+#define MGMT_EV_CONTROLLER_ERROR 0x0003
+struct mgmt_ev_controller_error {
+ __u8 error_code;
+} __packed;
+
+#define MGMT_EV_INDEX_ADDED 0x0004
+
+#define MGMT_EV_INDEX_REMOVED 0x0005
+
+#define MGMT_EV_NEW_SETTINGS 0x0006
+
+#define MGMT_EV_CLASS_OF_DEV_CHANGED 0x0007
+struct mgmt_ev_class_of_dev_changed {
+ __u8 dev_class[3];
+} __packed;
+
+#define MGMT_EV_LOCAL_NAME_CHANGED 0x0008
+struct mgmt_ev_local_name_changed {
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
+} __packed;
+
+#define MGMT_EV_NEW_LINK_KEY 0x0009
+struct mgmt_ev_new_link_key {
+ __u8 store_hint;
+ struct mgmt_link_key_info key;
+} __packed;
+
+#define MGMT_EV_NEW_LONG_TERM_KEY 0x000A
+struct mgmt_ev_new_long_term_key {
+ __u8 store_hint;
+ struct mgmt_ltk_info key;
+} __packed;
+
+#define MGMT_EV_DEVICE_CONNECTED 0x000B
+struct mgmt_ev_device_connected {
+ struct mgmt_addr_info addr;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[];
+} __packed;
+
+#define MGMT_DEV_DISCONN_UNKNOWN 0x00
+#define MGMT_DEV_DISCONN_TIMEOUT 0x01
+#define MGMT_DEV_DISCONN_LOCAL_HOST 0x02
+#define MGMT_DEV_DISCONN_REMOTE 0x03
+#define MGMT_DEV_DISCONN_AUTH_FAILURE 0x04
+#define MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND 0x05
+
+#define MGMT_EV_DEVICE_DISCONNECTED 0x000C
+struct mgmt_ev_device_disconnected {
+ struct mgmt_addr_info addr;
+ __u8 reason;
+} __packed;
+
+#define MGMT_EV_CONNECT_FAILED 0x000D
+struct mgmt_ev_connect_failed {
+ struct mgmt_addr_info addr;
+ __u8 status;
+} __packed;
+
+#define MGMT_EV_PIN_CODE_REQUEST 0x000E
+struct mgmt_ev_pin_code_request {
+ struct mgmt_addr_info addr;
+ __u8 secure;
+} __packed;
+
+#define MGMT_EV_USER_CONFIRM_REQUEST 0x000F
+struct mgmt_ev_user_confirm_request {
+ struct mgmt_addr_info addr;
+ __u8 confirm_hint;
+ __le32 value;
+} __packed;
+
+#define MGMT_EV_USER_PASSKEY_REQUEST 0x0010
+struct mgmt_ev_user_passkey_request {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_AUTH_FAILED 0x0011
+struct mgmt_ev_auth_failed {
+ struct mgmt_addr_info addr;
+ __u8 status;
+} __packed;
+
+#define MGMT_DEV_FOUND_CONFIRM_NAME BIT(0)
+#define MGMT_DEV_FOUND_LEGACY_PAIRING BIT(1)
+#define MGMT_DEV_FOUND_NOT_CONNECTABLE BIT(2)
+#define MGMT_DEV_FOUND_INITIATED_CONN BIT(3)
+#define MGMT_DEV_FOUND_NAME_REQUEST_FAILED BIT(4)
+#define MGMT_DEV_FOUND_SCAN_RSP BIT(5)
+
+#define MGMT_EV_DEVICE_FOUND 0x0012
+struct mgmt_ev_device_found {
+ struct mgmt_addr_info addr;
+ __s8 rssi;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[];
+} __packed;
+
+#define MGMT_EV_DISCOVERING 0x0013
+struct mgmt_ev_discovering {
+ __u8 type;
+ __u8 discovering;
+} __packed;
+
+#define MGMT_EV_DEVICE_BLOCKED 0x0014
+struct mgmt_ev_device_blocked {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_DEVICE_UNBLOCKED 0x0015
+struct mgmt_ev_device_unblocked {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_DEVICE_UNPAIRED 0x0016
+struct mgmt_ev_device_unpaired {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_PASSKEY_NOTIFY 0x0017
+struct mgmt_ev_passkey_notify {
+ struct mgmt_addr_info addr;
+ __le32 passkey;
+ __u8 entered;
+} __packed;
+
+#define MGMT_EV_NEW_IRK 0x0018
+struct mgmt_ev_new_irk {
+ __u8 store_hint;
+ bdaddr_t rpa;
+ struct mgmt_irk_info irk;
+} __packed;
+
+#define MGMT_CSRK_LOCAL_UNAUTHENTICATED 0x00
+#define MGMT_CSRK_REMOTE_UNAUTHENTICATED 0x01
+#define MGMT_CSRK_LOCAL_AUTHENTICATED 0x02
+#define MGMT_CSRK_REMOTE_AUTHENTICATED 0x03
+
+struct mgmt_csrk_info {
+ struct mgmt_addr_info addr;
+ __u8 type;
+ __u8 val[16];
+} __packed;
+
+#define MGMT_EV_NEW_CSRK 0x0019
+struct mgmt_ev_new_csrk {
+ __u8 store_hint;
+ struct mgmt_csrk_info key;
+} __packed;
+
+#define MGMT_EV_DEVICE_ADDED 0x001a
+struct mgmt_ev_device_added {
+ struct mgmt_addr_info addr;
+ __u8 action;
+} __packed;
+
+#define MGMT_EV_DEVICE_REMOVED 0x001b
+struct mgmt_ev_device_removed {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_NEW_CONN_PARAM 0x001c
+struct mgmt_ev_new_conn_param {
+ struct mgmt_addr_info addr;
+ __u8 store_hint;
+ __le16 min_interval;
+ __le16 max_interval;
+ __le16 latency;
+ __le16 timeout;
+} __packed;
+
+#define MGMT_EV_UNCONF_INDEX_ADDED 0x001d
+
+#define MGMT_EV_UNCONF_INDEX_REMOVED 0x001e
+
+#define MGMT_EV_NEW_CONFIG_OPTIONS 0x001f
+
+struct mgmt_ev_ext_index {
+ __u8 type;
+ __u8 bus;
+} __packed;
+
+#define MGMT_EV_EXT_INDEX_ADDED 0x0020
+
+#define MGMT_EV_EXT_INDEX_REMOVED 0x0021
+
+#define MGMT_EV_LOCAL_OOB_DATA_UPDATED 0x0022
+struct mgmt_ev_local_oob_data_updated {
+ __u8 type;
+ __le16 eir_len;
+ __u8 eir[];
+} __packed;
+
+#define MGMT_EV_ADVERTISING_ADDED 0x0023
+struct mgmt_ev_advertising_added {
+ __u8 instance;
+} __packed;
+
+#define MGMT_EV_ADVERTISING_REMOVED 0x0024
+struct mgmt_ev_advertising_removed {
+ __u8 instance;
+} __packed;
+
+#define MGMT_EV_EXT_INFO_CHANGED 0x0025
+struct mgmt_ev_ext_info_changed {
+ __le16 eir_len;
+ __u8 eir[];
+} __packed;
+
+#define MGMT_EV_PHY_CONFIGURATION_CHANGED 0x0026
+struct mgmt_ev_phy_configuration_changed {
+ __le32 selected_phys;
+} __packed;
+
+#define MGMT_EV_EXP_FEATURE_CHANGED 0x0027
+struct mgmt_ev_exp_feature_changed {
+ __u8 uuid[16];
+ __le32 flags;
+} __packed;
+
+#define MGMT_EV_DEVICE_FLAGS_CHANGED 0x002a
+struct mgmt_ev_device_flags_changed {
+ struct mgmt_addr_info addr;
+ __le32 supported_flags;
+ __le32 current_flags;
+} __packed;
+
+#define MGMT_EV_ADV_MONITOR_ADDED 0x002b
+struct mgmt_ev_adv_monitor_added {
+ __le16 monitor_handle;
+} __packed;
+
+#define MGMT_EV_ADV_MONITOR_REMOVED 0x002c
+struct mgmt_ev_adv_monitor_removed {
+ __le16 monitor_handle;
+} __packed;
+
+#define MGMT_EV_CONTROLLER_SUSPEND 0x002d
+struct mgmt_ev_controller_suspend {
+ __u8 suspend_state;
+} __packed;
+
+#define MGMT_EV_CONTROLLER_RESUME 0x002e
+struct mgmt_ev_controller_resume {
+ __u8 wake_reason;
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_WAKE_REASON_NON_BT_WAKE 0x0
+#define MGMT_WAKE_REASON_UNEXPECTED 0x1
+#define MGMT_WAKE_REASON_REMOTE_WAKE 0x2
+
+#define MGMT_EV_ADV_MONITOR_DEVICE_FOUND 0x002f
+struct mgmt_ev_adv_monitor_device_found {
+ __le16 monitor_handle;
+ struct mgmt_addr_info addr;
+ __s8 rssi;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[];
+} __packed;
+
+#define MGMT_EV_ADV_MONITOR_DEVICE_LOST 0x0030
+struct mgmt_ev_adv_monitor_device_lost {
+ __le16 monitor_handle;
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_MESH_DEVICE_FOUND 0x0031
+struct mgmt_ev_mesh_device_found {
+ struct mgmt_addr_info addr;
+ __s8 rssi;
+ __le64 instant;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[];
+} __packed;
+
+#define MGMT_EV_MESH_PACKET_CMPLT 0x0032
+struct mgmt_ev_mesh_pkt_cmplt {
+ __u8 handle;
+} __packed;
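
All of the commands and events above travel over the HCI control channel. A
minimal, self-contained userspace sketch (constants inlined from hci.h and
hci_sock.h; the function name is hypothetical) that opens the management
interface and issues MGMT_OP_READ_VERSION:

    #include <endian.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/socket.h>

    #ifndef AF_BLUETOOTH
    #define AF_BLUETOOTH 31
    #endif
    #define BTPROTO_HCI 1
    #define HCI_DEV_NONE 0xffff         /* from hci.h */
    #define HCI_CHANNEL_CONTROL 3       /* from hci_sock.h */

    struct sockaddr_hci {               /* from hci_sock.h */
            sa_family_t    hci_family;
            unsigned short hci_dev;
            unsigned short hci_channel;
    };

    struct mgmt_hdr {                   /* little-endian wire format */
            uint16_t opcode;
            uint16_t index;
            uint16_t len;
    } __attribute__((packed));

    int mgmt_open_read_version(void)
    {
            struct sockaddr_hci addr = {
                    .hci_family  = AF_BLUETOOTH,
                    .hci_dev     = HCI_DEV_NONE,
                    .hci_channel = HCI_CHANNEL_CONTROL,
            };
            struct mgmt_hdr hdr = {
                    .opcode = htole16(0x0001),  /* MGMT_OP_READ_VERSION */
                    .index  = htole16(0xffff),  /* MGMT_INDEX_NONE */
                    .len    = 0,
            };
            int sk = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);

            if (sk < 0)
                    return -1;
            if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
                write(sk, &hdr, sizeof(hdr)) != (ssize_t)sizeof(hdr)) {
                    close(sk);
                    return -1;
            }
            return sk;  /* the reply arrives as an MGMT_EV_CMD_COMPLETE event */
    }
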
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
new file mode 100644
index 000000000..99d26879b
--- /dev/null
+++ b/include/net/bluetooth/rfcomm.h
@@ -0,0 +1,375 @@
+/*
+ RFCOMM implementation for Linux Bluetooth stack (BlueZ)
+ Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
+ Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __RFCOMM_H
+#define __RFCOMM_H
+
+#include <linux/refcount.h>
+
+#define RFCOMM_CONN_TIMEOUT (HZ * 30)
+#define RFCOMM_DISC_TIMEOUT (HZ * 20)
+#define RFCOMM_AUTH_TIMEOUT (HZ * 25)
+#define RFCOMM_IDLE_TIMEOUT (HZ * 2)
+
+#define RFCOMM_DEFAULT_MTU 127
+#define RFCOMM_DEFAULT_CREDITS 7
+
+#define RFCOMM_MAX_CREDITS 40
+
+#define RFCOMM_SKB_HEAD_RESERVE 8
+#define RFCOMM_SKB_TAIL_RESERVE 2
+#define RFCOMM_SKB_RESERVE (RFCOMM_SKB_HEAD_RESERVE + RFCOMM_SKB_TAIL_RESERVE)
+
+#define RFCOMM_SABM 0x2f
+#define RFCOMM_DISC 0x43
+#define RFCOMM_UA 0x63
+#define RFCOMM_DM 0x0f
+#define RFCOMM_UIH 0xef
+
+#define RFCOMM_TEST 0x08
+#define RFCOMM_FCON 0x28
+#define RFCOMM_FCOFF 0x18
+#define RFCOMM_MSC 0x38
+#define RFCOMM_RPN 0x24
+#define RFCOMM_RLS 0x14
+#define RFCOMM_PN 0x20
+#define RFCOMM_NSC 0x04
+
+#define RFCOMM_V24_FC 0x02
+#define RFCOMM_V24_RTC 0x04
+#define RFCOMM_V24_RTR 0x08
+#define RFCOMM_V24_IC 0x40
+#define RFCOMM_V24_DV 0x80
+
+#define RFCOMM_RPN_BR_2400 0x0
+#define RFCOMM_RPN_BR_4800 0x1
+#define RFCOMM_RPN_BR_7200 0x2
+#define RFCOMM_RPN_BR_9600 0x3
+#define RFCOMM_RPN_BR_19200 0x4
+#define RFCOMM_RPN_BR_38400 0x5
+#define RFCOMM_RPN_BR_57600 0x6
+#define RFCOMM_RPN_BR_115200 0x7
+#define RFCOMM_RPN_BR_230400 0x8
+
+#define RFCOMM_RPN_DATA_5 0x0
+#define RFCOMM_RPN_DATA_6 0x1
+#define RFCOMM_RPN_DATA_7 0x2
+#define RFCOMM_RPN_DATA_8 0x3
+
+#define RFCOMM_RPN_STOP_1 0
+#define RFCOMM_RPN_STOP_15 1
+
+#define RFCOMM_RPN_PARITY_NONE 0x0
+#define RFCOMM_RPN_PARITY_ODD 0x1
+#define RFCOMM_RPN_PARITY_EVEN 0x3
+#define RFCOMM_RPN_PARITY_MARK 0x5
+#define RFCOMM_RPN_PARITY_SPACE 0x7
+
+#define RFCOMM_RPN_FLOW_NONE 0x00
+
+#define RFCOMM_RPN_XON_CHAR 0x11
+#define RFCOMM_RPN_XOFF_CHAR 0x13
+
+#define RFCOMM_RPN_PM_BITRATE 0x0001
+#define RFCOMM_RPN_PM_DATA 0x0002
+#define RFCOMM_RPN_PM_STOP 0x0004
+#define RFCOMM_RPN_PM_PARITY 0x0008
+#define RFCOMM_RPN_PM_PARITY_TYPE 0x0010
+#define RFCOMM_RPN_PM_XON 0x0020
+#define RFCOMM_RPN_PM_XOFF 0x0040
+#define RFCOMM_RPN_PM_FLOW 0x3F00
+
+#define RFCOMM_RPN_PM_ALL 0x3F7F
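+
+/* RFCOMM_RPN_PM_ALL is the OR of all the individual parameter mask bits
+ * above: 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040 |
+ * 0x3F00 == 0x3F7F.
+ */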
+
+struct rfcomm_hdr {
+ u8 addr;
+ u8 ctrl;
+ u8 len; /* Actual size can be 2 bytes */
+} __packed;
+
+struct rfcomm_cmd {
+ u8 addr;
+ u8 ctrl;
+ u8 len;
+ u8 fcs;
+} __packed;
+
+struct rfcomm_mcc {
+ u8 type;
+ u8 len;
+} __packed;
+
+struct rfcomm_pn {
+ u8 dlci;
+ u8 flow_ctrl;
+ u8 priority;
+ u8 ack_timer;
+ __le16 mtu;
+ u8 max_retrans;
+ u8 credits;
+} __packed;
+
+struct rfcomm_rpn {
+ u8 dlci;
+ u8 bit_rate;
+ u8 line_settings;
+ u8 flow_ctrl;
+ u8 xon_char;
+ u8 xoff_char;
+ __le16 param_mask;
+} __packed;
+
+struct rfcomm_rls {
+ u8 dlci;
+ u8 status;
+} __packed;
+
+struct rfcomm_msc {
+ u8 dlci;
+ u8 v24_sig;
+} __packed;
+
+/* ---- Core structures, flags etc ---- */
+
+struct rfcomm_session {
+ struct list_head list;
+ struct socket *sock;
+ struct timer_list timer;
+ unsigned long state;
+ unsigned long flags;
+ int initiator;
+
+ /* Default DLC parameters */
+ int cfc;
+ uint mtu;
+
+ struct list_head dlcs;
+};
+
+struct rfcomm_dlc {
+ struct list_head list;
+ struct rfcomm_session *session;
+ struct sk_buff_head tx_queue;
+ struct timer_list timer;
+
+ struct mutex lock;
+ unsigned long state;
+ unsigned long flags;
+ refcount_t refcnt;
+ u8 dlci;
+ u8 addr;
+ u8 priority;
+ u8 v24_sig;
+ u8 remote_v24_sig;
+ u8 mscex;
+ u8 out;
+ u8 sec_level;
+ u8 role_switch;
+ u32 defer_setup;
+
+ uint mtu;
+ uint cfc;
+ uint rx_credits;
+ uint tx_credits;
+
+ void *owner;
+
+ void (*data_ready)(struct rfcomm_dlc *d, struct sk_buff *skb);
+ void (*state_change)(struct rfcomm_dlc *d, int err);
+ void (*modem_status)(struct rfcomm_dlc *d, u8 v24_sig);
+};
+
+/* DLC and session flags */
+#define RFCOMM_RX_THROTTLED 0
+#define RFCOMM_TX_THROTTLED 1
+#define RFCOMM_TIMED_OUT 2
+#define RFCOMM_MSC_PENDING 3
+#define RFCOMM_SEC_PENDING 4
+#define RFCOMM_AUTH_PENDING 5
+#define RFCOMM_AUTH_ACCEPT 6
+#define RFCOMM_AUTH_REJECT 7
+#define RFCOMM_DEFER_SETUP 8
+#define RFCOMM_ENC_DROP 9
+
+/* Scheduling flags and events */
+#define RFCOMM_SCHED_WAKEUP 31
+
+/* MSC exchange flags */
+#define RFCOMM_MSCEX_TX 1
+#define RFCOMM_MSCEX_RX 2
+#define RFCOMM_MSCEX_OK (RFCOMM_MSCEX_TX + RFCOMM_MSCEX_RX)
+
+/* CFC states */
+#define RFCOMM_CFC_UNKNOWN -1
+#define RFCOMM_CFC_DISABLED 0
+#define RFCOMM_CFC_ENABLED RFCOMM_MAX_CREDITS
+
+/* ---- RFCOMM SEND RPN ---- */
+int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
+ u8 bit_rate, u8 data_bits, u8 stop_bits,
+ u8 parity, u8 flow_ctrl_settings,
+ u8 xon_char, u8 xoff_char, u16 param_mask);
+
+/* ---- RFCOMM DLCs (channels) ---- */
+struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio);
+void rfcomm_dlc_free(struct rfcomm_dlc *d);
+int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
+ u8 channel);
+int rfcomm_dlc_close(struct rfcomm_dlc *d, int reason);
+int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb);
+void rfcomm_dlc_send_noerror(struct rfcomm_dlc *d, struct sk_buff *skb);
+int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig);
+int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig);
+void rfcomm_dlc_accept(struct rfcomm_dlc *d);
+struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel);
+
+#define rfcomm_dlc_lock(d) mutex_lock(&d->lock)
+#define rfcomm_dlc_unlock(d) mutex_unlock(&d->lock)
+
+static inline void rfcomm_dlc_hold(struct rfcomm_dlc *d)
+{
+ refcount_inc(&d->refcnt);
+}
+
+static inline void rfcomm_dlc_put(struct rfcomm_dlc *d)
+{
+ if (refcount_dec_and_test(&d->refcnt))
+ rfcomm_dlc_free(d);
+}
+
+void __rfcomm_dlc_throttle(struct rfcomm_dlc *d);
+void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d);
+
+static inline void rfcomm_dlc_throttle(struct rfcomm_dlc *d)
+{
+ if (!test_and_set_bit(RFCOMM_RX_THROTTLED, &d->flags))
+ __rfcomm_dlc_throttle(d);
+}
+
+static inline void rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
+{
+ if (test_and_clear_bit(RFCOMM_RX_THROTTLED, &d->flags))
+ __rfcomm_dlc_unthrottle(d);
+}
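Putting the helpers above together, a hypothetical caller pins a DLC and pauses its receive path roughly like this (a sketch only; error handling elided):

/* Sketch: hold/put and throttle pattern for an RFCOMM DLC. */
static void example_rx_pause(struct rfcomm_dlc *d)
{
	rfcomm_dlc_hold(d);		/* pin the DLC while we work on it */
	rfcomm_dlc_throttle(d);		/* sets RFCOMM_RX_THROTTLED at most once */
	/* ... drain queued data ... */
	rfcomm_dlc_unthrottle(d);	/* resume the receive path */
	rfcomm_dlc_put(d);		/* frees the DLC on the last reference */
}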
+
+/* ---- RFCOMM sessions ---- */
+void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src,
+ bdaddr_t *dst);
+
+/* ---- RFCOMM sockets ---- */
+struct sockaddr_rc {
+ sa_family_t rc_family;
+ bdaddr_t rc_bdaddr;
+ u8 rc_channel;
+};
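struct sockaddr_rc is the address used by AF_BLUETOOTH/BTPROTO_RFCOMM sockets. A minimal userspace connect sketch, assuming the userspace BlueZ headers (<bluetooth/bluetooth.h>, <bluetooth/rfcomm.h>); the peer address is a placeholder and error handling is elided:

/* Userspace sketch: connect to RFCOMM channel 1 on a peer device. */
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/rfcomm.h>

int rfcomm_connect_example(void)
{
	struct sockaddr_rc addr = { 0 };
	int s = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM);

	addr.rc_family = AF_BLUETOOTH;
	addr.rc_channel = 1;
	str2ba("00:11:22:33:44:55", &addr.rc_bdaddr);	/* placeholder */
	return connect(s, (struct sockaddr *)&addr, sizeof(addr));
}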
+
+#define RFCOMM_CONNINFO 0x02
+struct rfcomm_conninfo {
+ __u16 hci_handle;
+ __u8 dev_class[3];
+};
+
+#define RFCOMM_LM 0x03
+#define RFCOMM_LM_MASTER 0x0001
+#define RFCOMM_LM_AUTH 0x0002
+#define RFCOMM_LM_ENCRYPT 0x0004
+#define RFCOMM_LM_TRUSTED 0x0008
+#define RFCOMM_LM_RELIABLE 0x0010
+#define RFCOMM_LM_SECURE 0x0020
+#define RFCOMM_LM_FIPS 0x0040
+
+#define rfcomm_pi(sk) ((struct rfcomm_pinfo *) sk)
+
+struct rfcomm_pinfo {
+ struct bt_sock bt;
+ bdaddr_t src;
+ bdaddr_t dst;
+ struct rfcomm_dlc *dlc;
+ u8 channel;
+ u8 sec_level;
+ u8 role_switch;
+};
+
+int rfcomm_init_sockets(void);
+void rfcomm_cleanup_sockets(void);
+
+int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel,
+ struct rfcomm_dlc **d);
+
+/* ---- RFCOMM TTY ---- */
+#define RFCOMM_MAX_DEV 256
+
+#define RFCOMMCREATEDEV _IOW('R', 200, int)
+#define RFCOMMRELEASEDEV _IOW('R', 201, int)
+#define RFCOMMGETDEVLIST _IOR('R', 210, int)
+#define RFCOMMGETDEVINFO _IOR('R', 211, int)
+#define RFCOMMSTEALDLC _IOW('R', 220, int)
+
+/* rfcomm_dev.flags bit definitions */
+#define RFCOMM_REUSE_DLC 0
+#define RFCOMM_RELEASE_ONHUP 1
+#define RFCOMM_HANGUP_NOW 2
+#define RFCOMM_TTY_ATTACHED 3
+#define RFCOMM_DEFUNCT_BIT4 4 /* don't reuse this bit - userspace visible */
+
+/* rfcomm_dev.status bit definitions */
+#define RFCOMM_DEV_RELEASED 0
+#define RFCOMM_TTY_OWNED 1
+
+struct rfcomm_dev_req {
+ s16 dev_id;
+ u32 flags;
+ bdaddr_t src;
+ bdaddr_t dst;
+ u8 channel;
+};
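The TTY ioctls above take this request structure on an RFCOMM socket. A hedged userspace sketch of binding a connected socket to a /dev/rfcommN node; the dev_id = -1 convention ("pick the first free id") reflects common usage and rfcomm_create_dev_example is a hypothetical name:

/* Userspace sketch: turn a connected RFCOMM socket into /dev/rfcommN. */
#include <string.h>
#include <sys/ioctl.h>

int rfcomm_create_dev_example(int sk)
{
	struct rfcomm_dev_req req;

	memset(&req, 0, sizeof(req));
	req.dev_id = -1;	/* let the kernel choose the first free id */
	req.flags = (1 << RFCOMM_REUSE_DLC) | (1 << RFCOMM_RELEASE_ONHUP);
	/* with RFCOMM_REUSE_DLC, src/dst/channel come from the socket */
	return ioctl(sk, RFCOMMCREATEDEV, &req);	/* device id on success */
}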
+
+struct rfcomm_dev_info {
+ s16 id;
+ u32 flags;
+ u16 state;
+ bdaddr_t src;
+ bdaddr_t dst;
+ u8 channel;
+};
+
+struct rfcomm_dev_list_req {
+ u16 dev_num;
+ struct rfcomm_dev_info dev_info[];
+};
+
+int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
+
+#ifdef CONFIG_BT_RFCOMM_TTY
+int rfcomm_init_ttys(void);
+void rfcomm_cleanup_ttys(void);
+#else
+static inline int rfcomm_init_ttys(void)
+{
+ return 0;
+}
+static inline void rfcomm_cleanup_ttys(void)
+{
+}
+#endif
+#endif /* __RFCOMM_H */
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
new file mode 100644
index 000000000..1aa2e14b6
--- /dev/null
+++ b/include/net/bluetooth/sco.h
@@ -0,0 +1,51 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __SCO_H
+#define __SCO_H
+
+/* SCO defaults */
+#define SCO_DEFAULT_MTU 500
+
+/* SCO socket address */
+struct sockaddr_sco {
+ sa_family_t sco_family;
+ bdaddr_t sco_bdaddr;
+};
+
+/* SCO socket options */
+#define SCO_OPTIONS 0x01
+struct sco_options {
+ __u16 mtu;
+};
+
+#define SCO_CONNINFO 0x02
+struct sco_conninfo {
+ __u16 hci_handle;
+ __u8 dev_class[3];
+};
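A userspace sketch reading these options back from a connected SCO socket (SOL_SCO comes from the BlueZ <bluetooth/bluetooth.h>; the function name is illustrative):

/* Userspace sketch: query the negotiated MTU of a SCO link. */
#include <stdio.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/sco.h>

int sco_print_mtu_example(int sk)
{
	struct sco_options opts;
	socklen_t optlen = sizeof(opts);

	if (getsockopt(sk, SOL_SCO, SCO_OPTIONS, &opts, &optlen) < 0)
		return -1;
	printf("SCO MTU: %u\n", opts.mtu);
	return 0;
}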
+
+#define SCO_CMSG_PKT_STATUS 0x01
+
+#endif /* __SCO_H */
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
new file mode 100644
index 000000000..a016f275c
--- /dev/null
+++ b/include/net/bond_3ad.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+ */
+
+#ifndef _NET_BOND_3AD_H
+#define _NET_BOND_3AD_H
+
+#include <asm/byteorder.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+
+/* General definitions */
+#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
+#define AD_TIMER_INTERVAL 100 /*msec*/
+
+#define AD_LACP_SLOW 0
+#define AD_LACP_FAST 1
+
+typedef struct mac_addr {
+ u8 mac_addr_value[ETH_ALEN];
+} __packed mac_addr_t;
+
+enum {
+ BOND_AD_STABLE = 0,
+ BOND_AD_BANDWIDTH = 1,
+ BOND_AD_COUNT = 2,
+};
+
+/* rx machine states (43.4.11 in the 802.3ad standard) */
+typedef enum {
+ AD_RX_DUMMY,
+ AD_RX_INITIALIZE, /* rx Machine */
+ AD_RX_PORT_DISABLED, /* rx Machine */
+ AD_RX_LACP_DISABLED, /* rx Machine */
+ AD_RX_EXPIRED, /* rx Machine */
+ AD_RX_DEFAULTED, /* rx Machine */
+ AD_RX_CURRENT /* rx Machine */
+} rx_states_t;
+
+/* periodic machine states (43.4.12 in the 802.3ad standard) */
+typedef enum {
+ AD_PERIODIC_DUMMY,
+ AD_NO_PERIODIC, /* periodic machine */
+ AD_FAST_PERIODIC, /* periodic machine */
+ AD_SLOW_PERIODIC, /* periodic machine */
+ AD_PERIODIC_TX /* periodic machine */
+} periodic_states_t;
+
+/* mux machine states (43.4.13 in the 802.3ad standard) */
+typedef enum {
+ AD_MUX_DUMMY,
+ AD_MUX_DETACHED, /* mux machine */
+ AD_MUX_WAITING, /* mux machine */
+ AD_MUX_ATTACHED, /* mux machine */
+ AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */
+} mux_states_t;
+
+/* tx machine states (43.4.15 in the 802.3ad standard) */
+typedef enum {
+ AD_TX_DUMMY,
+ AD_TRANSMIT /* tx Machine */
+} tx_states_t;
+
+/* churn machine states (43.4.17 in the 802.3ad standard) */
+typedef enum {
+ AD_CHURN_MONITOR, /* monitoring for churn */
+ AD_CHURN, /* churn detected (error) */
+ AD_NO_CHURN /* no churn (no error) */
+} churn_state_t;
+
+/* rx indication types */
+typedef enum {
+ AD_TYPE_LACPDU = 1, /* type lacpdu */
+ AD_TYPE_MARKER /* type marker */
+} pdu_type_t;
+
+/* rx marker indication types */
+typedef enum {
+	AD_MARKER_INFORMATION_SUBTYPE = 1,	/* marker information subtype */
+ AD_MARKER_RESPONSE_SUBTYPE /* marker response subtype */
+} bond_marker_subtype_t;
+
+/* timer types (43.4.9 in the 802.3ad standard) */
+typedef enum {
+ AD_CURRENT_WHILE_TIMER,
+ AD_ACTOR_CHURN_TIMER,
+ AD_PERIODIC_TIMER,
+ AD_PARTNER_CHURN_TIMER,
+ AD_WAIT_WHILE_TIMER
+} ad_timers_t;
+
+#pragma pack(1)
+
+/* Link Aggregation Control Protocol (LACP) data unit structure (43.4.2.2 in the 802.3ad standard) */
+typedef struct lacpdu {
+ u8 subtype; /* = LACP(= 0x01) */
+ u8 version_number;
+ u8 tlv_type_actor_info; /* = actor information(type/length/value) */
+ u8 actor_information_length; /* = 20 */
+ __be16 actor_system_priority;
+ struct mac_addr actor_system;
+ __be16 actor_key;
+ __be16 actor_port_priority;
+ __be16 actor_port;
+ u8 actor_state;
+ u8 reserved_3_1[3]; /* = 0 */
+ u8 tlv_type_partner_info; /* = partner information */
+ u8 partner_information_length; /* = 20 */
+ __be16 partner_system_priority;
+ struct mac_addr partner_system;
+ __be16 partner_key;
+ __be16 partner_port_priority;
+ __be16 partner_port;
+ u8 partner_state;
+ u8 reserved_3_2[3]; /* = 0 */
+ u8 tlv_type_collector_info; /* = collector information */
+ u8 collector_information_length;/* = 16 */
+ __be16 collector_max_delay;
+ u8 reserved_12[12];
+ u8 tlv_type_terminator; /* = terminator */
+ u8 terminator_length; /* = 0 */
+ u8 reserved_50[50]; /* = 0 */
+} __packed lacpdu_t;
+
+typedef struct lacpdu_header {
+ struct ethhdr hdr;
+ struct lacpdu lacpdu;
+} __packed lacpdu_header_t;
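Since these structures are built inside #pragma pack(1), their sizes must match the on-wire layouts byte for byte; summing the members above gives 110 octets for the LACPDU body. A compile-time check of that invariant (an illustration using C11 _Static_assert, not part of the header):

/* Illustrative compile-time checks of the on-wire PDU sizes. */
_Static_assert(sizeof(struct lacpdu) == 110,
	       "LACPDU body must be 110 octets");
_Static_assert(sizeof(struct lacpdu_header) == ETH_HLEN + 110,
	       "LACPDU frame is the Ethernet header plus the 110-octet body");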
+
+/* Marker Protocol Data Unit (PDU) structure (43.5.3.2 in the 802.3ad standard) */
+typedef struct bond_marker {
+ u8 subtype; /* = 0x02 (marker PDU) */
+ u8 version_number; /* = 0x01 */
+ u8 tlv_type; /* = 0x01 (marker information) */
+ /* = 0x02 (marker response information) */
+ u8 marker_length; /* = 0x16 */
+ u16 requester_port; /* The number assigned to the port by the requester */
+ struct mac_addr requester_system; /* The requester's system id */
+	u32 requester_transaction_id;	/* The transaction id allocated by the requester */
+ u16 pad; /* = 0 */
+ u8 tlv_type_terminator; /* = 0x00 */
+ u8 terminator_length; /* = 0x00 */
+ u8 reserved_90[90]; /* = 0 */
+} __packed bond_marker_t;
+
+typedef struct bond_marker_header {
+ struct ethhdr hdr;
+ struct bond_marker marker;
+} __packed bond_marker_header_t;
+
+#pragma pack()
+
+struct slave;
+struct bonding;
+struct ad_info;
+struct port;
+
+#ifdef __ia64__
+#pragma pack(8)
+#endif
+
+struct bond_3ad_stats {
+ atomic64_t lacpdu_rx;
+ atomic64_t lacpdu_tx;
+ atomic64_t lacpdu_unknown_rx;
+ atomic64_t lacpdu_illegal_rx;
+
+ atomic64_t marker_rx;
+ atomic64_t marker_tx;
+ atomic64_t marker_resp_rx;
+ atomic64_t marker_resp_tx;
+ atomic64_t marker_unknown_rx;
+};
+
+/* aggregator structure (43.4.5 in the 802.3ad standard) */
+typedef struct aggregator {
+ struct mac_addr aggregator_mac_address;
+ u16 aggregator_identifier;
+ bool is_individual;
+ u16 actor_admin_aggregator_key;
+ u16 actor_oper_aggregator_key;
+ struct mac_addr partner_system;
+ u16 partner_system_priority;
+ u16 partner_oper_aggregator_key;
+ u16 receive_state; /* BOOLEAN */
+ u16 transmit_state; /* BOOLEAN */
+ struct port *lag_ports;
+ /* ****** PRIVATE PARAMETERS ****** */
+ struct slave *slave; /* pointer to the bond slave that this aggregator belongs to */
+ u16 is_active; /* BOOLEAN. Indicates if this aggregator is active */
+ u16 num_of_ports;
+} aggregator_t;
+
+struct port_params {
+ struct mac_addr system;
+ u16 system_priority;
+ u16 key;
+ u16 port_number;
+ u16 port_priority;
+ u16 port_state;
+};
+
+/* port structure (43.4.6 in the 802.3ad standard) */
+typedef struct port {
+ u16 actor_port_number;
+ u16 actor_port_priority;
+ struct mac_addr actor_system; /* This parameter is added here although it is not specified in the standard, just for simplification */
+ u16 actor_system_priority; /* This parameter is added here although it is not specified in the standard, just for simplification */
+ u16 actor_port_aggregator_identifier;
+ bool ntt;
+ u16 actor_admin_port_key;
+ u16 actor_oper_port_key;
+ u8 actor_admin_port_state;
+ u8 actor_oper_port_state;
+
+ struct port_params partner_admin;
+ struct port_params partner_oper;
+
+ bool is_enabled;
+
+ /* ****** PRIVATE PARAMETERS ****** */
+ u16 sm_vars; /* all state machines variables for this port */
+ rx_states_t sm_rx_state; /* state machine rx state */
+ u16 sm_rx_timer_counter; /* state machine rx timer counter */
+ periodic_states_t sm_periodic_state; /* state machine periodic state */
+ u16 sm_periodic_timer_counter; /* state machine periodic timer counter */
+ mux_states_t sm_mux_state; /* state machine mux state */
+ u16 sm_mux_timer_counter; /* state machine mux timer counter */
+ tx_states_t sm_tx_state; /* state machine tx state */
+	u16 sm_tx_timer_counter;	/* state machine tx timer counter (always on - enters the transmit state 3 times per second) */
+ u16 sm_churn_actor_timer_counter;
+ u16 sm_churn_partner_timer_counter;
+ u32 churn_actor_count;
+ u32 churn_partner_count;
+ churn_state_t sm_churn_actor_state;
+ churn_state_t sm_churn_partner_state;
+ struct slave *slave; /* pointer to the bond slave that this port belongs to */
+ struct aggregator *aggregator; /* pointer to an aggregator that this port related to */
+ struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */
+	u32 transaction_id;	/* sequential number used to identify Marker PDUs */
+ struct lacpdu lacpdu; /* the lacpdu that will be sent for this port */
+} port_t;
+
+/* system structure */
+struct ad_system {
+ u16 sys_priority;
+ struct mac_addr sys_mac_addr;
+};
+
+#ifdef __ia64__
+#pragma pack()
+#endif
+
+/* ========== AD Exported structures to the main bonding code ========== */
+#define BOND_AD_INFO(bond) ((bond)->ad_info)
+#define SLAVE_AD_INFO(slave) ((slave)->ad_info)
+
+struct ad_bond_info {
+ struct ad_system system; /* 802.3ad system structure */
+ struct bond_3ad_stats stats;
+	atomic_t agg_select_timer;	/* Timer to select aggregator after all adapters' handshakes */
+ u16 aggregator_identifier;
+};
+
+struct ad_slave_info {
+ struct aggregator aggregator; /* 802.3ad aggregator structure */
+ struct port port; /* 802.3ad port structure */
+ struct bond_3ad_stats stats;
+ u16 id;
+};
+
+static inline const char *bond_3ad_churn_desc(churn_state_t state)
+{
+ static const char *const churn_description[] = {
+ "monitoring",
+ "churned",
+ "none",
+ "unknown"
+ };
+ int max_size = ARRAY_SIZE(churn_description);
+
+ if (state >= max_size)
+ state = max_size - 1;
+
+ return churn_description[state];
+}
+
+/* ========== AD Exported functions to the main bonding code ========== */
+void bond_3ad_initialize(struct bonding *bond);
+void bond_3ad_bind_slave(struct slave *slave);
+void bond_3ad_unbind_slave(struct slave *slave);
+void bond_3ad_state_machine_handler(struct work_struct *);
+void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout);
+void bond_3ad_adapter_speed_duplex_changed(struct slave *slave);
+void bond_3ad_handle_link_change(struct slave *slave, char link);
+int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
+int __bond_3ad_get_active_agg_info(struct bonding *bond,
+ struct ad_info *ad_info);
+int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave);
+int bond_3ad_set_carrier(struct bonding *bond);
+void bond_3ad_update_lacp_active(struct bonding *bond);
+void bond_3ad_update_lacp_rate(struct bonding *bond);
+void bond_3ad_update_ad_actor_settings(struct bonding *bond);
+int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
+size_t bond_3ad_stats_size(void);
+#endif /* _NET_BOND_3AD_H */
diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h
new file mode 100644
index 000000000..9dc082b2d
--- /dev/null
+++ b/include/net/bond_alb.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+ */
+
+#ifndef _NET_BOND_ALB_H
+#define _NET_BOND_ALB_H
+
+#include <linux/if_ether.h>
+
+struct bonding;
+struct slave;
+
+#define BOND_ALB_INFO(bond) ((bond)->alb_info)
+#define SLAVE_TLB_INFO(slave) ((slave)->tlb_info)
+
+#define ALB_TIMER_TICKS_PER_SEC 10 /* should be a divisor of HZ */
+#define BOND_TLB_REBALANCE_INTERVAL 10 /* In seconds, periodic re-balancing.
+ * Used for division - never set
+ * to zero !!!
+ */
+#define BOND_ALB_DEFAULT_LP_INTERVAL 1
+#define BOND_ALB_LP_INTERVAL(bond) (bond->params.lp_interval) /* In seconds, periodic send of
+ * learning packets to the switch
+ */
+
+#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
+ * ALB_TIMER_TICKS_PER_SEC)
+
+#define BOND_ALB_LP_TICKS(bond) (BOND_ALB_LP_INTERVAL(bond) \
+ * ALB_TIMER_TICKS_PER_SEC)
+
+#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
+					 * This value MUST NOT be made smaller,
+					 * because the hash key is one byte wide.
+ */
+
+
+#define TLB_NULL_INDEX 0xffffffff
+
+/* rlb defs */
+#define RLB_HASH_TABLE_SIZE 256
+#define RLB_NULL_INDEX 0xffffffff
+#define RLB_UPDATE_DELAY (2*ALB_TIMER_TICKS_PER_SEC) /* 2 seconds */
+#define RLB_ARP_BURST_SIZE 2
+#define RLB_UPDATE_RETRY	3	/* 3 ticks - must be smaller than the rlb
+ * rebalance interval (5 min).
+ */
+/* RLB_PROMISC_TIMEOUT (10 sec) is how long the current slave stays
+ * promiscuous after a failover
+ */
+#define RLB_PROMISC_TIMEOUT (10*ALB_TIMER_TICKS_PER_SEC)
+
+
+struct tlb_client_info {
+	struct slave *tx_slave;	/* A pointer to the slave used for
+				 * transmitting packets to the client whose
+				 * hash maps to this entry's index.
+				 */
+	u32 tx_bytes;		/* Accumulates the bytes transmitted to this
+				 * client; after each callback the accumulated
+				 * load is divided by the balance interval.
+				 */
+	u32 load_history;	/* The number of bytes transmitted to this
+				 * client by the server during the previous
+				 * balance interval, expressed as a rate (Bps).
+				 */
+ u32 next; /* The next Hash table entry index, assigned
+ * to use the same adapter for transmit.
+ */
+	u32 prev;		/* The previous hash table entry index,
+				 * assigned to use the same adapter for
+				 * transmit.
+				 */
+};
+
+/* -------------------------------------------------------------------------
+ * struct rlb_client_info contains all info related to a specific rx client
+ * connection. This is the Clients Hash Table entry struct.
+ * Note that this is not a proper hash table; if a new client's IP address
+ * hash collides with an existing client entry, the old entry is replaced.
+ *
+ * There is a linked list (linked by the used_next and used_prev members)
+ * linking all the used entries of the hash table. This allows updating
+ * all the clients without walking over all the unused elements of the table.
+ *
+ * There are also linked lists of entries with identical hash(ip_src). These
+ * allow cleaning up the table from ip_src<->mac_src associations that have
+ * become outdated and would cause sending out invalid ARP updates to the
+ * network. These are linked by the (src_next and src_prev members).
+ * -------------------------------------------------------------------------
+ */
+struct rlb_client_info {
+ __be32 ip_src; /* the server IP address */
+ __be32 ip_dst; /* the client IP address */
+ u8 mac_src[ETH_ALEN]; /* the server MAC address */
+ u8 mac_dst[ETH_ALEN]; /* the client MAC address */
+
+ /* list of used hash table entries, starting at rx_hashtbl_used_head */
+ u32 used_next;
+ u32 used_prev;
+
+ /* ip_src based hashing */
+ u32 src_next; /* next entry with same hash(ip_src) */
+ u32 src_prev; /* prev entry with same hash(ip_src) */
+ u32 src_first; /* first entry with hash(ip_src) == this entry's index */
+
+ u8 assigned; /* checking whether this entry is assigned */
+ u8 ntt; /* flag - need to transmit client info */
+ struct slave *slave; /* the slave assigned to this client */
+ unsigned short vlan_id; /* VLAN tag associated with IP address */
+};
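The used_next/used_prev indices implement the used-entries list described in the comment block above; a sketch of walking every assigned client (rx_hashtbl and rx_hashtbl_used_head live in struct alb_bond_info, defined below; the function name is illustrative):

/* Sketch: visit every assigned RLB client via the used-entries list. */
static void example_walk_rlb_clients(struct alb_bond_info *bond_info)
{
	u32 idx = bond_info->rx_hashtbl_used_head;

	while (idx != RLB_NULL_INDEX) {
		struct rlb_client_info *client = &bond_info->rx_hashtbl[idx];

		/* ... inspect client->ip_dst, client->slave, ... */
		idx = client->used_next;
	}
}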
+
+struct tlb_slave_info {
+ u32 head; /* Index to the head of the bi-directional clients
+ * hash table entries list. The entries in the list
+ * are the entries that were assigned to use this
+ * slave for transmit.
+ */
+ u32 load; /* Each slave sums the loadHistory of all clients
+ * assigned to it
+ */
+};
+
+struct alb_bond_info {
+ struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
+ u32 unbalanced_load;
+ atomic_t tx_rebalance_counter;
+ int lp_counter;
+ /* -------- rlb parameters -------- */
+ int rlb_enabled;
+ struct rlb_client_info *rx_hashtbl; /* Receive hash table */
+ u32 rx_hashtbl_used_head;
+ u8 rx_ntt; /* flag - need to transmit
+ * to all rx clients
+ */
+ struct slave *rx_slave;/* last slave to xmit from */
+ u8 primary_is_promisc; /* boolean */
+ u32 rlb_promisc_timeout_counter;/* counts primary
+ * promiscuity time
+ */
+ u32 rlb_update_delay_counter;
+ u32 rlb_update_retry_counter;/* counter of retries
+ * of client update
+ */
+ u8 rlb_rebalance; /* flag - indicates that the
+ * rx traffic should be
+ * rebalanced
+ */
+};
+
+int bond_alb_initialize(struct bonding *bond, int rlb_enabled);
+void bond_alb_deinitialize(struct bonding *bond);
+int bond_alb_init_slave(struct bonding *bond, struct slave *slave);
+void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
+void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
+void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
+netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
+ struct sk_buff *skb);
+struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
+ struct sk_buff *skb);
+void bond_alb_monitor(struct work_struct *);
+int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
+void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
+#endif /* _NET_BOND_ALB_H */
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
new file mode 100644
index 000000000..69292ecc0
--- /dev/null
+++ b/include/net/bond_options.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * drivers/net/bond/bond_options.h - bonding options
+ * Copyright (c) 2013 Nikolay Aleksandrov <nikolay@redhat.com>
+ */
+
+#ifndef _NET_BOND_OPTIONS_H
+#define _NET_BOND_OPTIONS_H
+
+#include <linux/bits.h>
+#include <linux/limits.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+struct netlink_ext_ack;
+struct nlattr;
+
+#define BOND_OPT_MAX_NAMELEN 32
+#define BOND_OPT_VALID(opt) ((opt) < BOND_OPT_LAST)
+#define BOND_MODE_ALL_EX(x) (~(x))
+
+/* Option flags:
+ * BOND_OPTFLAG_NOSLAVES - check if the bond device is empty before setting
+ * BOND_OPTFLAG_IFDOWN - check if the bond device is down before setting
+ * BOND_OPTFLAG_RAWVAL - the option parses the value itself
+ */
+enum {
+ BOND_OPTFLAG_NOSLAVES = BIT(0),
+ BOND_OPTFLAG_IFDOWN = BIT(1),
+ BOND_OPTFLAG_RAWVAL = BIT(2)
+};
+
+/* Value type flags:
+ * BOND_VALFLAG_DEFAULT - mark the value as default
+ * BOND_VALFLAG_(MIN|MAX) - mark the value as min/max
+ */
+enum {
+ BOND_VALFLAG_DEFAULT = BIT(0),
+ BOND_VALFLAG_MIN = BIT(1),
+ BOND_VALFLAG_MAX = BIT(2)
+};
+
+/* Option IDs, their bit positions correspond to their IDs */
+enum {
+ BOND_OPT_MODE,
+ BOND_OPT_PACKETS_PER_SLAVE,
+ BOND_OPT_XMIT_HASH,
+ BOND_OPT_ARP_VALIDATE,
+ BOND_OPT_ARP_ALL_TARGETS,
+ BOND_OPT_FAIL_OVER_MAC,
+ BOND_OPT_ARP_INTERVAL,
+ BOND_OPT_ARP_TARGETS,
+ BOND_OPT_DOWNDELAY,
+ BOND_OPT_UPDELAY,
+ BOND_OPT_LACP_RATE,
+ BOND_OPT_MINLINKS,
+ BOND_OPT_AD_SELECT,
+ BOND_OPT_NUM_PEER_NOTIF,
+ BOND_OPT_MIIMON,
+ BOND_OPT_PRIMARY,
+ BOND_OPT_PRIMARY_RESELECT,
+ BOND_OPT_USE_CARRIER,
+ BOND_OPT_ACTIVE_SLAVE,
+ BOND_OPT_QUEUE_ID,
+ BOND_OPT_ALL_SLAVES_ACTIVE,
+ BOND_OPT_RESEND_IGMP,
+ BOND_OPT_LP_INTERVAL,
+ BOND_OPT_SLAVES,
+ BOND_OPT_TLB_DYNAMIC_LB,
+ BOND_OPT_AD_ACTOR_SYS_PRIO,
+ BOND_OPT_AD_ACTOR_SYSTEM,
+ BOND_OPT_AD_USER_PORT_KEY,
+ BOND_OPT_NUM_PEER_NOTIF_ALIAS,
+ BOND_OPT_PEER_NOTIF_DELAY,
+ BOND_OPT_LACP_ACTIVE,
+ BOND_OPT_MISSED_MAX,
+ BOND_OPT_NS_TARGETS,
+ BOND_OPT_PRIO,
+ BOND_OPT_LAST
+};
+
+/* This structure is used for storing option values and for passing option
+ * values when changing an option. The logic when used as an arg is as follows:
+ * - if value != ULLONG_MAX -> parse value
+ * - if string != NULL -> parse string
+ * - if the opt is RAW data and length less than maxlen,
+ * copy the data to extra storage
+ */
+
+#define BOND_OPT_EXTRA_MAXLEN 16
+struct bond_opt_value {
+ char *string;
+ u64 value;
+ u32 flags;
+ union {
+ char extra[BOND_OPT_EXTRA_MAXLEN];
+ struct net_device *slave_dev;
+ };
+};
+
+struct bonding;
+
+struct bond_option {
+ int id;
+ const char *name;
+ const char *desc;
+ u32 flags;
+
+ /* unsuppmodes is used to denote modes in which the option isn't
+ * supported.
+ */
+ unsigned long unsuppmodes;
+ /* supported values which this option can have, can be a subset of
+ * BOND_OPTVAL_RANGE's value range
+ */
+ const struct bond_opt_value *values;
+
+ int (*set)(struct bonding *bond, const struct bond_opt_value *val);
+};
+
+int __bond_opt_set(struct bonding *bond, unsigned int option,
+ struct bond_opt_value *val,
+ struct nlattr *bad_attr, struct netlink_ext_ack *extack);
+int __bond_opt_set_notify(struct bonding *bond, unsigned int option,
+ struct bond_opt_value *val);
+int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
+
+const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
+ struct bond_opt_value *val);
+const struct bond_option *bond_opt_get(unsigned int option);
+const struct bond_option *bond_opt_get_by_name(const char *name);
+const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
+
+/* This helper is used to initialize a bond_opt_value structure for parameter
+ * passing. There should be either a valid string or value, but not both.
+ * When value is ULLONG_MAX then string will be used.
+ */
+static inline void __bond_opt_init(struct bond_opt_value *optval,
+ char *string, u64 value,
+ void *extra, size_t extra_len)
+{
+ memset(optval, 0, sizeof(*optval));
+ optval->value = ULLONG_MAX;
+ if (value != ULLONG_MAX)
+ optval->value = value;
+ else if (string)
+ optval->string = string;
+
+ if (extra && extra_len <= BOND_OPT_EXTRA_MAXLEN)
+ memcpy(optval->extra, extra, extra_len);
+}
+#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value, NULL, 0)
+#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX, NULL, 0)
+#define bond_opt_initextra(optval, extra, extra_len) \
+ __bond_opt_init(optval, NULL, ULLONG_MAX, extra, extra_len)
+#define bond_opt_slave_initval(optval, slave_dev, value) \
+ __bond_opt_init(optval, NULL, value, slave_dev, sizeof(struct net_device *))
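A hedged in-kernel usage sketch of the option API: initialize a bond_opt_value with one of the helper macros above and hand it to __bond_opt_set() (caller holds RTNL; the value 100 and the function name are illustrative):

/* Sketch: set the miimon interval through the option API above. */
static int example_set_miimon(struct bonding *bond)
{
	struct bond_opt_value newval;

	bond_opt_initval(&newval, 100);
	return __bond_opt_set(bond, BOND_OPT_MIIMON, &newval, NULL, NULL);
}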
+
+void bond_option_arp_ip_targets_clear(struct bonding *bond);
+#if IS_ENABLED(CONFIG_IPV6)
+void bond_option_ns_ip6_targets_clear(struct bonding *bond);
+#endif
+
+#endif /* _NET_BOND_OPTIONS_H */
diff --git a/include/net/bonding.h b/include/net/bonding.h
new file mode 100644
index 000000000..9a3ac960d
--- /dev/null
+++ b/include/net/bonding.h
@@ -0,0 +1,795 @@
+/*
+ * Bond several ethernet interfaces into a Cisco 'EtherChannel'-style trunk.
+ *
+ * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
+ * NCM: Network and Communications Management, Inc.
+ *
+ * BUT, I'm the one who modified it for ethernet, so:
+ * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ */
+
+#ifndef _NET_BONDING_H
+#define _NET_BONDING_H
+
+#include <linux/timer.h>
+#include <linux/proc_fs.h>
+#include <linux/if_bonding.h>
+#include <linux/cpumask.h>
+#include <linux/in6.h>
+#include <linux/netpoll.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/reciprocal_div.h>
+#include <linux/if_link.h>
+
+#include <net/bond_3ad.h>
+#include <net/bond_alb.h>
+#include <net/bond_options.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+
+#define BOND_MAX_ARP_TARGETS 16
+#define BOND_MAX_NS_TARGETS BOND_MAX_ARP_TARGETS
+
+#define BOND_DEFAULT_MIIMON 100
+
+#ifndef __long_aligned
+#define __long_aligned __attribute__((aligned((sizeof(long)))))
+#endif
+
+#define slave_info(bond_dev, slave_dev, fmt, ...) \
+ netdev_info(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__)
+#define slave_warn(bond_dev, slave_dev, fmt, ...) \
+ netdev_warn(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__)
+#define slave_dbg(bond_dev, slave_dev, fmt, ...) \
+ netdev_dbg(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__)
+#define slave_err(bond_dev, slave_dev, fmt, ...) \
+ netdev_err(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__)
+
+#define BOND_MODE(bond) ((bond)->params.mode)
+
+/* slave list primitives */
+#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
+
+#define bond_has_slaves(bond) !list_empty(bond_slave_list(bond))
+
+/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
+#define bond_first_slave(bond) \
+ (bond_has_slaves(bond) ? \
+ netdev_adjacent_get_private(bond_slave_list(bond)->next) : \
+ NULL)
+#define bond_last_slave(bond) \
+ (bond_has_slaves(bond) ? \
+ netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
+ NULL)
+
+/* Caller must have rcu_read_lock */
+#define bond_first_slave_rcu(bond) \
+ netdev_lower_get_first_private_rcu(bond->dev)
+
+#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
+#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
+
+/**
+ * bond_for_each_slave - iterate over all slaves
+ * @bond: the bond holding this list
+ * @pos: current slave
+ * @iter: list_head * iterator
+ *
+ * Caller must hold RTNL
+ */
+#define bond_for_each_slave(bond, pos, iter) \
+ netdev_for_each_lower_private((bond)->dev, pos, iter)
+
+/* Caller must have rcu_read_lock */
+#define bond_for_each_slave_rcu(bond, pos, iter) \
+ netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
+
+#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
+ NETIF_F_GSO_ESP)
+
+#define BOND_TLS_FEATURES (NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX)
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+extern atomic_t netpoll_block_tx;
+
+static inline void block_netpoll_tx(void)
+{
+ atomic_inc(&netpoll_block_tx);
+}
+
+static inline void unblock_netpoll_tx(void)
+{
+ atomic_dec(&netpoll_block_tx);
+}
+
+static inline int is_netpoll_tx_blocked(struct net_device *dev)
+{
+ if (unlikely(netpoll_tx_running(dev)))
+ return atomic_read(&netpoll_block_tx);
+ return 0;
+}
+#else
+#define block_netpoll_tx()
+#define unblock_netpoll_tx()
+#define is_netpoll_tx_blocked(dev) (0)
+#endif
+
+struct bond_params {
+ int mode;
+ int xmit_policy;
+ int miimon;
+ u8 num_peer_notif;
+ u8 missed_max;
+ int arp_interval;
+ int arp_validate;
+ int arp_all_targets;
+ int use_carrier;
+ int fail_over_mac;
+ int updelay;
+ int downdelay;
+ int peer_notif_delay;
+ int lacp_active;
+ int lacp_fast;
+ unsigned int min_links;
+ int ad_select;
+ char primary[IFNAMSIZ];
+ int primary_reselect;
+ __be32 arp_targets[BOND_MAX_ARP_TARGETS];
+ int tx_queues;
+ int all_slaves_active;
+ int resend_igmp;
+ int lp_interval;
+ int packets_per_slave;
+ int tlb_dynamic_lb;
+ struct reciprocal_value reciprocal_packets_per_slave;
+ u16 ad_actor_sys_prio;
+ u16 ad_user_port_key;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ns_targets[BOND_MAX_NS_TARGETS];
+#endif
+
+ /* 2 bytes of padding : see ether_addr_equal_64bits() */
+ u8 ad_actor_system[ETH_ALEN + 2];
+};
+
+struct slave {
+ struct net_device *dev; /* first - useful for panic debug */
+ struct bonding *bond; /* our master */
+ int delay;
+ /* all 4 in jiffies */
+ unsigned long last_link_up;
+ unsigned long last_tx;
+ unsigned long last_rx;
+ unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
+ s8 link; /* one of BOND_LINK_XXXX */
+ s8 link_new_state; /* one of BOND_LINK_XXXX */
+ u8 backup:1, /* indicates backup slave. Value corresponds with
+ BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
+ inactive:1, /* indicates inactive slave */
+ should_notify:1, /* indicates whether the state changed */
+ should_notify_link:1; /* indicates whether the link changed */
+ u8 duplex;
+ u32 original_mtu;
+ u32 link_failure_count;
+ u32 speed;
+ u16 queue_id;
+ u8 perm_hwaddr[MAX_ADDR_LEN];
+ int prio;
+ struct ad_slave_info *ad_info;
+ struct tlb_slave_info tlb_info;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *np;
+#endif
+ struct delayed_work notify_work;
+ struct kobject kobj;
+ struct rtnl_link_stats64 slave_stats;
+};
+
+static inline struct slave *to_slave(struct kobject *kobj)
+{
+ return container_of(kobj, struct slave, kobj);
+}
+
+struct bond_up_slave {
+ unsigned int count;
+ struct rcu_head rcu;
+ struct slave *arr[];
+};
+
+/*
+ * Link pseudo-state only used internally by monitors
+ */
+#define BOND_LINK_NOCHANGE -1
+
+struct bond_ipsec {
+ struct list_head list;
+ struct xfrm_state *xs;
+};
+
+/*
+ * Here are the locking policies for the two bonding locks:
+ * Get rcu_read_lock when reading or RTNL when writing slave list.
+ */
+struct bonding {
+ struct net_device *dev; /* first - useful for panic debug */
+ struct slave __rcu *curr_active_slave;
+ struct slave __rcu *current_arp_slave;
+ struct slave __rcu *primary_slave;
+ struct bond_up_slave __rcu *usable_slaves;
+ struct bond_up_slave __rcu *all_slaves;
+ bool force_primary;
+ bool notifier_ctx;
+ s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
+ int (*recv_probe)(const struct sk_buff *, struct bonding *,
+ struct slave *);
+ /* mode_lock is used for mode-specific locking needs, currently used by:
+ * 3ad mode (4) - protect against running bond_3ad_unbind_slave() and
+ * bond_3ad_state_machine_handler() concurrently and also
+ * the access to the state machine shared variables.
+ * TLB mode (5) - to sync the use and modifications of its hash table
+ * ALB mode (6) - to sync the use and modifications of its hash table
+ */
+ spinlock_t mode_lock;
+ spinlock_t stats_lock;
+ u32 send_peer_notif;
+ u8 igmp_retrans;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc_entry;
+ char proc_file_name[IFNAMSIZ];
+#endif /* CONFIG_PROC_FS */
+ struct list_head bond_list;
+ u32 __percpu *rr_tx_counter;
+ struct ad_bond_info ad_info;
+ struct alb_bond_info alb_info;
+ struct bond_params params;
+ struct workqueue_struct *wq;
+ struct delayed_work mii_work;
+ struct delayed_work arp_work;
+ struct delayed_work alb_work;
+ struct delayed_work ad_work;
+ struct delayed_work mcast_work;
+ struct delayed_work slave_arr_work;
+#ifdef CONFIG_DEBUG_FS
+ /* debugging support via debugfs */
+ struct dentry *debug_dir;
+#endif /* CONFIG_DEBUG_FS */
+ struct rtnl_link_stats64 bond_stats;
+#ifdef CONFIG_XFRM_OFFLOAD
+ struct list_head ipsec_list;
+ /* protecting ipsec_list */
+ spinlock_t ipsec_lock;
+#endif /* CONFIG_XFRM_OFFLOAD */
+ struct bpf_prog *xdp_prog;
+};
+
+#define bond_slave_get_rcu(dev) \
+ ((struct slave *) rcu_dereference(dev->rx_handler_data))
+
+#define bond_slave_get_rtnl(dev) \
+ ((struct slave *) rtnl_dereference(dev->rx_handler_data))
+
+void bond_queue_slave_event(struct slave *slave);
+void bond_lower_state_changed(struct slave *slave);
+
+struct bond_vlan_tag {
+ __be16 vlan_proto;
+ unsigned short vlan_id;
+};
+
+bool bond_sk_check(struct bonding *bond);
+
+/**
+ * bond_get_slave_by_dev - return the bond's slave entry for a net_device
+ *
+ * Returns NULL if the net_device does not belong to any of the bond's slaves.
+ * Caller must hold the bond lock for read.
+ */
+static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
+ struct net_device *slave_dev)
+{
+ return netdev_lower_dev_get_private(bond->dev, slave_dev);
+}
+
+static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
+{
+ return slave->bond;
+}
+
+static inline bool bond_should_override_tx_queue(struct bonding *bond)
+{
+ return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
+ BOND_MODE(bond) == BOND_MODE_ROUNDROBIN;
+}
+
+static inline bool bond_is_lb(const struct bonding *bond)
+{
+ return BOND_MODE(bond) == BOND_MODE_TLB ||
+ BOND_MODE(bond) == BOND_MODE_ALB;
+}
+
+static inline bool bond_needs_speed_duplex(const struct bonding *bond)
+{
+ return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond);
+}
+
+static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
+{
+ return (bond_is_lb(bond) && bond->params.tlb_dynamic_lb == 0);
+}
+
+static inline bool bond_mode_can_use_xmit_hash(const struct bonding *bond)
+{
+ return (BOND_MODE(bond) == BOND_MODE_8023AD ||
+ BOND_MODE(bond) == BOND_MODE_XOR ||
+ BOND_MODE(bond) == BOND_MODE_TLB ||
+ BOND_MODE(bond) == BOND_MODE_ALB);
+}
+
+static inline bool bond_mode_uses_xmit_hash(const struct bonding *bond)
+{
+ return (BOND_MODE(bond) == BOND_MODE_8023AD ||
+ BOND_MODE(bond) == BOND_MODE_XOR ||
+ bond_is_nondyn_tlb(bond));
+}
+
+static inline bool bond_mode_uses_arp(int mode)
+{
+ return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
+ mode != BOND_MODE_ALB;
+}
+
+static inline bool bond_mode_uses_primary(int mode)
+{
+ return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB ||
+ mode == BOND_MODE_ALB;
+}
+
+static inline bool bond_uses_primary(struct bonding *bond)
+{
+ return bond_mode_uses_primary(BOND_MODE(bond));
+}
+
+static inline struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
+{
+ struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave);
+
+ return bond_uses_primary(bond) && slave ? slave->dev : NULL;
+}
+
+static inline bool bond_slave_is_up(struct slave *slave)
+{
+ return netif_running(slave->dev) && netif_carrier_ok(slave->dev);
+}
+
+static inline void bond_set_active_slave(struct slave *slave)
+{
+ if (slave->backup) {
+ slave->backup = 0;
+ bond_queue_slave_event(slave);
+ bond_lower_state_changed(slave);
+ }
+}
+
+static inline void bond_set_backup_slave(struct slave *slave)
+{
+ if (!slave->backup) {
+ slave->backup = 1;
+ bond_queue_slave_event(slave);
+ bond_lower_state_changed(slave);
+ }
+}
+
+static inline void bond_set_slave_state(struct slave *slave,
+ int slave_state, bool notify)
+{
+ if (slave->backup == slave_state)
+ return;
+
+ slave->backup = slave_state;
+ if (notify) {
+ bond_lower_state_changed(slave);
+ bond_queue_slave_event(slave);
+ slave->should_notify = 0;
+ } else {
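+		/* Deferred notification: toggling should_notify lets a
+		 * state change and its later revert cancel out before
+		 * bond_slave_state_notify() runs.
+		 */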
+ if (slave->should_notify)
+ slave->should_notify = 0;
+ else
+ slave->should_notify = 1;
+ }
+}
+
+static inline void bond_slave_state_change(struct bonding *bond)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave(bond, tmp, iter) {
+ if (tmp->link == BOND_LINK_UP)
+ bond_set_active_slave(tmp);
+ else if (tmp->link == BOND_LINK_DOWN)
+ bond_set_backup_slave(tmp);
+ }
+}
+
+static inline void bond_slave_state_notify(struct bonding *bond)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave(bond, tmp, iter) {
+ if (tmp->should_notify) {
+ bond_lower_state_changed(tmp);
+ tmp->should_notify = 0;
+ }
+ }
+}
+
+static inline int bond_slave_state(struct slave *slave)
+{
+ return slave->backup;
+}
+
+static inline bool bond_is_active_slave(struct slave *slave)
+{
+ return !bond_slave_state(slave);
+}
+
+static inline bool bond_slave_can_tx(struct slave *slave)
+{
+ return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP &&
+ bond_is_active_slave(slave);
+}
+
+static inline bool bond_is_active_slave_dev(const struct net_device *slave_dev)
+{
+ struct slave *slave;
+ bool active;
+
+ rcu_read_lock();
+ slave = bond_slave_get_rcu(slave_dev);
+ active = bond_is_active_slave(slave);
+ rcu_read_unlock();
+
+ return active;
+}
+
+static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len)
+{
+ if (len == ETH_ALEN) {
+ ether_addr_copy(dst, src);
+ return;
+ }
+
+ memcpy(dst, src, len);
+}
+
+#define BOND_PRI_RESELECT_ALWAYS 0
+#define BOND_PRI_RESELECT_BETTER 1
+#define BOND_PRI_RESELECT_FAILURE 2
+
+#define BOND_FOM_NONE 0
+#define BOND_FOM_ACTIVE 1
+#define BOND_FOM_FOLLOW 2
+
+#define BOND_ARP_TARGETS_ANY 0
+#define BOND_ARP_TARGETS_ALL 1
+
+#define BOND_ARP_VALIDATE_NONE 0
+#define BOND_ARP_VALIDATE_ACTIVE (1 << BOND_STATE_ACTIVE)
+#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP)
+#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \
+ BOND_ARP_VALIDATE_BACKUP)
+#define BOND_ARP_FILTER (BOND_ARP_VALIDATE_ALL + 1)
+#define BOND_ARP_FILTER_ACTIVE (BOND_ARP_VALIDATE_ACTIVE | \
+ BOND_ARP_FILTER)
+#define BOND_ARP_FILTER_BACKUP (BOND_ARP_VALIDATE_BACKUP | \
+ BOND_ARP_FILTER)
+
+#define BOND_SLAVE_NOTIFY_NOW true
+#define BOND_SLAVE_NOTIFY_LATER false
+
+static inline int slave_do_arp_validate(struct bonding *bond,
+ struct slave *slave)
+{
+ return bond->params.arp_validate & (1 << bond_slave_state(slave));
+}
+
+static inline int slave_do_arp_validate_only(struct bonding *bond)
+{
+ return bond->params.arp_validate & BOND_ARP_FILTER;
+}
+
+static inline int bond_is_ip_target_ok(__be32 addr)
+{
+ return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int bond_is_ip6_target_ok(struct in6_addr *addr)
+{
+ return !ipv6_addr_any(addr) &&
+ !ipv6_addr_loopback(addr) &&
+ !ipv6_addr_is_multicast(addr);
+}
+#endif
+
+/* Get the timestamp of the oldest ARP reply received on this slave for
+ * any of the bond's arp_targets.
+ */
+static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond,
+ struct slave *slave)
+{
+ int i = 1;
+ unsigned long ret = slave->target_last_arp_rx[0];
+
+ for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++)
+ if (time_before(slave->target_last_arp_rx[i], ret))
+ ret = slave->target_last_arp_rx[i];
+
+ return ret;
+}
+
+static inline unsigned long slave_last_rx(struct bonding *bond,
+ struct slave *slave)
+{
+ if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
+ return slave_oldest_target_arp_rx(bond, slave);
+
+ return slave->last_rx;
+}
+
+static inline void slave_update_last_tx(struct slave *slave)
+{
+ WRITE_ONCE(slave->last_tx, jiffies);
+}
+
+static inline unsigned long slave_last_tx(struct slave *slave)
+{
+ return READ_ONCE(slave->last_tx);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave,
+ struct sk_buff *skb)
+{
+ return netpoll_send_skb(slave->np, skb);
+}
+#else
+static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave,
+ struct sk_buff *skb)
+{
+ BUG();
+ return NETDEV_TX_OK;
+}
+#endif
+
+static inline void bond_set_slave_inactive_flags(struct slave *slave,
+ bool notify)
+{
+ if (!bond_is_lb(slave->bond))
+ bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
+ if (!slave->bond->params.all_slaves_active)
+ slave->inactive = 1;
+}
+
+static inline void bond_set_slave_active_flags(struct slave *slave,
+ bool notify)
+{
+ bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
+ slave->inactive = 0;
+}
+
+static inline bool bond_is_slave_inactive(struct slave *slave)
+{
+ return slave->inactive;
+}
+
+static inline void bond_propose_link_state(struct slave *slave, int state)
+{
+ slave->link_new_state = state;
+}
+
+static inline void bond_commit_link_state(struct slave *slave, bool notify)
+{
+ if (slave->link_new_state == BOND_LINK_NOCHANGE)
+ return;
+
+ slave->link = slave->link_new_state;
+ if (notify) {
+ bond_queue_slave_event(slave);
+ bond_lower_state_changed(slave);
+ slave->should_notify_link = 0;
+ } else {
+ if (slave->should_notify_link)
+ slave->should_notify_link = 0;
+ else
+ slave->should_notify_link = 1;
+ }
+}
+
+static inline void bond_set_slave_link_state(struct slave *slave, int state,
+ bool notify)
+{
+ bond_propose_link_state(slave, state);
+ bond_commit_link_state(slave, notify);
+}
+
+static inline void bond_slave_link_notify(struct bonding *bond)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave(bond, tmp, iter) {
+ if (tmp->should_notify_link) {
+ bond_queue_slave_event(tmp);
+ bond_lower_state_changed(tmp);
+ tmp->should_notify_link = 0;
+ }
+ }
+}
+
+static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local)
+{
+ struct in_device *in_dev;
+ __be32 addr = 0;
+
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dev);
+
+ if (in_dev)
+ addr = inet_confirm_addr(dev_net(dev), in_dev, dst, local,
+ RT_SCOPE_HOST);
+ rcu_read_unlock();
+ return addr;
+}
+
+struct bond_net {
+ struct net *net; /* Associated network namespace */
+ struct list_head dev_list;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc_dir;
+#endif
+ struct class_attribute class_attr_bonding_masters;
+};
+
+int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
+netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
+int bond_create(struct net *net, const char *name);
+int bond_create_sysfs(struct bond_net *net);
+void bond_destroy_sysfs(struct bond_net *net);
+void bond_prepare_sysfs_group(struct bonding *bond);
+int bond_sysfs_slave_add(struct slave *slave);
+void bond_sysfs_slave_del(struct slave *slave);
+int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ struct netlink_ext_ack *extack);
+int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
+int bond_set_carrier(struct bonding *bond);
+void bond_select_active_slave(struct bonding *bond);
+void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
+void bond_create_debugfs(void);
+void bond_destroy_debugfs(void);
+void bond_debug_register(struct bonding *bond);
+void bond_debug_unregister(struct bonding *bond);
+void bond_debug_reregister(struct bonding *bond);
+const char *bond_mode_name(int mode);
+void bond_setup(struct net_device *bond_dev);
+unsigned int bond_get_num_tx_queues(void);
+int bond_netlink_init(void);
+void bond_netlink_fini(void);
+struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
+const char *bond_slave_link_status(s8 link);
+struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
+ struct net_device *end_dev,
+ int level);
+int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave);
+void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay);
+void bond_work_init_all(struct bonding *bond);
+
+#ifdef CONFIG_PROC_FS
+void bond_create_proc_entry(struct bonding *bond);
+void bond_remove_proc_entry(struct bonding *bond);
+void bond_create_proc_dir(struct bond_net *bn);
+void bond_destroy_proc_dir(struct bond_net *bn);
+#else
+static inline void bond_create_proc_entry(struct bonding *bond)
+{
+}
+
+static inline void bond_remove_proc_entry(struct bonding *bond)
+{
+}
+
+static inline void bond_create_proc_dir(struct bond_net *bn)
+{
+}
+
+static inline void bond_destroy_proc_dir(struct bond_net *bn)
+{
+}
+#endif
+
+static inline struct slave *bond_slave_has_mac(struct bonding *bond,
+ const u8 *mac)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave(bond, tmp, iter)
+ if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+ return tmp;
+
+ return NULL;
+}
+
+/* Caller must hold rcu_read_lock() for read */
+static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave_rcu(bond, tmp, iter)
+ if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+ return true;
+ return false;
+}
+
+/* Find the index of ip in the arp ip target list, or of the first free
+ * slot if ip == 0. Returns -1 if not found.
+ */
+static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
+{
+ int i;
+
+ for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
+ if (targets[i] == ip)
+ return i;
+ else if (targets[i] == 0)
+ break;
+
+ return -1;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr *ip)
+{
+ struct in6_addr mcaddr;
+ int i;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
+ addrconf_addr_solict_mult(&targets[i], &mcaddr);
+ if ((ipv6_addr_equal(&targets[i], ip)) ||
+ (ipv6_addr_equal(&mcaddr, ip)))
+ return i;
+ else if (ipv6_addr_any(&targets[i]))
+ break;
+ }
+
+ return -1;
+}
+#endif
+
+/* exported from bond_main.c */
+extern unsigned int bond_net_id;
+
+/* exported from bond_netlink.c */
+extern struct rtnl_link_ops bond_link_ops;
+
+/* exported from bond_sysfs_slave.c */
+extern const struct sysfs_ops slave_sysfs_ops;
+
+/* exported from bond_3ad.c */
+extern const u8 lacpdu_mcast_addr[];
+
+static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
+{
+ dev_core_stats_tx_dropped_inc(dev);
+ dev_kfree_skb_any(skb);
+ return NET_XMIT_DROP;
+}
+
+#endif /* _NET_BONDING_H */
diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h
new file mode 100644
index 000000000..2926f1f00
--- /dev/null
+++ b/include/net/bpf_sk_storage.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Facebook */
+#ifndef _BPF_SK_STORAGE_H
+#define _BPF_SK_STORAGE_H
+
+#include <linux/rculist.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/bpf.h>
+#include <net/sock.h>
+#include <uapi/linux/sock_diag.h>
+#include <uapi/linux/btf.h>
+#include <linux/bpf_local_storage.h>
+
+struct sock;
+
+void bpf_sk_storage_free(struct sock *sk);
+
+extern const struct bpf_func_proto bpf_sk_storage_get_proto;
+extern const struct bpf_func_proto bpf_sk_storage_delete_proto;
+extern const struct bpf_func_proto bpf_sk_storage_get_tracing_proto;
+extern const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto;
+
+struct bpf_local_storage_elem;
+struct bpf_sk_storage_diag;
+struct sk_buff;
+struct nlattr;
+
+#ifdef CONFIG_BPF_SYSCALL
+int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk);
+struct bpf_sk_storage_diag *
+bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs);
+void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag);
+int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
+ struct sock *sk, struct sk_buff *skb,
+ int stg_array_type,
+ unsigned int *res_diag_size);
+#else
+static inline int bpf_sk_storage_clone(const struct sock *sk,
+ struct sock *newsk)
+{
+ return 0;
+}
+static inline struct bpf_sk_storage_diag *
+bpf_sk_storage_diag_alloc(const struct nlattr *nla)
+{
+ return NULL;
+}
+static inline void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
+{
+}
+static inline int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
+ struct sock *sk, struct sk_buff *skb,
+ int stg_array_type,
+ unsigned int *res_diag_size)
+{
+ return 0;
+}
+#endif
+
+#endif /* _BPF_SK_STORAGE_H */
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
new file mode 100644
index 000000000..f90f0021f
--- /dev/null
+++ b/include/net/busy_poll.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * net busy poll support
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * Author: Eliezer Tamir
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ */
+
+#ifndef _LINUX_NET_BUSY_POLL_H
+#define _LINUX_NET_BUSY_POLL_H
+
+#include <linux/netdevice.h>
+#include <linux/sched/clock.h>
+#include <linux/sched/signal.h>
+#include <net/ip.h>
+
+/* 0 - Reserved to indicate value not set
+ * 1..NR_CPUS - Reserved for sender_cpu
+ * NR_CPUS+1..~0 - Region available for NAPI IDs
+ */
+#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
+
+#define BUSY_POLL_BUDGET 8
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+
+struct napi_struct;
+extern unsigned int sysctl_net_busy_read __read_mostly;
+extern unsigned int sysctl_net_busy_poll __read_mostly;
+
+static inline bool net_busy_loop_on(void)
+{
+ return READ_ONCE(sysctl_net_busy_poll);
+}
+
+static inline bool sk_can_busy_loop(const struct sock *sk)
+{
+ return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
+}
+
+bool sk_busy_loop_end(void *p, unsigned long start_time);
+
+void napi_busy_loop(unsigned int napi_id,
+ bool (*loop_end)(void *, unsigned long),
+ void *loop_end_arg, bool prefer_busy_poll, u16 budget);
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+static inline unsigned long net_busy_loop_on(void)
+{
+ return 0;
+}
+
+static inline bool sk_can_busy_loop(struct sock *sk)
+{
+ return false;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+static inline unsigned long busy_loop_current_time(void)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
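+	/* local_clock() returns nanoseconds; shifting right by 10 divides
+	 * by 1024 as a cheap approximation of microseconds.
+	 */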
+ return (unsigned long)(local_clock() >> 10);
+#else
+ return 0;
+#endif
+}
+
+/* in poll/select we use the global sysctl_net_busy_poll value */
+static inline bool busy_loop_timeout(unsigned long start_time)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);
+
+ if (bp_usec) {
+ unsigned long end_time = start_time + bp_usec;
+ unsigned long now = busy_loop_current_time();
+
+ return time_after(now, end_time);
+ }
+#endif
+ return true;
+}
+
+static inline bool sk_busy_loop_timeout(struct sock *sk,
+ unsigned long start_time)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);
+
+ if (bp_usec) {
+ unsigned long end_time = start_time + bp_usec;
+ unsigned long now = busy_loop_current_time();
+
+ return time_after(now, end_time);
+ }
+#endif
+ return true;
+}
+
+static inline void sk_busy_loop(struct sock *sk, int nonblock)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int napi_id = READ_ONCE(sk->sk_napi_id);
+
+ if (napi_id >= MIN_NAPI_ID)
+ napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk,
+ READ_ONCE(sk->sk_prefer_busy_poll),
+ READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);
+#endif
+}
+
+/* used in the NIC receive handler to mark the skb */
+static inline void skb_mark_napi_id(struct sk_buff *skb,
+ struct napi_struct *napi)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ /* If the skb was already marked with a valid NAPI ID, avoid overwriting
+ * it.
+ */
+ if (skb->napi_id < MIN_NAPI_ID)
+ skb->napi_id = napi->napi_id;
+#endif
+}
+
+/* used in the protocol handler to propagate the napi_id to the socket */
+static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
+ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+#endif
+ sk_rx_queue_update(sk, skb);
+}
+
+/* Variant of sk_mark_napi_id() for passive flow setup,
+ * where both sk->sk_napi_id and sk->sk_rx_queue_mapping
+ * need to be set.
+ */
+static inline void sk_mark_napi_id_set(struct sock *sk,
+ const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+#endif
+ sk_rx_queue_set(sk, skb);
+}
+
+static inline void __sk_mark_napi_id_once(struct sock *sk, unsigned int napi_id)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ if (!READ_ONCE(sk->sk_napi_id))
+ WRITE_ONCE(sk->sk_napi_id, napi_id);
+#endif
+}
+
+/* variant used for unconnected sockets */
+static inline void sk_mark_napi_id_once(struct sock *sk,
+ const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ __sk_mark_napi_id_once(sk, skb->napi_id);
+#endif
+}
+
+static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
+ const struct xdp_buff *xdp)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ __sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
+#endif
+}
+
+#endif /* _LINUX_NET_BUSY_POLL_H */
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
new file mode 100644
index 000000000..b655d8666
--- /dev/null
+++ b/include/net/caif/caif_dev.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland
+ */
+
+#ifndef CAIF_DEV_H_
+#define CAIF_DEV_H_
+
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfcnfg.h>
+#include <net/caif/caif_device.h>
+#include <linux/caif/caif_socket.h>
+#include <linux/if.h>
+#include <linux/net.h>
+
+/**
+ * struct caif_param - CAIF parameters.
+ * @size: Length of data
+ * @data: Binary Data Blob
+ */
+struct caif_param {
+ u16 size;
+ u8 data[256];
+};
+
+/**
+ * struct caif_connect_request - Request data for CAIF channel setup.
+ * @protocol: Type of CAIF protocol to use (at, datagram etc)
+ * @sockaddr: Socket address to connect.
+ * @priority: Priority of the connection.
+ * @link_selector: Link selector (high bandwidth or low latency)
+ * @ifindex: kernel index of the interface.
+ * @param: Connect Request parameters (CAIF_SO_REQ_PARAM).
+ *
+ * This struct is used when connecting a CAIF channel.
+ * It contains all CAIF channel configuration options.
+ */
+struct caif_connect_request {
+ enum caif_protocol_type protocol;
+ struct sockaddr_caif sockaddr;
+ enum caif_channel_priority priority;
+ enum caif_link_selector link_selector;
+ int ifindex;
+ struct caif_param param;
+};
+
+/**
+ * caif_connect_client - Connect a client to the CAIF Core Stack.
+ * @net: Network namespace in which to connect.
+ * @conn_req: Channel setup parameters, specifying what address
+ * to connect on the modem.
+ * @client_layer: User implementation of client layer. This layer
+ * MUST have receive and control callback functions
+ * implemented.
+ * @ifindex: Link layer interface index used for this connection.
+ * @headroom: Head room needed by CAIF protocol.
+ * @tailroom: Tail room needed by CAIF protocol.
+ *
+ * This function connects a CAIF channel. The Client must implement
+ * the struct cflayer. This layer represents the Client layer and holds
+ * receive functions and control callback functions. Control callback
+ * function will receive information about connect/disconnect responses,
+ * flow control etc (see enum caif_control).
+ * E.g. the CAIF socket layer calls this function for each socket it
+ * connects and has one client_layer instance for each socket.
+ */
+int caif_connect_client(struct net *net,
+ struct caif_connect_request *conn_req,
+ struct cflayer *client_layer, int *ifindex,
+ int *headroom, int *tailroom);
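+
+/*
+ * Connection sketch (illustrative; my_layer, my_receive and my_ctrlcmd are
+ * placeholder names): a client fills in a request and its cflayer callbacks
+ * before calling caif_connect_client():
+ *
+ *	struct caif_connect_request req = {
+ *		.protocol = CAIFPROTO_DATAGRAM,
+ *		.priority = CAIF_PRIO_NORMAL,
+ *	};
+ *	int ifindex, headroom, tailroom;
+ *
+ *	my_layer.receive = my_receive;
+ *	my_layer.ctrlcmd = my_ctrlcmd;
+ *	err = caif_connect_client(net, &req, &my_layer,
+ *				  &ifindex, &headroom, &tailroom);
+ */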
+
+/**
+ * caif_disconnect_client - Disconnects a client from the CAIF stack.
+ *
+ * @net: Network namespace the client was connected in.
+ * @client_layer: Client layer to be disconnected.
+ */
+int caif_disconnect_client(struct net *net, struct cflayer *client_layer);
+
+
+/**
+ * caif_client_register_refcnt - register ref-count functions provided by client.
+ *
+ * @adapt_layer: Client layer using CAIF Stack.
+ * @hold: Function provided by client layer increasing ref-count
+ * @put: Function provided by client layer decreasing ref-count
+ *
+ * Client of the CAIF Stack must register functions for reference counting.
+ * These functions are called by the CAIF Stack for every upstream packet,
+ * and must therefore be implemented efficiently.
+ *
+ * Client should call caif_free_client when the reference count drops to zero.
+ */
+
+void caif_client_register_refcnt(struct cflayer *adapt_layer,
+ void (*hold)(struct cflayer *lyr),
+ void (*put)(struct cflayer *lyr));
+/**
+ * caif_free_client - Free memory used to manage the client in the CAIF Stack.
+ *
+ * @client_layer: Client layer to be removed.
+ *
+ * This function must be called from client layer in order to free memory.
+ * Caller must guarantee that no packets are in flight upstream when calling
+ * this function.
+ */
+void caif_free_client(struct cflayer *adap_layer);
+
+/**
+ * caif_enroll_dev - Enroll a net-device as a CAIF Link Layer.
+ * @dev: Network device to enroll.
+ * @caifdev: Configuration information from CAIF Link Layer
+ * @link_support: Link layer support layer
+ * @head_room: Head room needed by link support layer
+ * @layer: Lowest layer in CAIF stack
+ * @rcv_func: Receive function for CAIF stack.
+ *
+ * This function enrolls a CAIF link layer into the CAIF Stack and
+ * expects the interface to be able to handle CAIF payload.
+ * The link_support layer is used to add any Link Layer specific
+ * framing.
+ */
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+ struct cflayer *link_support, int head_room,
+ struct cflayer **layer, int (**rcv_func)(
+ struct sk_buff *, struct net_device *,
+ struct packet_type *, struct net_device *));
+
+#endif /* CAIF_DEV_H_ */
diff --git a/include/net/caif/caif_device.h b/include/net/caif/caif_device.h
new file mode 100644
index 000000000..91d1fd5b4
--- /dev/null
+++ b/include/net/caif/caif_device.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland
+ */
+
+#ifndef CAIF_DEVICE_H_
+#define CAIF_DEVICE_H_
+#include <linux/kernel.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/caif/caif_socket.h>
+
+/**
+ * struct caif_dev_common - data shared between CAIF drivers and stack.
+ * @flowctrl: Flow Control callback function. This function is
+ * supplied by CAIF Core Stack and is used by CAIF
+ * Link Layer to send flow-stop to CAIF Core.
+ * The flow information will be distributed to all
+ * clients of CAIF.
+ *
+ * @link_select: Profile of device, either high-bandwidth or
+ * low-latency. This member is set by CAIF Link
+ * Layer Device in order to indicate if this device
+ * is a high bandwidth or low latency device.
+ *
+ * @use_frag: CAIF Frames may be fragmented.
+ * Is set by CAIF Link Layer in order to indicate if the
+ * interface receives fragmented frames that must be
+ * assembled by CAIF Core Layer.
+ *
+ * @use_fcs: Indicate if Frame CheckSum (fcs) is used.
+ * Is set if the physical interface is
+ * using Frame Checksum on the CAIF Frames.
+ *
+ * @use_stx: Indicate STart of frame eXtension (stx) in use.
+ * Is set if the CAIF Link Layer expects
+ * CAIF Frames to start with the STX byte.
+ *
+ * This structure is shared between the CAIF drivers and the CAIF stack.
+ * It is used by the device to register its behavior.
+ * CAIF Core layer must set the member flowctrl in order to supply
+ * CAIF Link Layer with the flow control function.
+ *
+ */
+struct caif_dev_common {
+ void (*flowctrl)(struct net_device *net, int on);
+ enum caif_link_selector link_select;
+ int use_frag;
+ int use_fcs;
+ int use_stx;
+};
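+
+/*
+ * Example (hypothetical): a serial CAIF link-layer driver might describe
+ * itself like this before registering its net_device:
+ *
+ *	static struct caif_dev_common my_caif_link = {
+ *		.link_select = CAIF_LINK_LOW_LATENCY,
+ *		.use_frag = true,
+ *		.use_fcs = true,
+ *		.use_stx = true,
+ *	};
+ *
+ * The CAIF core fills in .flowctrl when the device is enrolled.
+ */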
+
+#endif /* CAIF_DEVICE_H_ */
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
new file mode 100644
index 000000000..51f7bb42a
--- /dev/null
+++ b/include/net/caif/caif_layer.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland
+ */
+
+#ifndef CAIF_LAYER_H_
+#define CAIF_LAYER_H_
+
+#include <linux/list.h>
+
+struct cflayer;
+struct cfpkt;
+struct cfpktq;
+struct caif_payload_info;
+struct caif_packet_funcs;
+
+#define CAIF_LAYER_NAME_SZ 16
+
+/**
+ * caif_assert() - Assert function for CAIF.
+ * @assert: expression to evaluate.
+ *
+ * This function will print an error message and do a WARN_ON if the
+ * assertion fails. Normally this will produce a stack dump at the current
+ * location.
+ */
+#define caif_assert(assert) \
+do { \
+ if (!(assert)) { \
+ pr_err("caif:Assert detected:'%s'\n", #assert); \
+ WARN_ON(!(assert)); \
+ } \
+} while (0)
+
+/**
+ * enum caif_ctrlcmd - CAIF Stack Control Signaling sent in layer.ctrlcmd().
+ *
+ * @CAIF_CTRLCMD_FLOW_OFF_IND: Flow Control is OFF, transmit function
+ * should stop sending data
+ *
+ * @CAIF_CTRLCMD_FLOW_ON_IND: Flow Control is ON, transmit function
+ * can start sending data
+ *
+ * @CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: Remote end modem has decided to close
+ * down channel
+ *
+ * @CAIF_CTRLCMD_INIT_RSP: Called initially when the layer below
+ * has finished initialization
+ *
+ * @CAIF_CTRLCMD_DEINIT_RSP: Called when de-initialization is
+ * complete
+ *
+ * @CAIF_CTRLCMD_INIT_FAIL_RSP: Called if initialization fails
+ *
+ * @_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: CAIF Link layer temporarily cannot
+ * send more packets.
+ * @_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND: Called if CAIF Link layer is able
+ * to send packets again.
+ * @_CAIF_CTRLCMD_PHYIF_DOWN_IND: Called if CAIF Link layer is going
+ * down.
+ *
+ * These commands are sent upwards in the CAIF stack to the CAIF Client.
+ * They are used for signaling originating from the modem or CAIF Link Layer.
+ * These are either responses (*_RSP) or events (*_IND).
+ */
+enum caif_ctrlcmd {
+ CAIF_CTRLCMD_FLOW_OFF_IND,
+ CAIF_CTRLCMD_FLOW_ON_IND,
+ CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
+ CAIF_CTRLCMD_INIT_RSP,
+ CAIF_CTRLCMD_DEINIT_RSP,
+ CAIF_CTRLCMD_INIT_FAIL_RSP,
+ _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
+ _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
+ _CAIF_CTRLCMD_PHYIF_DOWN_IND,
+};
+
+/**
+ * enum caif_modemcmd - Modem Control Signaling, sent from CAIF Client
+ * to the CAIF Link Layer or modem.
+ *
+ * @CAIF_MODEMCMD_FLOW_ON_REQ: Flow Control is ON, transmit function
+ * can start sending data.
+ *
+ * @CAIF_MODEMCMD_FLOW_OFF_REQ: Flow Control is OFF, transmit function
+ * should stop sending data.
+ *
+ * @_CAIF_MODEMCMD_PHYIF_USEFULL: Notify physical layer that it is in use
+ *
+ * @_CAIF_MODEMCMD_PHYIF_USELESS: Notify physical layer that it is
+ * no longer in use.
+ *
+ * These are requests sent 'downwards' in the stack.
+ * Flow ON, OFF can be indicated to the modem.
+ */
+enum caif_modemcmd {
+ CAIF_MODEMCMD_FLOW_ON_REQ = 0,
+ CAIF_MODEMCMD_FLOW_OFF_REQ = 1,
+ _CAIF_MODEMCMD_PHYIF_USEFULL = 3,
+ _CAIF_MODEMCMD_PHYIF_USELESS = 4
+};
+
+/**
+ * enum caif_direction - CAIF Packet Direction.
+ * Indicate if a packet is to be sent out or to be received in.
+ * @CAIF_DIR_IN: Incoming packet received.
+ * @CAIF_DIR_OUT: Outgoing packet to be transmitted.
+ */
+enum caif_direction {
+ CAIF_DIR_IN = 0,
+ CAIF_DIR_OUT = 1
+};
+
+/**
+ * struct cflayer - CAIF Stack layer.
+ * Defines the framework for the CAIF Core Stack.
+ * @up: Pointer up to the layer above.
+ * @dn: Pointer down to the layer below.
+ * @node: List node used when the layer participates in a list.
+ * @receive: Packet receive function.
+ * @transmit: Packet transmit function.
+ * @ctrlcmd: Used for control signalling upwards in the stack.
+ * @modemcmd: Used for control signaling downwards in the stack.
+ * @id: The identity of this layer
+ * @name: Name of the layer.
+ *
+ * This structure defines the layered structure in CAIF.
+ *
+ * It defines CAIF layering structure, used by all CAIF Layers and the
+ * layers interfacing CAIF.
+ *
+ * In order to integrate with CAIF, an adaptation layer on top of the CAIF
+ * stack and a PHY layer below the CAIF stack must be implemented. These
+ * layers must follow the design principles below.
+ *
+ * Principles for layering of protocol layers:
+ * - All layers must use this structure. If embedding it, place this
+ * structure first in the layer-specific structure (see the embedding
+ * sketch after the structure definition below).
+ *
+ * - A layer must not depend on any other layer's private data.
+ *
+ * - In order to send data upwards do
+ * layer->up->receive(layer->up, packet);
+ *
+ * - In order to send data downwards do
+ * layer->dn->transmit(layer->dn, info, packet);
+ */
+struct cflayer {
+ struct cflayer *up;
+ struct cflayer *dn;
+ struct list_head node;
+
+ /*
+ * receive() - Receive Function (non-blocking).
+ * Contract: Each layer must implement a receive function passing the
+ * CAIF packets upwards in the stack.
+ * Packet handling rules:
+ * - The CAIF packet (cfpkt) ownership is passed to the
+ * called receive function. This means that the
+ * packet cannot be accessed after passing it to the
+ * above layer using up->receive().
+ *
+ * - If parsing of the packet fails, the packet must be
+ * destroyed and negative error code returned
+ * from the function.
+ * EXCEPTION: If the framing layer (cffrml) returns
+ * -EILSEQ, the packet is not freed.
+ *
+ * - If parsing succeeds (and above layers return OK) then
+ * the function must return a value >= 0.
+ *
+ * Return: a negative value indicates an error; 0 or a positive
+ * value indicates success.
+ *
+ * @layr: Pointer to the current layer the receive function is
+ * implemented for (this pointer).
+ * @cfpkt: Pointer to CaifPacket to be handled.
+ */
+ int (*receive)(struct cflayer *layr, struct cfpkt *cfpkt);
+
+ /*
+ * transmit() - Transmit Function (non-blocking).
+ * Contract: Each layer must implement a transmit function passing the
+ * CAIF packet downwards in the stack.
+ * Packet handling rules:
+ * - The CAIF packet (cfpkt) ownership is passed to the
+ * transmit function. This means that the packet
+ * cannot be accessed after passing it to the below
+ * layer using dn->transmit().
+ *
+ * - Upon error the packet ownership is still passed on,
+ * so the packet shall be freed where error is detected.
+ * Callers of the transmit function shall not free packets,
+ * but errors shall be returned.
+ *
+ * - Return value less than zero means error, zero or
+ * greater than zero means OK.
+ *
+ * Return: a negative value indicates an error; 0 or a positive
+ * value indicates success.
+ *
+ * @layr: Pointer to the current layer the transmit function
+ * is implemented for (this pointer).
+ * @cfpkt: Pointer to CaifPacket to be handled.
+ */
+ int (*transmit) (struct cflayer *layr, struct cfpkt *cfpkt);
+
+ /*
+ * ctrlcmd() - Control Function upwards in CAIF Stack (non-blocking).
+ * Used for signaling responses (CAIF_CTRLCMD_*_RSP)
+ * and asynchronous events from the modem (CAIF_CTRLCMD_*_IND)
+ *
+ * @layr: Pointer to the current layer the receive function
+ * is implemented for (this pointer).
+ * @ctrl: Control Command.
+ */
+ void (*ctrlcmd) (struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
+ /*
+ * modemctrl() - Control Function used for controlling the modem.
+ * Used to signal downwards in the CAIF stack.
+ * Returns 0 on success, < 0 upon failure.
+ *
+ * @layr: Pointer to the current layer the receive function
+ * is implemented for (this pointer).
+ * @ctrl: Control Command.
+ */
+ int (*modemcmd) (struct cflayer *layr, enum caif_modemcmd ctrl);
+
+ unsigned int id;
+ char name[CAIF_LAYER_NAME_SZ];
+};
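+
+/*
+ * Embedding sketch (illustrative; my_layer and my_receive are placeholder
+ * names): per the layering rules above, a layer embeds struct cflayer as
+ * its first member and recovers its private data with container_of():
+ *
+ *	struct my_layer {
+ *		struct cflayer layer;	// must be placed first
+ *		int private_state;
+ *	};
+ *
+ *	static int my_receive(struct cflayer *layr, struct cfpkt *pkt)
+ *	{
+ *		struct my_layer *me = container_of(layr, struct my_layer, layer);
+ *
+ *		me->private_state++;
+ *		return layr->up->receive(layr->up, pkt);
+ *	}
+ */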
+
+/**
+ * layer_set_up() - Set the up pointer for a specified layer.
+ * @layr: Layer where up pointer shall be set.
+ * @above: Layer above.
+ */
+#define layer_set_up(layr, above) ((layr)->up = (struct cflayer *)(above))
+
+/**
+ * layer_set_dn() - Set the down pointer for a specified layer.
+ * @layr: Layer where down pointer shall be set.
+ * @below: Layer below.
+ */
+#define layer_set_dn(layr, below) ((layr)->dn = (struct cflayer *)(below))
+
+/**
+ * struct dev_info - Physical device information about the physical layer.
+ * @dev: Pointer to native physical device.
+ * @id: Physical ID of the physical connection used by the
+ * logical CAIF connection. Used by service layers to
+ * identify their physical id to the CAIF MUX (CFMUXL) so
+ * that the MUX can add the correct physical ID to the
+ * packet.
+ */
+struct dev_info {
+ void *dev;
+ unsigned int id;
+};
+
+/**
+ * struct caif_payload_info - Payload information embedded in packet (sk_buff).
+ *
+ * @dev_info: Information about the receiving device.
+ *
+ * @hdr_len: Header length, used to align the payload on a 32-bit boundary.
+ *
+ * @channel_id: Channel ID of the logical CAIF connection.
+ * Used by mux to insert channel id into the caif packet.
+ */
+struct caif_payload_info {
+ struct dev_info *dev_info;
+ unsigned short hdr_len;
+ unsigned short channel_id;
+};
+
+#endif /* CAIF_LAYER_H_ */
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
new file mode 100644
index 000000000..8819ff4db
--- /dev/null
+++ b/include/net/caif/cfcnfg.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland
+ */
+
+#ifndef CFCNFG_H_
+#define CFCNFG_H_
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfctrl.h>
+
+struct cfcnfg;
+
+/**
+ * enum cfcnfg_phy_preference - Physical preference HW Abstraction
+ *
+ * @CFPHYPREF_UNSPECIFIED: Default physical interface
+ *
+ * @CFPHYPREF_LOW_LAT: Default physical interface for low-latency
+ * traffic
+ * @CFPHYPREF_HIGH_BW: Default physical interface for high-bandwidth
+ * traffic
+ * @CFPHYPREF_LOOP: TEST only Loopback interface simulating modem
+ * responses.
+ *
+ */
+enum cfcnfg_phy_preference {
+ CFPHYPREF_UNSPECIFIED,
+ CFPHYPREF_LOW_LAT,
+ CFPHYPREF_HIGH_BW,
+ CFPHYPREF_LOOP
+};
+
+/**
+ * get_cfcnfg() - Get the CAIF configuration object for a given network.
+ * @net: Network for the CAIF configuration object.
+ */
+struct cfcnfg *get_cfcnfg(struct net *net);
+
+/**
+ * cfcnfg_create() - Create the CAIF configuration object.
+ */
+struct cfcnfg *cfcnfg_create(void);
+
+/**
+ * cfcnfg_remove() - Remove the CFCNFG object
+ * @cfg: config object
+ */
+void cfcnfg_remove(struct cfcnfg *cfg);
+
+/**
+ * cfcnfg_add_phy_layer() - Adds a physical layer to the CAIF stack.
+ * @cnfg: Pointer to a CAIF configuration object, created by
+ * cfcnfg_create().
+ * @dev: Pointer to link layer device
+ * @phy_layer: Specify the physical layer. The transmit function
+ * MUST be set in the structure.
+ * @pref: The phy (link layer) preference.
+ * @link_support: Protocol implementation for link layer specific protocol.
+ * @fcs: Specify if checksum is used in CAIF Framing Layer.
+ * @head_room: Head space needed by link specific protocol.
+ */
+int
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
+ struct net_device *dev, struct cflayer *phy_layer,
+ enum cfcnfg_phy_preference pref,
+ struct cflayer *link_support,
+ bool fcs, int head_room);
+
+/**
+ * cfcnfg_del_phy_layer - Deletes a phy layer from the CAIF stack.
+ *
+ * @cnfg: Pointer to a CAIF configuration object, created by
+ * cfcnfg_create().
+ * @phy_layer: Adaptation layer to be removed.
+ */
+int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer);
+
+/**
+ * cfcnfg_set_phy_state() - Set the state of the physical interface device.
+ * @cnfg: Configuration object
+ * @phy_layer: Physical Layer representation
+ * @up: State of device
+ */
+int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer,
+ bool up);
+
+#endif /* CFCNFG_H_ */
diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
new file mode 100644
index 000000000..86d17315c
--- /dev/null
+++ b/include/net/caif/cfctrl.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland
+ */
+
+#ifndef CFCTRL_H_
+#define CFCTRL_H_
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+
+/* CAIF Control packet commands */
+enum cfctrl_cmd {
+ CFCTRL_CMD_LINK_SETUP = 0,
+ CFCTRL_CMD_LINK_DESTROY = 1,
+ CFCTRL_CMD_LINK_ERR = 2,
+ CFCTRL_CMD_ENUM = 3,
+ CFCTRL_CMD_SLEEP = 4,
+ CFCTRL_CMD_WAKE = 5,
+ CFCTRL_CMD_LINK_RECONF = 6,
+ CFCTRL_CMD_START_REASON = 7,
+ CFCTRL_CMD_RADIO_SET = 8,
+ CFCTRL_CMD_MODEM_SET = 9,
+ CFCTRL_CMD_MASK = 0xf
+};
+
+/* Channel types */
+enum cfctrl_srv {
+ CFCTRL_SRV_DECM = 0,
+ CFCTRL_SRV_VEI = 1,
+ CFCTRL_SRV_VIDEO = 2,
+ CFCTRL_SRV_DBG = 3,
+ CFCTRL_SRV_DATAGRAM = 4,
+ CFCTRL_SRV_RFM = 5,
+ CFCTRL_SRV_UTIL = 6,
+ CFCTRL_SRV_MASK = 0xf
+};
+
+#define CFCTRL_RSP_BIT 0x20
+#define CFCTRL_ERR_BIT 0x10
+
+struct cfctrl_rsp {
+ void (*linksetup_rsp)(struct cflayer *layer, u8 linkid,
+ enum cfctrl_srv serv, u8 phyid,
+ struct cflayer *adapt_layer);
+ void (*linkdestroy_rsp)(struct cflayer *layer, u8 linkid);
+ void (*linkerror_ind)(void);
+ void (*enum_rsp)(void);
+ void (*sleep_rsp)(void);
+ void (*wake_rsp)(void);
+ void (*restart_rsp)(void);
+ void (*radioset_rsp)(void);
+ void (*reject_rsp)(struct cflayer *layer, u8 linkid,
+ struct cflayer *client_layer);
+};
+
+/* Link Setup Parameters for CAIF-Links. */
+struct cfctrl_link_param {
+ enum cfctrl_srv linktype;/* (T3,T0) Type of Channel */
+ u8 priority; /* (P4,P0) Priority of the channel */
+ u8 phyid; /* (U2-U0) Physical interface to connect */
+ u8 endpoint; /* (E1,E0) Endpoint for data channels */
+ u8 chtype; /* (H1,H0) Channel-Type, applies to
+ * VEI, DEBUG */
+ union {
+ struct {
+ u8 connid; /* (D7,D0) Video LinkId */
+ } video;
+
+ struct {
+ u32 connid; /* (N31,N0) Connection ID used
+ * for Datagram */
+ } datagram;
+
+ struct {
+ u32 connid; /* Connection ID used for RFM */
+ char volume[20]; /* Volume to mount for RFM */
+ } rfm; /* Configuration for RFM */
+
+ struct {
+ u16 fifosize_kb; /* Psock FIFO size in KB */
+ u16 fifosize_bufs; /* Psock # signal buffers */
+ char name[16]; /* Name of the PSOCK service */
+ u8 params[255]; /* Link setup Parameters */
+ u16 paramlen; /* Length of Link Setup
+ * Parameters */
+ } utility; /* Configuration for Utility Links (Psock) */
+ } u;
+};
+
+/* This structure is used internally in CFCTRL */
+struct cfctrl_request_info {
+ int sequence_no;
+ enum cfctrl_cmd cmd;
+ u8 channel_id;
+ struct cfctrl_link_param param;
+ struct cflayer *client_layer;
+ struct list_head list;
+};
+
+struct cfctrl {
+ struct cfsrvl serv;
+ struct cfctrl_rsp res;
+ atomic_t req_seq_no;
+ atomic_t rsp_seq_no;
+ struct list_head list;
+ /* Protects simultaneous access to the request info list */
+ spinlock_t info_list_lock;
+#ifndef CAIF_NO_LOOP
+ u8 loop_linkid;
+ int loop_linkused[256];
+ /* Protects simultaneous access to loop_linkid and loop_linkused */
+ spinlock_t loop_linkid_lock;
+#endif
+
+};
+
+void cfctrl_enum_req(struct cflayer *cfctrl, u8 physlinkid);
+int cfctrl_linkup_request(struct cflayer *cfctrl,
+ struct cfctrl_link_param *param,
+ struct cflayer *user_layer);
+int cfctrl_linkdown_req(struct cflayer *cfctrl, u8 linkid,
+ struct cflayer *client);
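+
+/*
+ * Request sketch (illustrative; my_connid and my_client_layer are
+ * placeholder names): setting up a datagram channel on physical interface 0
+ * could look like this, error handling omitted:
+ *
+ *	struct cfctrl_link_param param = {
+ *		.linktype = CFCTRL_SRV_DATAGRAM,
+ *		.priority = 0,
+ *		.phyid = 0,
+ *	};
+ *
+ *	param.u.datagram.connid = my_connid;
+ *	err = cfctrl_linkup_request(cfctrl_layer, &param, my_client_layer);
+ */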
+
+struct cflayer *cfctrl_create(void);
+struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer);
+int cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer);
+void cfctrl_remove(struct cflayer *layr);
+
+#endif /* CFCTRL_H_ */
diff --git a/include/net/caif/cffrml.h b/include/net/caif/cffrml.h
new file mode 100644
index 000000000..1ab8a80ed
--- /dev/null
+++ b/include/net/caif/cffrml.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland
+ */
+
+#ifndef CFFRML_H_
+#define CFFRML_H_
+#include <net/caif/caif_layer.h>
+#include <linux/netdevice.h>
+
+struct cffrml;
+struct cflayer *cffrml_create(u16 phyid, bool use_fcs);
+void cffrml_free(struct cflayer *layr);
+void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up);
+void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn);
+void cffrml_put(struct cflayer *layr);
+void cffrml_hold(struct cflayer *layr);
+int cffrml_refcnt_read(struct cflayer *layr);
+
+#endif /* CFFRML_H_ */
diff --git a/include/net/caif/cfmuxl.h b/include/net/caif/cfmuxl.h
new file mode 100644
index 000000000..92ccb2648
--- /dev/null
+++ b/include/net/caif/cfmuxl.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland
+ */
+
+#ifndef CFMUXL_H_
+#define CFMUXL_H_
+#include <net/caif/caif_layer.h>
+
+struct cfsrvl;
+struct cffrml;
+
+struct cflayer *cfmuxl_create(void);
+int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid);
+struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid);
+int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *up, u8 phyid);
+struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 linkid);
+
+#endif /* CFMUXL_H_ */
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
new file mode 100644
index 000000000..44d914a50
--- /dev/null
+++ b/include/net/caif/cfpkt.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland
+ */
+
+#ifndef CFPKT_H_
+#define CFPKT_H_
+#include <net/caif/caif_layer.h>
+#include <linux/types.h>
+struct cfpkt;
+
+/* Create a CAIF packet.
+ * len: Length of packet to be created
+ * @return New packet.
+ */
+struct cfpkt *cfpkt_create(u16 len);
+
+/*
+ * Destroy a CAIF Packet.
+ * pkt Packet to be destroyed.
+ */
+void cfpkt_destroy(struct cfpkt *pkt);
+
+/*
+ * Extract header from packet.
+ *
+ * pkt Packet to extract header data from.
+ * data Pointer to copy the header data into.
+ * len Length of head data to copy.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len);
+
+static inline u8 cfpkt_extr_head_u8(struct cfpkt *pkt)
+{
+ u8 tmp;
+
+ cfpkt_extr_head(pkt, &tmp, 1);
+
+ return tmp;
+}
+
+static inline u16 cfpkt_extr_head_u16(struct cfpkt *pkt)
+{
+ __le16 tmp;
+
+ cfpkt_extr_head(pkt, &tmp, 2);
+
+ return le16_to_cpu(tmp);
+}
+
+static inline u32 cfpkt_extr_head_u32(struct cfpkt *pkt)
+{
+ __le32 tmp;
+
+ cfpkt_extr_head(pkt, &tmp, 4);
+
+ return le32_to_cpu(tmp);
+}
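+
+/*
+ * Parsing sketch (illustrative): a service layer can pull a small header
+ * off the front of a packet with the helpers above, e.g. a one-byte command
+ * followed by a little-endian 16-bit length:
+ *
+ *	u8 cmd = cfpkt_extr_head_u8(pkt);
+ *	u16 len = cfpkt_extr_head_u16(pkt);
+ *
+ *	if (cfpkt_erroneous(pkt))
+ *		return -EPROTO;		// packet was shorter than the header
+ */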
+
+/*
+ * Peek header from packet.
+ * Reads data from packet without changing packet.
+ *
+ * pkt Packet to extract header data from.
+ * data Pointer to copy the header data into.
+ * len Length of head data to copy.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len);
+
+/*
+ * Extract header from trailer (end of packet).
+ *
+ * pkt Packet to extract trailer data from.
+ * data Pointer to copy the trailer data into.
+ * len Length of trailer data to copy.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_extr_trail(struct cfpkt *pkt, void *data, u16 len);
+
+/*
+ * Add header to packet.
+ *
+ *
+ * pkt Packet to add header data to.
+ * data Pointer to data to copy into the header.
+ * len Length of header data to copy.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_add_head(struct cfpkt *pkt, const void *data, u16 len);
+
+/*
+ * Add trailer to packet.
+ *
+ *
+ * pkt Packet to add trailer data to.
+ * data Pointer to data to copy into the trailer.
+ * len Length of trailer data to copy.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len);
+
+/*
+ * Pad trailer on packet.
+ * Moves data pointer in packet, no content copied.
+ *
+ * pkt Packet in which to pad trailer.
+ * len Length of padding to add.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_pad_trail(struct cfpkt *pkt, u16 len);
+
+/*
+ * Add a single byte to packet body (tail).
+ *
+ * pkt Packet in which to add byte.
+ * data Byte to add.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_addbdy(struct cfpkt *pkt, const u8 data);
+
+/*
+ * Add data to the packet body (tail).
+ *
+ * pkt Packet in which to add data.
+ * data Pointer to data to copy into the packet body.
+ * len Length of data to add.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len);
+
+/*
+ * Checks whether there is more data to process in the packet.
+ * pkt Packet to check.
+ * @return true if more data is available in the packet, false otherwise
+ */
+bool cfpkt_more(struct cfpkt *pkt);
+
+/*
+ * Checks whether the packet is erroneous,
+ * i.e. if an attempt has been made to extract more data than is available
+ * in the packet, or to write more data than was allocated in cfpkt_create().
+ * pkt Packet to check.
+ * @return true on error false otherwise
+ */
+bool cfpkt_erroneous(struct cfpkt *pkt);
+
+/*
+ * Get the packet length.
+ * pkt Packet to get length from.
+ * @return Number of bytes in packet.
+ */
+u16 cfpkt_getlen(struct cfpkt *pkt);
+
+/*
+ * Set the packet length, by adjusting the trailer pointer according to length.
+ * pkt Packet to set length.
+ * len Packet length.
+ * @return Number of bytes in packet.
+ */
+int cfpkt_setlen(struct cfpkt *pkt, u16 len);
+
+/*
+ * cfpkt_append - Appends a packet's data to another packet.
+ * dstpkt: Packet to append data into, WILL BE FREED BY THIS FUNCTION
+ * addpkt: Packet to be appended and automatically released,
+ * WILL BE FREED BY THIS FUNCTION.
+ * expectlen: Packet's expected total length. This should be considered
+ * as a hint.
+ * NB: Input packets will be destroyed after appending and cannot be used
+ * after calling this function.
+ * @return The new appended packet.
+ */
+struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, struct cfpkt *addpkt,
+ u16 expectlen);
+
+/*
+ * cfpkt_split - Split a packet into two packets at the specified split point.
+ * pkt: Packet to be split (will contain the first part of the data on exit)
+ * pos: Position to split packet in two parts.
+ * @return The new packet, containing the second part of the data.
+ */
+struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos);
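+
+/*
+ * Ownership sketch (illustrative): cfpkt_append() consumes both of its
+ * input packets, and cfpkt_split() leaves the first part in the original
+ * packet while returning the remainder:
+ *
+ *	pkt = cfpkt_append(pkt, frag, expected_total);	// pkt, frag consumed
+ *	if (cfpkt_getlen(pkt) > mtu)
+ *		rest = cfpkt_split(pkt, mtu);	// pkt keeps the first mtu bytes
+ */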
+
+/*
+ * Iterates over the packet buffers from start to end, e.g. to compute a
+ * checksum (a packet may consist of a chain of buffers).
+ * pkt: Packet to iterate over.
+ * iter_func: Callback invoked for each buffer; it receives the checksum
+ * calculated so far (chks), a pointer to the buffer (buf) and
+ * the buffer length (len), and returns the updated checksum.
+ * data: Initial checksum value.
+ * @return Checksum of the packet.
+ */
+
+int cfpkt_iterate(struct cfpkt *pkt,
+ u16 (*iter_func)(u16 chks, void *buf, u16 len),
+ u16 data);
+
+/* Map from a "native" packet (e.g. Linux Socket Buffer) to a CAIF packet.
+ * dir - Direction indicating whether this packet is to be sent or received.
+ * nativepkt - The native packet to be transformed to a CAIF packet
+ * @return The mapped CAIF Packet CFPKT.
+ */
+struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt);
+
+/* Map from a CAIF packet to a "native" packet (e.g. Linux Socket Buffer).
+ * pkt - The CAIF packet to be transformed into a "native" packet.
+ * @return The native packet transformed from a CAIF packet.
+ */
+void *cfpkt_tonative(struct cfpkt *pkt);
+
+/*
+ * Returns packet information for a packet.
+ * pkt Packet to get info from.
+ * @return Packet information
+ */
+struct caif_payload_info *cfpkt_info(struct cfpkt *pkt);
+
+/** cfpkt_set_prio - set priority for a CAIF packet.
+ *
+ * @pkt: The CAIF packet to be adjusted.
+ * @prio: one of TC_PRIO_ constants.
+ */
+void cfpkt_set_prio(struct cfpkt *pkt, int prio);
+
+#endif /* CFPKT_H_ */
diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
new file mode 100644
index 000000000..67cce8757
--- /dev/null
+++ b/include/net/caif/cfserl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland
+ */
+
+#ifndef CFSERL_H_
+#define CFSERL_H_
+#include <net/caif/caif_layer.h>
+
+struct cflayer *cfserl_create(int instance, bool use_stx);
+void cfserl_release(struct cflayer *layer);
+#endif
diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h
new file mode 100644
index 000000000..bd5440977
--- /dev/null
+++ b/include/net/caif/cfsrvl.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland
+ */
+
+#ifndef CFSRVL_H_
+#define CFSRVL_H_
+#include <linux/list.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/rculist.h>
+#include <net/caif/caif_layer.h>
+
+struct cfsrvl {
+ struct cflayer layer;
+ bool open;
+ bool phy_flow_on;
+ bool modem_flow_on;
+ bool supports_flowctrl;
+ void (*release)(struct cflayer *layer);
+ struct dev_info dev_info;
+ void (*hold)(struct cflayer *lyr);
+ void (*put)(struct cflayer *lyr);
+ struct rcu_head rcu;
+};
+
+struct cflayer *cfvei_create(u8 linkid, struct dev_info *dev_info);
+struct cflayer *cfdgml_create(u8 linkid, struct dev_info *dev_info);
+struct cflayer *cfutill_create(u8 linkid, struct dev_info *dev_info);
+struct cflayer *cfvidl_create(u8 linkid, struct dev_info *dev_info);
+struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info,
+ int mtu_size);
+struct cflayer *cfdbgl_create(u8 linkid, struct dev_info *dev_info);
+
+void cfsrvl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
+bool cfsrvl_phyid_match(struct cflayer *layer, int phyid);
+
+void cfsrvl_init(struct cfsrvl *service,
+ u8 channel_id,
+ struct dev_info *dev_info,
+ bool supports_flowctrl);
+bool cfsrvl_ready(struct cfsrvl *service, int *err);
+u8 cfsrvl_getphyid(struct cflayer *layer);
+
+static inline void cfsrvl_get(struct cflayer *layr)
+{
+ struct cfsrvl *s = container_of(layr, struct cfsrvl, layer);
+ if (layr == NULL || layr->up == NULL || s->hold == NULL)
+ return;
+
+ s->hold(layr->up);
+}
+
+static inline void cfsrvl_put(struct cflayer *layr)
+{
+ struct cfsrvl *s = container_of(layr, struct cfsrvl, layer);
+ if (layr == NULL || layr->up == NULL || s->put == NULL)
+ return;
+
+ s->put(layr->up);
+}
+#endif /* CFSRVL_H_ */
diff --git a/include/net/calipso.h b/include/net/calipso.h
new file mode 100644
index 000000000..f8667a3fd
--- /dev/null
+++ b/include/net/calipso.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * CALIPSO - Common Architecture Label IPv6 Security Option
+ *
+ * This is an implementation of the CALIPSO protocol as specified in
+ * RFC 5570.
+ *
+ * Authors: Paul Moore <paul@paul-moore.com>
+ * Huw Davies <huw@codeweavers.com>
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
+ * (c) Copyright Huw Davies <huw@codeweavers.com>, 2015
+ */
+
+#ifndef _CALIPSO_H
+#define _CALIPSO_H
+
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/list.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <net/netlabel.h>
+#include <net/request_sock.h>
+#include <linux/refcount.h>
+#include <asm/unaligned.h>
+
+/* known doi values */
+#define CALIPSO_DOI_UNKNOWN 0x00000000
+
+/* doi mapping types */
+#define CALIPSO_MAP_UNKNOWN 0
+#define CALIPSO_MAP_PASS 2
+
+/*
+ * CALIPSO DOI definitions
+ */
+
+/* DOI definition struct */
+struct calipso_doi {
+ u32 doi;
+ u32 type;
+
+ refcount_t refcount;
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+/*
+ * Sysctl Variables
+ */
+extern int calipso_cache_enabled;
+extern int calipso_cache_bucketsize;
+
+#ifdef CONFIG_NETLABEL
+int __init calipso_init(void);
+void calipso_exit(void);
+bool calipso_validate(const struct sk_buff *skb, const unsigned char *option);
+#else
+static inline int __init calipso_init(void)
+{
+ return 0;
+}
+
+static inline void calipso_exit(void)
+{
+}
+static inline bool calipso_validate(const struct sk_buff *skb,
+ const unsigned char *option)
+{
+ return true;
+}
+#endif /* CONFIG_NETLABEL */
+
+#endif /* _CALIPSO_H */
diff --git a/include/net/cfg80211-wext.h b/include/net/cfg80211-wext.h
new file mode 100644
index 000000000..ad77caf2f
--- /dev/null
+++ b/include/net/cfg80211-wext.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __NET_CFG80211_WEXT_H
+#define __NET_CFG80211_WEXT_H
+/*
+ * 802.11 device and configuration interface -- wext handlers
+ *
+ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+
+/*
+ * Temporary wext handlers & helper functions
+ *
+ * These are used only by drivers that aren't yet fully
+ * converted to cfg80211.
+ */
+int cfg80211_wext_giwname(struct net_device *dev,
+ struct iw_request_info *info,
+ char *name, char *extra);
+int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
+ u32 *mode, char *extra);
+int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
+ u32 *mode, char *extra);
+int cfg80211_wext_siwscan(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+int cfg80211_wext_giwscan(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra);
+int cfg80211_wext_giwrange(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra);
+int cfg80211_wext_siwrts(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rts, char *extra);
+int cfg80211_wext_giwrts(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rts, char *extra);
+int cfg80211_wext_siwfrag(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *frag, char *extra);
+int cfg80211_wext_giwfrag(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *frag, char *extra);
+int cfg80211_wext_giwretry(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *retry, char *extra);
+
+#endif /* __NET_CFG80211_WEXT_H */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
new file mode 100644
index 000000000..5bf5c1ab5
--- /dev/null
+++ b/include/net/cfg80211.h
@@ -0,0 +1,9019 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __NET_CFG80211_H
+#define __NET_CFG80211_H
+/*
+ * 802.11 device and configuration interface
+ *
+ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2021 Intel Corporation
+ */
+
+#include <linux/ethtool.h>
+#include <uapi/linux/rfkill.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/netlink.h>
+#include <linux/skbuff.h>
+#include <linux/nl80211.h>
+#include <linux/if_ether.h>
+#include <linux/ieee80211.h>
+#include <linux/net.h>
+#include <linux/rfkill.h>
+#include <net/regulatory.h>
+
+/**
+ * DOC: Introduction
+ *
+ * cfg80211 is the configuration API for 802.11 devices in Linux. It bridges
+ * userspace and drivers, and offers some utility functionality associated
+ * with 802.11. cfg80211 must, directly or indirectly via mac80211, be used
+ * by all modern wireless drivers in Linux, so that they offer a consistent
+ * API through nl80211. For backward compatibility, cfg80211 also offers
+ * wireless extensions to userspace, but hides them from drivers completely.
+ *
+ * Additionally, cfg80211 contains code to help enforce regulatory spectrum
+ * use restrictions.
+ */
+
+
+/**
+ * DOC: Device registration
+ *
+ * In order for a driver to use cfg80211, it must register the hardware device
+ * with cfg80211. This happens through a number of hardware capability structs
+ * described below.
+ *
+ * The fundamental structure for each device is the 'wiphy', of which each
+ * instance describes a physical wireless device connected to the system. Each
+ * such wiphy can have zero, one, or many virtual interfaces associated with
+ * it, which need to be identified as such by pointing the network interface's
+ * @ieee80211_ptr pointer to a &struct wireless_dev which further describes
+ * the wireless part of the interface, normally this struct is embedded in the
+ * network interface's private data area. Drivers can optionally allow creating
+ * or destroying virtual interfaces on the fly, but without at least one
+ * interface, or the ability to create one, the wireless device isn't useful.
+ *
+ * Each wiphy structure contains device capability information, and also has
+ * a pointer to the various operations the driver offers. The definitions and
+ * structures here describe these capabilities in detail.
+ */
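+
+/*
+ * Registration sketch (illustrative; my_cfg_ops, my_priv and my_band_2ghz
+ * are placeholder names): a driver typically allocates the wiphy, fills in
+ * its capabilities, and only then registers it:
+ *
+ *	wiphy = wiphy_new(&my_cfg_ops, sizeof(struct my_priv));
+ *	if (!wiphy)
+ *		return -ENOMEM;
+ *	wiphy->bands[NL80211_BAND_2GHZ] = &my_band_2ghz;
+ *	err = wiphy_register(wiphy);
+ *	if (err)
+ *		wiphy_free(wiphy);
+ */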
+
+struct wiphy;
+
+/*
+ * wireless hardware capability structures
+ */
+
+/**
+ * enum ieee80211_channel_flags - channel flags
+ *
+ * Channel flags set by the regulatory control code.
+ *
+ * @IEEE80211_CHAN_DISABLED: This channel is disabled.
+ * @IEEE80211_CHAN_NO_IR: do not initiate radiation, this includes
+ * sending probe requests or beaconing.
+ * @IEEE80211_CHAN_RADAR: Radar detection is required on this channel.
+ * @IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel
+ * is not permitted.
+ * @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
+ * is not permitted.
+ * @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel.
+ * @IEEE80211_CHAN_NO_80MHZ: If the driver supports 80 MHz on the band,
+ * this flag indicates that an 80 MHz channel cannot use this
+ * channel as the control or any of the secondary channels.
+ * This may be due to the driver or due to regulatory bandwidth
+ * restrictions.
+ * @IEEE80211_CHAN_NO_160MHZ: If the driver supports 160 MHz on the band,
+ * this flag indicates that a 160 MHz channel cannot use this
+ * channel as the control or any of the secondary channels.
+ * This may be due to the driver or due to regulatory bandwidth
+ * restrictions.
+ * @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY
+ * @IEEE80211_CHAN_IR_CONCURRENT: see %NL80211_FREQUENCY_ATTR_IR_CONCURRENT
+ * @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted
+ * on this channel.
+ * @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
+ * on this channel.
+ * @IEEE80211_CHAN_NO_HE: HE operation is not permitted on this channel.
+ * @IEEE80211_CHAN_1MHZ: 1 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_2MHZ: 2 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_4MHZ: 4 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_8MHZ: 8 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_16MHZ: 16 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_NO_320MHZ: If the driver supports 320 MHz on the band,
+ * this flag indicates that a 320 MHz channel cannot use this
+ * channel as the control or any of the secondary channels.
+ * This may be due to the driver or due to regulatory bandwidth
+ * restrictions.
+ * @IEEE80211_CHAN_NO_EHT: EHT operation is not permitted on this channel.
+ */
+enum ieee80211_channel_flags {
+ IEEE80211_CHAN_DISABLED = 1<<0,
+ IEEE80211_CHAN_NO_IR = 1<<1,
+ /* hole at 1<<2 */
+ IEEE80211_CHAN_RADAR = 1<<3,
+ IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
+ IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
+ IEEE80211_CHAN_NO_OFDM = 1<<6,
+ IEEE80211_CHAN_NO_80MHZ = 1<<7,
+ IEEE80211_CHAN_NO_160MHZ = 1<<8,
+ IEEE80211_CHAN_INDOOR_ONLY = 1<<9,
+ IEEE80211_CHAN_IR_CONCURRENT = 1<<10,
+ IEEE80211_CHAN_NO_20MHZ = 1<<11,
+ IEEE80211_CHAN_NO_10MHZ = 1<<12,
+ IEEE80211_CHAN_NO_HE = 1<<13,
+ IEEE80211_CHAN_1MHZ = 1<<14,
+ IEEE80211_CHAN_2MHZ = 1<<15,
+ IEEE80211_CHAN_4MHZ = 1<<16,
+ IEEE80211_CHAN_8MHZ = 1<<17,
+ IEEE80211_CHAN_16MHZ = 1<<18,
+ IEEE80211_CHAN_NO_320MHZ = 1<<19,
+ IEEE80211_CHAN_NO_EHT = 1<<20,
+};
+
+#define IEEE80211_CHAN_NO_HT40 \
+ (IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)
+
+#define IEEE80211_DFS_MIN_CAC_TIME_MS 60000
+#define IEEE80211_DFS_MIN_NOP_TIME_MS (30 * 60 * 1000)
+
+/**
+ * struct ieee80211_channel - channel definition
+ *
+ * This structure describes a single channel for use
+ * with cfg80211.
+ *
+ * @center_freq: center frequency in MHz
+ * @freq_offset: offset from @center_freq, in kHz
+ * @hw_value: hardware-specific value for the channel
+ * @flags: channel flags from &enum ieee80211_channel_flags.
+ * @orig_flags: channel flags at registration time, used by regulatory
+ * code to support devices with additional restrictions
+ * @band: band this channel belongs to.
+ * @max_antenna_gain: maximum antenna gain in dBi
+ * @max_power: maximum transmission power (in dBm)
+ * @max_reg_power: maximum regulatory transmission power (in dBm)
+ * @beacon_found: helper to regulatory code to indicate when a beacon
+ * has been found on this channel. Use regulatory_hint_found_beacon()
+ * to enable this, this is useful only on 5 GHz band.
+ * @orig_mag: internal use
+ * @orig_mpwr: internal use
+ * @dfs_state: current state of this channel. Only relevant if radar is required
+ * on this channel.
+ * @dfs_state_entered: timestamp (jiffies) when the dfs state was entered.
+ * @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels.
+ */
+struct ieee80211_channel {
+ enum nl80211_band band;
+ u32 center_freq;
+ u16 freq_offset;
+ u16 hw_value;
+ u32 flags;
+ int max_antenna_gain;
+ int max_power;
+ int max_reg_power;
+ bool beacon_found;
+ u32 orig_flags;
+ int orig_mag, orig_mpwr;
+ enum nl80211_dfs_state dfs_state;
+ unsigned long dfs_state_entered;
+ unsigned int dfs_cac_ms;
+};
+
+/**
+ * enum ieee80211_rate_flags - rate flags
+ *
+ * Hardware/specification flags for rates. These are structured
+ * in a way that allows using the same bitrate structure for
+ * different bands/PHY modes.
+ *
+ * @IEEE80211_RATE_SHORT_PREAMBLE: Hardware can send with short
+ * preamble on this bitrate; only relevant in 2.4GHz band and
+ * with CCK rates.
+ * @IEEE80211_RATE_MANDATORY_A: This bitrate is a mandatory rate
+ * when used with 802.11a (on the 5 GHz band); filled by the
+ * core code when registering the wiphy.
+ * @IEEE80211_RATE_MANDATORY_B: This bitrate is a mandatory rate
+ * when used with 802.11b (on the 2.4 GHz band); filled by the
+ * core code when registering the wiphy.
+ * @IEEE80211_RATE_MANDATORY_G: This bitrate is a mandatory rate
+ * when used with 802.11g (on the 2.4 GHz band); filled by the
+ * core code when registering the wiphy.
+ * @IEEE80211_RATE_ERP_G: This is an ERP rate in 802.11g mode.
+ * @IEEE80211_RATE_SUPPORTS_5MHZ: Rate can be used in 5 MHz mode
+ * @IEEE80211_RATE_SUPPORTS_10MHZ: Rate can be used in 10 MHz mode
+ */
+enum ieee80211_rate_flags {
+ IEEE80211_RATE_SHORT_PREAMBLE = 1<<0,
+ IEEE80211_RATE_MANDATORY_A = 1<<1,
+ IEEE80211_RATE_MANDATORY_B = 1<<2,
+ IEEE80211_RATE_MANDATORY_G = 1<<3,
+ IEEE80211_RATE_ERP_G = 1<<4,
+ IEEE80211_RATE_SUPPORTS_5MHZ = 1<<5,
+ IEEE80211_RATE_SUPPORTS_10MHZ = 1<<6,
+};
+
+/**
+ * enum ieee80211_bss_type - BSS type filter
+ *
+ * @IEEE80211_BSS_TYPE_ESS: Infrastructure BSS
+ * @IEEE80211_BSS_TYPE_PBSS: Personal BSS
+ * @IEEE80211_BSS_TYPE_IBSS: Independent BSS
+ * @IEEE80211_BSS_TYPE_MBSS: Mesh BSS
+ * @IEEE80211_BSS_TYPE_ANY: Wildcard value for matching any BSS type
+ */
+enum ieee80211_bss_type {
+ IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_BSS_TYPE_PBSS,
+ IEEE80211_BSS_TYPE_IBSS,
+ IEEE80211_BSS_TYPE_MBSS,
+ IEEE80211_BSS_TYPE_ANY
+};
+
+/**
+ * enum ieee80211_privacy - BSS privacy filter
+ *
+ * @IEEE80211_PRIVACY_ON: privacy bit set
+ * @IEEE80211_PRIVACY_OFF: privacy bit clear
+ * @IEEE80211_PRIVACY_ANY: Wildcard value for matching any privacy setting
+ */
+enum ieee80211_privacy {
+ IEEE80211_PRIVACY_ON,
+ IEEE80211_PRIVACY_OFF,
+ IEEE80211_PRIVACY_ANY
+};
+
+#define IEEE80211_PRIVACY(x) \
+ ((x) ? IEEE80211_PRIVACY_ON : IEEE80211_PRIVACY_OFF)
+
+/**
+ * struct ieee80211_rate - bitrate definition
+ *
+ * This structure describes a bitrate that an 802.11 PHY can
+ * operate with. The two values @hw_value and @hw_value_short
+ * are only for driver use when pointers to this structure are
+ * passed around.
+ *
+ * @flags: rate-specific flags
+ * @bitrate: bitrate in units of 100 Kbps
+ * @hw_value: driver/hardware value for this rate
+ * @hw_value_short: driver/hardware value for this rate when
+ * short preamble is used
+ */
+struct ieee80211_rate {
+ u32 flags;
+ u16 bitrate;
+ u16 hw_value, hw_value_short;
+};
+
+/**
+ * struct ieee80211_he_obss_pd - AP settings for spatial reuse
+ *
+ * @enable: is the feature enabled.
+ * @sr_ctrl: The SR Control field of SRP element.
+ * @non_srg_max_offset: non-SRG maximum tx power offset
+ * @min_offset: minimal tx power offset an associated station shall use
+ * @max_offset: maximum tx power offset an associated station shall use
+ * @bss_color_bitmap: bitmap that indicates the BSS color values used by
+ * members of the SRG
+ * @partial_bssid_bitmap: bitmap that indicates the partial BSSID values
+ * used by members of the SRG
+ */
+struct ieee80211_he_obss_pd {
+ bool enable;
+ u8 sr_ctrl;
+ u8 non_srg_max_offset;
+ u8 min_offset;
+ u8 max_offset;
+ u8 bss_color_bitmap[8];
+ u8 partial_bssid_bitmap[8];
+};
+
+/**
+ * struct cfg80211_he_bss_color - AP settings for BSS coloring
+ *
+ * @color: the current color.
+ * @enabled: HE BSS color is used
+ * @partial: define the AID equation.
+ */
+struct cfg80211_he_bss_color {
+ u8 color;
+ bool enabled;
+ bool partial;
+};
+
+/**
+ * struct ieee80211_sta_ht_cap - STA's HT capabilities
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11n HT capabilities for an STA.
+ *
+ * @ht_supported: is HT supported by the STA
+ * @cap: HT capabilities map as described in 802.11n spec
+ * @ampdu_factor: Maximum A-MPDU length factor
+ * @ampdu_density: Minimum A-MPDU spacing
+ * @mcs: Supported MCS rates
+ */
+struct ieee80211_sta_ht_cap {
+ u16 cap; /* use IEEE80211_HT_CAP_ */
+ bool ht_supported;
+ u8 ampdu_factor;
+ u8 ampdu_density;
+ struct ieee80211_mcs_info mcs;
+};
+
+/**
+ * struct ieee80211_sta_vht_cap - STA's VHT capabilities
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11ac VHT capabilities for an STA.
+ *
+ * @vht_supported: is VHT supported by the STA
+ * @cap: VHT capabilities map as described in 802.11ac spec
+ * @vht_mcs: Supported VHT MCS rates
+ */
+struct ieee80211_sta_vht_cap {
+ bool vht_supported;
+ u32 cap; /* use IEEE80211_VHT_CAP_ */
+ struct ieee80211_vht_mcs_info vht_mcs;
+};
+
+#define IEEE80211_HE_PPE_THRES_MAX_LEN 25
+
+/**
+ * struct ieee80211_sta_he_cap - STA's HE capabilities
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11ax HE capabilities for a STA.
+ *
+ * @has_he: true iff HE data is valid.
+ * @he_cap_elem: Fixed portion of the HE capabilities element.
+ * @he_mcs_nss_supp: The supported NSS/MCS combinations.
+ * @ppe_thres: Holds the PPE Thresholds data.
+ */
+struct ieee80211_sta_he_cap {
+ bool has_he;
+ struct ieee80211_he_cap_elem he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp;
+ u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN];
+};
+
+/**
+ * struct ieee80211_eht_mcs_nss_supp - EHT max supported NSS per MCS
+ *
+ * See P802.11be_D1.3 Table 9-401k - "Subfields of the Supported EHT-MCS
+ * and NSS Set field"
+ *
+ * @only_20mhz: MCS/NSS support for 20 MHz-only STA.
+ * @bw: MCS/NSS support for 80, 160 and 320 MHz
+ * @bw._80: MCS/NSS support for BW <= 80 MHz
+ * @bw._160: MCS/NSS support for BW = 160 MHz
+ * @bw._320: MCS/NSS support for BW = 320 MHz
+ */
+struct ieee80211_eht_mcs_nss_supp {
+ union {
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only only_20mhz;
+ struct {
+ struct ieee80211_eht_mcs_nss_supp_bw _80;
+ struct ieee80211_eht_mcs_nss_supp_bw _160;
+ struct ieee80211_eht_mcs_nss_supp_bw _320;
+ } __packed bw;
+ } __packed;
+} __packed;
+
+#define IEEE80211_EHT_PPE_THRES_MAX_LEN 32
+
+/**
+ * struct ieee80211_sta_eht_cap - STA's EHT capabilities
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11be EHT capabilities for a STA.
+ *
+ * @has_eht: true iff EHT data is valid.
+ * @eht_cap_elem: Fixed portion of the eht capabilities element.
+ * @eht_mcs_nss_supp: The supported NSS/MCS combinations.
+ * @eht_ppe_thres: Holds the PPE Thresholds data.
+ */
+struct ieee80211_sta_eht_cap {
+ bool has_eht;
+ struct ieee80211_eht_cap_elem_fixed eht_cap_elem;
+ struct ieee80211_eht_mcs_nss_supp eht_mcs_nss_supp;
+ u8 eht_ppe_thres[IEEE80211_EHT_PPE_THRES_MAX_LEN];
+};
+
+/**
+ * struct ieee80211_sband_iftype_data - sband data per interface type
+ *
+ * This structure encapsulates sband data that is relevant for the
+ * interface types defined in @types_mask. Each type in the
+ * @types_mask must be unique across all instances of iftype_data.
+ *
+ * @types_mask: interface types mask
+ * @he_cap: holds the HE capabilities
+ * @he_6ghz_capa: HE 6 GHz capabilities, must be filled in for a
+ * 6 GHz band channel (and 0 may be a valid value).
+ * @eht_cap: STA's EHT capabilities
+ * @vendor_elems: vendor element(s) to advertise
+ * @vendor_elems.data: vendor element(s) data
+ * @vendor_elems.len: vendor element(s) length
+ */
+struct ieee80211_sband_iftype_data {
+ u16 types_mask;
+ struct ieee80211_sta_he_cap he_cap;
+ struct ieee80211_he_6ghz_capa he_6ghz_capa;
+ struct ieee80211_sta_eht_cap eht_cap;
+ struct {
+ const u8 *data;
+ unsigned int len;
+ } vendor_elems;
+};
+
+/**
+ * enum ieee80211_edmg_bw_config - allowed channel bandwidth configurations
+ *
+ * @IEEE80211_EDMG_BW_CONFIG_4: 2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_5: 2.16GHz and 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_6: 2.16GHz, 4.32GHz and 6.48GHz
+ * @IEEE80211_EDMG_BW_CONFIG_7: 2.16GHz, 4.32GHz, 6.48GHz and 8.64GHz
+ * @IEEE80211_EDMG_BW_CONFIG_8: 2.16GHz and 2.16GHz + 2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_9: 2.16GHz, 4.32GHz and 2.16GHz + 2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_10: 2.16GHz, 4.32GHz, 6.48GHz and 2.16GHz+2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_11: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz and
+ * 2.16GHz+2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_12: 2.16GHz, 2.16GHz + 2.16GHz and
+ * 4.32GHz + 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_13: 2.16GHz, 4.32GHz, 2.16GHz + 2.16GHz and
+ * 4.32GHz + 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_14: 2.16GHz, 4.32GHz, 6.48GHz, 2.16GHz + 2.16GHz
+ * and 4.32GHz + 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_15: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz,
+ * 2.16GHz + 2.16GHz and 4.32GHz + 4.32GHz
+ */
+enum ieee80211_edmg_bw_config {
+ IEEE80211_EDMG_BW_CONFIG_4 = 4,
+ IEEE80211_EDMG_BW_CONFIG_5 = 5,
+ IEEE80211_EDMG_BW_CONFIG_6 = 6,
+ IEEE80211_EDMG_BW_CONFIG_7 = 7,
+ IEEE80211_EDMG_BW_CONFIG_8 = 8,
+ IEEE80211_EDMG_BW_CONFIG_9 = 9,
+ IEEE80211_EDMG_BW_CONFIG_10 = 10,
+ IEEE80211_EDMG_BW_CONFIG_11 = 11,
+ IEEE80211_EDMG_BW_CONFIG_12 = 12,
+ IEEE80211_EDMG_BW_CONFIG_13 = 13,
+ IEEE80211_EDMG_BW_CONFIG_14 = 14,
+ IEEE80211_EDMG_BW_CONFIG_15 = 15,
+};
+
+/**
+ * struct ieee80211_edmg - EDMG configuration
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11ay EDMG configuration
+ *
+ * @channels: bitmap that indicates the 2.16 GHz channel(s)
+ * that are allowed to be used for transmissions.
+ * Bit 0 indicates channel 1, bit 1 indicates channel 2, etc.
+ * Set to 0 to indicate EDMG is not supported.
+ * @bw_config: Channel BW Configuration subfield encodes
+ * the allowed channel bandwidth configurations
+ */
+struct ieee80211_edmg {
+ u8 channels;
+ enum ieee80211_edmg_bw_config bw_config;
+};
+
+/**
+ * struct ieee80211_sta_s1g_cap - STA's S1G capabilities
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11ah S1G capabilities for a STA.
+ *
+ * @s1g: is STA an S1G STA
+ * @cap: S1G capabilities information
+ * @nss_mcs: Supported NSS MCS set
+ */
+struct ieee80211_sta_s1g_cap {
+ bool s1g;
+ u8 cap[10]; /* use S1G_CAPAB_ */
+ u8 nss_mcs[5];
+};
+
+/**
+ * struct ieee80211_supported_band - frequency band definition
+ *
+ * This structure describes a frequency band a wiphy
+ * is able to operate in.
+ *
+ * @channels: Array of channels the hardware can operate with
+ * in this band.
+ * @band: the band this structure represents
+ * @n_channels: Number of channels in @channels
+ * @bitrates: Array of bitrates the hardware can operate with
+ * in this band. Must be sorted to give a valid "supported
+ * rates" IE, i.e. CCK rates first, then OFDM.
+ * @n_bitrates: Number of bitrates in @bitrates
+ * @ht_cap: HT capabilities in this band
+ * @vht_cap: VHT capabilities in this band
+ * @s1g_cap: S1G capabilities in this band
+ * @edmg_cap: EDMG capabilities in this band
+ * @n_iftype_data: number of iftype data entries
+ * @iftype_data: interface type data entries. Note that the bits in
+ * @types_mask inside this structure cannot overlap (i.e. only
+ * one occurrence of each type is allowed across all instances of
+ * iftype_data).
+ */
+struct ieee80211_supported_band {
+ struct ieee80211_channel *channels;
+ struct ieee80211_rate *bitrates;
+ enum nl80211_band band;
+ int n_channels;
+ int n_bitrates;
+ struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
+ struct ieee80211_sta_s1g_cap s1g_cap;
+ struct ieee80211_edmg edmg_cap;
+ u16 n_iftype_data;
+ const struct ieee80211_sband_iftype_data *iftype_data;
+};
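+
+/*
+ * Band sketch (illustrative; the my_* tables are placeholder names and
+ * my_2ghz_rates is assumed to be defined elsewhere): drivers usually
+ * describe a band with static channel and bitrate tables:
+ *
+ *	static struct ieee80211_channel my_2ghz_channels[] = {
+ *		{ .band = NL80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1 },
+ *		{ .band = NL80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2 },
+ *	};
+ *
+ *	static struct ieee80211_supported_band my_band_2ghz = {
+ *		.band = NL80211_BAND_2GHZ,
+ *		.channels = my_2ghz_channels,
+ *		.n_channels = ARRAY_SIZE(my_2ghz_channels),
+ *		.bitrates = my_2ghz_rates,
+ *		.n_bitrates = ARRAY_SIZE(my_2ghz_rates),
+ *	};
+ */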
+
+/**
+ * ieee80211_get_sband_iftype_data - return sband data for a given iftype
+ * @sband: the sband to search for the STA on
+ * @iftype: enum nl80211_iftype
+ *
+ * Return: pointer to struct ieee80211_sband_iftype_data, or NULL if none found
+ */
+static inline const struct ieee80211_sband_iftype_data *
+ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
+ u8 iftype)
+{
+ int i;
+
+ if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
+ return NULL;
+
+ if (iftype == NL80211_IFTYPE_AP_VLAN)
+ iftype = NL80211_IFTYPE_AP;
+
+ for (i = 0; i < sband->n_iftype_data; i++) {
+ const struct ieee80211_sband_iftype_data *data =
+ &sband->iftype_data[i];
+
+ if (data->types_mask & BIT(iftype))
+ return data;
+ }
+
+ return NULL;
+}
+
+/**
+ * ieee80211_get_he_iftype_cap - return HE capabilities for an sband's iftype
+ * @sband: the sband to search for the iftype on
+ * @iftype: enum nl80211_iftype
+ *
+ * Return: pointer to the struct ieee80211_sta_he_cap, or NULL if none found
+ */
+static inline const struct ieee80211_sta_he_cap *
+ieee80211_get_he_iftype_cap(const struct ieee80211_supported_band *sband,
+ u8 iftype)
+{
+ const struct ieee80211_sband_iftype_data *data =
+ ieee80211_get_sband_iftype_data(sband, iftype);
+
+ if (data && data->he_cap.has_he)
+ return &data->he_cap;
+
+ return NULL;
+}
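+
+/*
+ * Usage sketch (illustrative): a driver could check whether HE is
+ * advertised for station mode on a band it registered; "sband" is
+ * assumed to point at one of the wiphy's band entries.
+ *
+ *	const struct ieee80211_sta_he_cap *he_cap =
+ *		ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_STATION);
+ *
+ *	if (he_cap)
+ *		; // HE is supported for station mode on this band
+ */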
+
+/**
+ * ieee80211_get_he_6ghz_capa - return HE 6 GHz capabilities
+ * @sband: the sband to search for the STA on
+ * @iftype: the iftype to search for
+ *
+ * Return: the 6GHz capabilities
+ */
+static inline __le16
+ieee80211_get_he_6ghz_capa(const struct ieee80211_supported_band *sband,
+ enum nl80211_iftype iftype)
+{
+ const struct ieee80211_sband_iftype_data *data =
+ ieee80211_get_sband_iftype_data(sband, iftype);
+
+ if (WARN_ON(!data || !data->he_cap.has_he))
+ return 0;
+
+ return data->he_6ghz_capa.capa;
+}
+
+/**
+ * ieee80211_get_eht_iftype_cap - return EHT capabilities for an sband's iftype
+ * @sband: the sband to search for the iftype on
+ * @iftype: enum nl80211_iftype
+ *
+ * Return: pointer to the struct ieee80211_sta_eht_cap, or NULL if none found
+ */
+static inline const struct ieee80211_sta_eht_cap *
+ieee80211_get_eht_iftype_cap(const struct ieee80211_supported_band *sband,
+ enum nl80211_iftype iftype)
+{
+ const struct ieee80211_sband_iftype_data *data =
+ ieee80211_get_sband_iftype_data(sband, iftype);
+
+ if (data && data->eht_cap.has_eht)
+ return &data->eht_cap;
+
+ return NULL;
+}
+
+/**
+ * wiphy_read_of_freq_limits - read frequency limits from device tree
+ *
+ * @wiphy: the wireless device to get extra limits for
+ *
+ * Some devices may have extra limitations specified in DT. This may be useful
+ * for chipsets that normally support more bands but are limited due to board
+ * design (e.g. by antennas or external power amplifier).
+ *
+ * This function reads info from DT and uses it to *modify* channels (disable
+ * unavailable ones). It's usually a *bad* idea to use it in drivers with
+ * shared channel data as DT limitations are device specific. You should make
+ * sure to call it only if channels in wiphy are copied and can be modified
+ * without affecting other devices.
+ *
+ * As this function accesses the device node, it has to be called after
+ * set_wiphy_dev(). It also modifies channels, so they have to be set first.
+ * If using this helper, call it before wiphy_register().
+ */
+#ifdef CONFIG_OF
+void wiphy_read_of_freq_limits(struct wiphy *wiphy);
+#else /* CONFIG_OF */
+static inline void wiphy_read_of_freq_limits(struct wiphy *wiphy)
+{
+}
+#endif /* !CONFIG_OF */
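+
+/*
+ * Call-order sketch (illustrative, with a hypothetical platform device):
+ * the DT limits can only be read once the device is known and channels
+ * are set, but must be applied before registration:
+ *
+ *	set_wiphy_dev(wiphy, &pdev->dev);
+ *	wiphy->bands[NL80211_BAND_2GHZ] = &drv->band_2ghz; // driver-owned copy
+ *	wiphy_read_of_freq_limits(wiphy);
+ *	err = wiphy_register(wiphy);
+ */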
+
+
+/*
+ * Wireless hardware/device configuration structures and methods
+ */
+
+/**
+ * DOC: Actions and configuration
+ *
+ * Each wireless device and each virtual interface offer a set of configuration
+ * operations and other actions that are invoked by userspace. Each of these
+ * actions is described in the operations structure, and the parameters these
+ * operations use are described separately.
+ *
+ * Additionally, some operations are asynchronous and expect to get status
+ * information via some functions that drivers need to call.
+ *
+ * Scanning and BSS list handling, with their associated functionality,
+ * are described in a separate chapter.
+ */
+
+#define VHT_MUMIMO_GROUPS_DATA_LEN (WLAN_MEMBERSHIP_LEN +\
+ WLAN_USER_POSITION_LEN)
+
+/**
+ * struct vif_params - describes virtual interface parameters
+ * @flags: monitor interface flags, unchanged if 0, otherwise
+ * %MONITOR_FLAG_CHANGED will be set
+ * @use_4addr: use 4-address frames
+ * @macaddr: address to use for this virtual interface.
+ * If this parameter is set to the zero address, the driver may
+ * determine the address as needed.
+ * This feature is only fully supported by drivers that enable the
+ * %NL80211_FEATURE_MAC_ON_CREATE flag. Others may support creating
+ * only P2P devices with a specified MAC.
+ * @vht_mumimo_groups: MU-MIMO groupID, used for monitoring MU-MIMO packets
+ * belonging to that MU-MIMO groupID; %NULL if not changed
+ * @vht_mumimo_follow_addr: MU-MIMO follow address, used for monitoring
+ * MU-MIMO packets going to the specified station; %NULL if not changed
+ */
+struct vif_params {
+ u32 flags;
+ int use_4addr;
+ u8 macaddr[ETH_ALEN];
+ const u8 *vht_mumimo_groups;
+ const u8 *vht_mumimo_follow_addr;
+};
+
+/**
+ * struct key_params - key information
+ *
+ * Information about a key
+ *
+ * @key: key material
+ * @key_len: length of key material
+ * @cipher: cipher suite selector
+ * @seq: sequence counter (IV/PN) for TKIP and CCMP keys, only used
+ * with the get_key() callback, must be in little endian,
+ * length given by @seq_len.
+ * @seq_len: length of @seq.
+ * @vlan_id: vlan_id for VLAN group key (if nonzero)
+ * @mode: key install mode (RX_TX, NO_TX or SET_TX)
+ */
+struct key_params {
+ const u8 *key;
+ const u8 *seq;
+ int key_len;
+ int seq_len;
+ u16 vlan_id;
+ u32 cipher;
+ enum nl80211_key_mode mode;
+};
+
+/**
+ * struct cfg80211_chan_def - channel definition
+ * @chan: the (control) channel
+ * @width: channel width
+ * @center_freq1: center frequency of first segment
+ * @center_freq2: center frequency of second segment
+ * (only with 80+80 MHz)
+ * @edmg: define the EDMG channels configuration.
+ * If edmg is requested (i.e. the .channels member is non-zero),
+ * chan will define the primary channel and all other
+ * parameters are ignored.
+ * @freq1_offset: offset from @center_freq1, in kHz
+ */
+struct cfg80211_chan_def {
+ struct ieee80211_channel *chan;
+ enum nl80211_chan_width width;
+ u32 center_freq1;
+ u32 center_freq2;
+ struct ieee80211_edmg edmg;
+ u16 freq1_offset;
+};
+
+/*
+ * cfg80211_bitrate_mask - masks for bitrate control
+ */
+struct cfg80211_bitrate_mask {
+ struct {
+ u32 legacy;
+ u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
+ u16 vht_mcs[NL80211_VHT_NSS_MAX];
+ u16 he_mcs[NL80211_HE_NSS_MAX];
+ enum nl80211_txrate_gi gi;
+ enum nl80211_he_gi he_gi;
+ enum nl80211_he_ltf he_ltf;
+ } control[NUM_NL80211_BANDS];
+};
+
+
+/**
+ * struct cfg80211_tid_cfg - TID specific configuration
+ * @config_override: Flag to notify driver to reset TID configuration
+ * of the peer.
+ * @tids: bitmap of TIDs to modify
+ * @mask: bitmap of attributes indicating which parameter changed,
+ * similar to &nl80211_tid_config_supp.
+ * @noack: noack configuration value for the TID
+ * @retry_long: retry count value
+ * @retry_short: retry count value
+ * @ampdu: Enable/Disable MPDU aggregation
+ * @rtscts: Enable/Disable RTS/CTS
+ * @amsdu: Enable/Disable MSDU aggregation
+ * @txrate_type: Tx bitrate mask type
+ * @txrate_mask: Tx bitrate to be applied for the TID
+ */
+struct cfg80211_tid_cfg {
+ bool config_override;
+ u8 tids;
+ u64 mask;
+ enum nl80211_tid_config noack;
+ u8 retry_long, retry_short;
+ enum nl80211_tid_config ampdu;
+ enum nl80211_tid_config rtscts;
+ enum nl80211_tid_config amsdu;
+ enum nl80211_tx_rate_setting txrate_type;
+ struct cfg80211_bitrate_mask txrate_mask;
+};
+
+/**
+ * struct cfg80211_tid_config - TID configuration
+ * @peer: Station's MAC address
+ * @n_tid_conf: Number of TID specific configurations to be applied
+ * @tid_conf: Configuration change info
+ */
+struct cfg80211_tid_config {
+ const u8 *peer;
+ u32 n_tid_conf;
+ struct cfg80211_tid_cfg tid_conf[];
+};
+
+/**
+ * struct cfg80211_fils_aad - FILS AAD data
+ * @macaddr: STA MAC address
+ * @kek: FILS KEK
+ * @kek_len: FILS KEK length
+ * @snonce: STA Nonce
+ * @anonce: AP Nonce
+ */
+struct cfg80211_fils_aad {
+ const u8 *macaddr;
+ const u8 *kek;
+ u8 kek_len;
+ const u8 *snonce;
+ const u8 *anonce;
+};
+
+/**
+ * cfg80211_get_chandef_type - return old channel type from chandef
+ * @chandef: the channel definition
+ *
+ * Return: The old channel type (NOHT, HT20, HT40+/-) from a given
+ * chandef, which must have a bandwidth allowing this conversion.
+ */
+static inline enum nl80211_channel_type
+cfg80211_get_chandef_type(const struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ return NL80211_CHAN_NO_HT;
+ case NL80211_CHAN_WIDTH_20:
+ return NL80211_CHAN_HT20;
+ case NL80211_CHAN_WIDTH_40:
+ if (chandef->center_freq1 > chandef->chan->center_freq)
+ return NL80211_CHAN_HT40PLUS;
+ return NL80211_CHAN_HT40MINUS;
+ default:
+ WARN_ON(1);
+ return NL80211_CHAN_NO_HT;
+ }
+}
+
+/**
+ * cfg80211_chandef_create - create channel definition using channel type
+ * @chandef: the channel definition struct to fill
+ * @channel: the control channel
+ * @chantype: the channel type
+ *
+ * Given a channel type, create a channel definition.
+ */
+void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type chantype);
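+
+/*
+ * Usage sketch (illustrative): creating an HT40+ definition from a
+ * control channel taken from the wiphy's channel list:
+ *
+ *	struct cfg80211_chan_def chandef;
+ *
+ *	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT40PLUS);
+ *	// chandef.width is now NL80211_CHAN_WIDTH_40, with center_freq1
+ *	// 10 MHz above the control channel's center frequency
+ */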
+
+/**
+ * cfg80211_chandef_identical - check if two channel definitions are identical
+ * @chandef1: first channel definition
+ * @chandef2: second channel definition
+ *
+ * Return: %true if the channels defined by the channel definitions are
+ * identical, %false otherwise.
+ */
+static inline bool
+cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1,
+ const struct cfg80211_chan_def *chandef2)
+{
+ return (chandef1->chan == chandef2->chan &&
+ chandef1->width == chandef2->width &&
+ chandef1->center_freq1 == chandef2->center_freq1 &&
+ chandef1->freq1_offset == chandef2->freq1_offset &&
+ chandef1->center_freq2 == chandef2->center_freq2);
+}
+
+/**
+ * cfg80211_chandef_is_edmg - check if chandef represents an EDMG channel
+ *
+ * @chandef: the channel definition
+ *
+ * Return: %true if EDMG defined, %false otherwise.
+ */
+static inline bool
+cfg80211_chandef_is_edmg(const struct cfg80211_chan_def *chandef)
+{
+ return chandef->edmg.channels || chandef->edmg.bw_config;
+}
+
+/**
+ * cfg80211_chandef_compatible - check if two channel definitions are compatible
+ * @chandef1: first channel definition
+ * @chandef2: second channel definition
+ *
+ * Return: %NULL if the given channel definitions are incompatible,
+ * chandef1 or chandef2 otherwise.
+ */
+const struct cfg80211_chan_def *
+cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1,
+ const struct cfg80211_chan_def *chandef2);
+
+/**
+ * cfg80211_chandef_valid - check if a channel definition is valid
+ * @chandef: the channel definition to check
+ * Return: %true if the channel definition is valid. %false otherwise.
+ */
+bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef);
+
+/**
+ * cfg80211_chandef_usable - check if secondary channels can be used
+ * @wiphy: the wiphy to validate against
+ * @chandef: the channel definition to check
+ * @prohibited_flags: the regulatory channel flags that must not be set
+ * Return: %true if secondary channels are usable. %false otherwise.
+ */
+bool cfg80211_chandef_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags);
+
+/**
+ * cfg80211_chandef_dfs_required - checks if radar detection is required
+ * @wiphy: the wiphy to validate against
+ * @chandef: the channel definition to check
+ * @iftype: the interface type as specified in &enum nl80211_iftype
+ * Returns:
+ * 1 if radar detection is required, 0 if it is not, < 0 on error
+ */
+int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype);
+
+/**
+ * ieee80211_chanwidth_rate_flags - return rate flags for channel width
+ * @width: the channel width of the channel
+ *
+ * In some channel types, not all rates may be used - for example CCK
+ * rates may not be used in 5/10 MHz channels.
+ *
+ * Returns: rate flags which apply for this channel width
+ */
+static inline enum ieee80211_rate_flags
+ieee80211_chanwidth_rate_flags(enum nl80211_chan_width width)
+{
+ switch (width) {
+ case NL80211_CHAN_WIDTH_5:
+ return IEEE80211_RATE_SUPPORTS_5MHZ;
+ case NL80211_CHAN_WIDTH_10:
+ return IEEE80211_RATE_SUPPORTS_10MHZ;
+ default:
+ break;
+ }
+ return 0;
+}
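+
+/*
+ * Usage sketch (illustrative): a driver building a rate table could skip
+ * bitrates that lack the flags required for the current width:
+ *
+ *	enum ieee80211_rate_flags req = ieee80211_chanwidth_rate_flags(width);
+ *	int i;
+ *
+ *	for (i = 0; i < sband->n_bitrates; i++)
+ *		if ((sband->bitrates[i].flags & req) == req)
+ *			; // this bitrate is usable at the given width
+ */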
+
+/**
+ * ieee80211_chandef_rate_flags - returns rate flags for a channel
+ * @chandef: channel definition for the channel
+ *
+ * See ieee80211_chanwidth_rate_flags().
+ *
+ * Returns: rate flags which apply for this channel
+ */
+static inline enum ieee80211_rate_flags
+ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef)
+{
+ return ieee80211_chanwidth_rate_flags(chandef->width);
+}
+
+/**
+ * ieee80211_chandef_max_power - maximum transmission power for the chandef
+ *
+ * In some regulations, the transmit power may depend on the configured channel
+ * bandwidth which may be defined as dBm/MHz. This function returns the actual
+ * max_power for non-standard (5 and 10 MHz) channels.
+ *
+ * @chandef: channel definition for the channel
+ *
+ * Returns: maximum allowed transmission power in dBm for the chandef
+ */
+static inline int
+ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ return min(chandef->chan->max_reg_power - 6,
+ chandef->chan->max_power);
+ case NL80211_CHAN_WIDTH_10:
+ return min(chandef->chan->max_reg_power - 3,
+ chandef->chan->max_power);
+ default:
+ break;
+ }
+ return chandef->chan->max_power;
+}
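+
+/*
+ * Worked example (illustrative numbers): with max_reg_power = 20 dBm and
+ * max_power = 17 dBm, a 10 MHz chandef yields min(20 - 3, 17) = 17 dBm
+ * and a 5 MHz chandef yields min(20 - 6, 17) = 14 dBm; each halving of
+ * the bandwidth subtracts 3 dB from the regulatory limit.
+ */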
+
+/**
+ * cfg80211_any_usable_channels - check for usable channels
+ * @wiphy: the wiphy to check for
+ * @band_mask: which bands to check on
+ * @prohibited_flags: which channels to not consider usable,
+ * %IEEE80211_CHAN_DISABLED is always taken into account
+ */
+bool cfg80211_any_usable_channels(struct wiphy *wiphy,
+ unsigned long band_mask,
+ u32 prohibited_flags);
+
+/**
+ * enum survey_info_flags - survey information flags
+ *
+ * @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in
+ * @SURVEY_INFO_IN_USE: channel is currently being used
+ * @SURVEY_INFO_TIME: active time (in ms) was filled in
+ * @SURVEY_INFO_TIME_BUSY: busy time was filled in
+ * @SURVEY_INFO_TIME_EXT_BUSY: extension channel busy time was filled in
+ * @SURVEY_INFO_TIME_RX: receive time was filled in
+ * @SURVEY_INFO_TIME_TX: transmit time was filled in
+ * @SURVEY_INFO_TIME_SCAN: scan time was filled in
+ * @SURVEY_INFO_TIME_BSS_RX: local BSS receive time was filled in
+ *
+ * Used by the driver to indicate which info in &struct survey_info
+ * it has filled in during get_survey().
+ */
+enum survey_info_flags {
+ SURVEY_INFO_NOISE_DBM = BIT(0),
+ SURVEY_INFO_IN_USE = BIT(1),
+ SURVEY_INFO_TIME = BIT(2),
+ SURVEY_INFO_TIME_BUSY = BIT(3),
+ SURVEY_INFO_TIME_EXT_BUSY = BIT(4),
+ SURVEY_INFO_TIME_RX = BIT(5),
+ SURVEY_INFO_TIME_TX = BIT(6),
+ SURVEY_INFO_TIME_SCAN = BIT(7),
+ SURVEY_INFO_TIME_BSS_RX = BIT(8),
+};
+
+/**
+ * struct survey_info - channel survey response
+ *
+ * @channel: the channel this survey record reports, may be %NULL for a single
+ * record to report global statistics
+ * @filled: bitflag of flags from &enum survey_info_flags
+ * @noise: channel noise in dBm. This and all following fields are
+ * optional
+ * @time: amount of time in ms the radio was turned on (on the channel)
+ * @time_busy: amount of time the primary channel was sensed busy
+ * @time_ext_busy: amount of time the extension channel was sensed busy
+ * @time_rx: amount of time the radio spent receiving data
+ * @time_tx: amount of time the radio spent transmitting data
+ * @time_scan: amount of time the radio spent for scanning
+ * @time_bss_rx: amount of time the radio spent receiving data on a local BSS
+ *
+ * Used by dump_survey() to report back per-channel survey information.
+ *
+ * This structure can later be expanded with things like
+ * channel duty cycle etc.
+ */
+struct survey_info {
+ struct ieee80211_channel *channel;
+ u64 time;
+ u64 time_busy;
+ u64 time_ext_busy;
+ u64 time_rx;
+ u64 time_tx;
+ u64 time_scan;
+ u64 time_bss_rx;
+ u32 filled;
+ s8 noise;
+};
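+
+/*
+ * Fill sketch (illustrative, inside a hypothetical driver's dump_survey
+ * callback): set only the fields the hardware actually reported, and
+ * flag exactly those in @filled.
+ *
+ *	survey->channel = chan;
+ *	survey->noise = -92;		// dBm, hypothetical reading
+ *	survey->time = on_channel_ms;	// hypothetical counter
+ *	survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME;
+ */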
+
+#define CFG80211_MAX_WEP_KEYS 4
+#define CFG80211_MAX_NUM_AKM_SUITES 10
+
+/**
+ * struct cfg80211_crypto_settings - Crypto settings
+ * @wpa_versions: indicates which, if any, WPA versions are enabled
+ * (from enum nl80211_wpa_versions)
+ * @cipher_group: group key cipher suite (or 0 if unset)
+ * @n_ciphers_pairwise: number of AP supported unicast ciphers
+ * @ciphers_pairwise: unicast key cipher suites
+ * @n_akm_suites: number of AKM suites
+ * @akm_suites: AKM suites
+ * @control_port: Whether user space controls IEEE 802.1X port, i.e.,
+ * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
+ * required to assume that the port is unauthorized until authorized by
+ * user space. Otherwise, port is marked authorized by default.
+ * @control_port_ethertype: the control port protocol that should be
+ * allowed through even on unauthorized ports
+ * @control_port_no_encrypt: TRUE to prevent encryption of control port
+ * protocol frames.
+ * @control_port_over_nl80211: TRUE if userspace expects to exchange control
+ * port frames over NL80211 instead of the network interface.
+ * @control_port_no_preauth: disables pre-auth rx over the nl80211 control
+ * port for mac80211
+ * @wep_keys: static WEP keys, if not NULL points to an array of
+ * CFG80211_MAX_WEP_KEYS WEP keys
+ * @wep_tx_key: key index (0..3) of the default TX static WEP key
+ * @psk: PSK (for devices supporting 4-way-handshake offload)
+ * @sae_pwd: password for SAE authentication (for devices supporting SAE
+ * offload)
+ * @sae_pwd_len: length of SAE password (for devices supporting SAE offload)
+ * @sae_pwe: The mechanisms allowed for SAE PWE derivation:
+ *
+ * NL80211_SAE_PWE_UNSPECIFIED
+ * Not-specified, used to indicate userspace did not specify any
+ * preference. The driver should follow its internal policy in
+ * such a scenario.
+ *
+ * NL80211_SAE_PWE_HUNT_AND_PECK
+ * Allow hunting-and-pecking loop only
+ *
+ * NL80211_SAE_PWE_HASH_TO_ELEMENT
+ * Allow hash-to-element only
+ *
+ * NL80211_SAE_PWE_BOTH
+ * Allow either hunting-and-pecking loop or hash-to-element
+ */
+struct cfg80211_crypto_settings {
+ u32 wpa_versions;
+ u32 cipher_group;
+ int n_ciphers_pairwise;
+ u32 ciphers_pairwise[NL80211_MAX_NR_CIPHER_SUITES];
+ int n_akm_suites;
+ u32 akm_suites[CFG80211_MAX_NUM_AKM_SUITES];
+ bool control_port;
+ __be16 control_port_ethertype;
+ bool control_port_no_encrypt;
+ bool control_port_over_nl80211;
+ bool control_port_no_preauth;
+ struct key_params *wep_keys;
+ int wep_tx_key;
+ const u8 *psk;
+ const u8 *sae_pwd;
+ u8 sae_pwd_len;
+ enum nl80211_sae_pwe_mechanism sae_pwe;
+};
+
+/**
+ * struct cfg80211_mbssid_config - AP settings for multi bssid
+ *
+ * @tx_wdev: pointer to the transmitted interface in the MBSSID set
+ * @index: index of this AP in the multi bssid group.
+ * @ema: set to true if the beacons should be sent out in EMA mode.
+ */
+struct cfg80211_mbssid_config {
+ struct wireless_dev *tx_wdev;
+ u8 index;
+ bool ema;
+};
+
+/**
+ * struct cfg80211_mbssid_elems - Multiple BSSID elements
+ *
+ * @cnt: Number of elements in the @elem array.
+ *
+ * @elem: Array of multiple BSSID element(s) to be added into Beacon frames.
+ * @elem.data: Data for multiple BSSID elements.
+ * @elem.len: Length of data.
+ */
+struct cfg80211_mbssid_elems {
+ u8 cnt;
+ struct {
+ const u8 *data;
+ size_t len;
+ } elem[];
+};
+
+/**
+ * struct cfg80211_beacon_data - beacon data
+ * @link_id: the link ID for the AP MLD link sending this beacon
+ * @head: head portion of beacon (before TIM IE)
+ * or %NULL if not changed
+ * @tail: tail portion of beacon (after TIM IE)
+ * or %NULL if not changed
+ * @head_len: length of @head
+ * @tail_len: length of @tail
+ * @beacon_ies: extra information element(s) to add into Beacon frames or %NULL
+ * @beacon_ies_len: length of beacon_ies in octets
+ * @proberesp_ies: extra information element(s) to add into Probe Response
+ * frames or %NULL
+ * @proberesp_ies_len: length of proberesp_ies in octets
+ * @assocresp_ies: extra information element(s) to add into (Re)Association
+ * Response frames or %NULL
+ * @assocresp_ies_len: length of assocresp_ies in octets
+ * @probe_resp_len: length of probe response template (@probe_resp)
+ * @probe_resp: probe response template (AP mode only)
+ * @mbssid_ies: multiple BSSID elements
+ * @ftm_responder: enable FTM responder functionality; -1 for no change
+ * (which also implies no change in LCI/civic location data)
+ * @lci: Measurement Report element content, starting with Measurement Token
+ * (measurement type 8)
+ * @civicloc: Measurement Report element content, starting with Measurement
+ * Token (measurement type 11)
+ * @lci_len: LCI data length
+ * @civicloc_len: Civic location data length
+ * @he_bss_color: BSS Color settings
+ * @he_bss_color_valid: indicates whether the BSS Color
+ * attribute is present in the beacon data.
+ */
+struct cfg80211_beacon_data {
+ unsigned int link_id;
+
+ const u8 *head, *tail;
+ const u8 *beacon_ies;
+ const u8 *proberesp_ies;
+ const u8 *assocresp_ies;
+ const u8 *probe_resp;
+ const u8 *lci;
+ const u8 *civicloc;
+ struct cfg80211_mbssid_elems *mbssid_ies;
+ s8 ftm_responder;
+
+ size_t head_len, tail_len;
+ size_t beacon_ies_len;
+ size_t proberesp_ies_len;
+ size_t assocresp_ies_len;
+ size_t probe_resp_len;
+ size_t lci_len;
+ size_t civicloc_len;
+ struct cfg80211_he_bss_color he_bss_color;
+ bool he_bss_color_valid;
+};
+
+struct mac_address {
+ u8 addr[ETH_ALEN];
+};
+
+/**
+ * struct cfg80211_acl_data - Access control list data
+ *
+ * @acl_policy: ACL policy to be applied on the station's
+ * entry specified by mac_addr
+ * @n_acl_entries: Number of MAC address entries passed
+ * @mac_addrs: List of MAC addresses of stations to be used for ACL
+ */
+struct cfg80211_acl_data {
+ enum nl80211_acl_policy acl_policy;
+ int n_acl_entries;
+
+ /* Keep it last */
+ struct mac_address mac_addrs[];
+};
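+
+/*
+ * Allocation sketch (illustrative): @mac_addrs is a flexible array, so
+ * the structure is sized for n entries with struct_size():
+ *
+ *	struct cfg80211_acl_data *acl;
+ *
+ *	acl = kzalloc(struct_size(acl, mac_addrs, n), GFP_KERNEL);
+ *	if (acl)
+ *		acl->n_acl_entries = n;
+ */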
+
+/**
+ * struct cfg80211_fils_discovery - FILS discovery parameters from
+ * IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
+ *
+ * @min_interval: Minimum packet interval in TUs (0 - 10000)
+ * @max_interval: Maximum packet interval in TUs (0 - 10000)
+ * @tmpl_len: Template length
+ * @tmpl: Template data for FILS discovery frame including the action
+ * frame headers.
+ */
+struct cfg80211_fils_discovery {
+ u32 min_interval;
+ u32 max_interval;
+ size_t tmpl_len;
+ const u8 *tmpl;
+};
+
+/**
+ * struct cfg80211_unsol_bcast_probe_resp - Unsolicited broadcast probe
+ * response parameters in 6GHz.
+ *
+ * @interval: Packet interval in TUs. Maximum allowed is 20 TU, as mentioned
+ * in IEEE P802.11ax/D6.0 26.17.2.3.2 - AP behavior for fast passive
+ * scanning
+ * @tmpl_len: Template length
+ * @tmpl: Template data for probe response
+ */
+struct cfg80211_unsol_bcast_probe_resp {
+ u32 interval;
+ size_t tmpl_len;
+ const u8 *tmpl;
+};
+
+/**
+ * struct cfg80211_ap_settings - AP configuration
+ *
+ * Used to configure an AP interface.
+ *
+ * @chandef: defines the channel to use
+ * @beacon: beacon data
+ * @beacon_interval: beacon interval
+ * @dtim_period: DTIM period
+ * @ssid: SSID to be used in the BSS (note: may be %NULL if not provided from
+ * user space)
+ * @ssid_len: length of @ssid
+ * @hidden_ssid: whether to hide the SSID in Beacon/Probe Response frames
+ * @crypto: crypto settings
+ * @privacy: the BSS uses privacy
+ * @auth_type: Authentication type (algorithm)
+ * @smps_mode: SMPS mode
+ * @inactivity_timeout: time in seconds after which a station is
+ * considered inactive.
+ * @p2p_ctwindow: P2P CT Window
+ * @p2p_opp_ps: P2P opportunistic PS
+ * @acl: ACL configuration used by drivers which have support for
+ * MAC address based access control
+ * @pbss: If set, start as a PCP instead of AP. Relevant for DMG
+ * networks.
+ * @beacon_rate: bitrate to be used for beacons
+ * @ht_cap: HT capabilities (or %NULL if HT isn't enabled)
+ * @vht_cap: VHT capabilities (or %NULL if VHT isn't enabled)
+ * @he_cap: HE capabilities (or %NULL if HE isn't enabled)
+ * @eht_cap: EHT capabilities (or %NULL if EHT isn't enabled)
+ * @eht_oper: EHT operation IE (or %NULL if EHT isn't enabled)
+ * @ht_required: stations must support HT
+ * @vht_required: stations must support VHT
+ * @twt_responder: Enable Target Wake Time (TWT) responder support
+ * @he_required: stations must support HE
+ * @sae_h2e_required: stations must support direct H2E technique in SAE
+ * @flags: flags, as defined in enum cfg80211_ap_settings_flags
+ * @he_obss_pd: OBSS Packet Detection settings
+ * @he_oper: HE operation IE (or %NULL if HE isn't enabled)
+ * @fils_discovery: FILS discovery transmission parameters
+ * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
+ * @mbssid_config: AP settings for multiple bssid
+ */
+struct cfg80211_ap_settings {
+ struct cfg80211_chan_def chandef;
+
+ struct cfg80211_beacon_data beacon;
+
+ int beacon_interval, dtim_period;
+ const u8 *ssid;
+ size_t ssid_len;
+ enum nl80211_hidden_ssid hidden_ssid;
+ struct cfg80211_crypto_settings crypto;
+ bool privacy;
+ enum nl80211_auth_type auth_type;
+ enum nl80211_smps_mode smps_mode;
+ int inactivity_timeout;
+ u8 p2p_ctwindow;
+ bool p2p_opp_ps;
+ const struct cfg80211_acl_data *acl;
+ bool pbss;
+ struct cfg80211_bitrate_mask beacon_rate;
+
+ const struct ieee80211_ht_cap *ht_cap;
+ const struct ieee80211_vht_cap *vht_cap;
+ const struct ieee80211_he_cap_elem *he_cap;
+ const struct ieee80211_he_operation *he_oper;
+ const struct ieee80211_eht_cap_elem *eht_cap;
+ const struct ieee80211_eht_operation *eht_oper;
+ bool ht_required, vht_required, he_required, sae_h2e_required;
+ bool twt_responder;
+ u32 flags;
+ struct ieee80211_he_obss_pd he_obss_pd;
+ struct cfg80211_fils_discovery fils_discovery;
+ struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
+ struct cfg80211_mbssid_config mbssid_config;
+};
+
+/**
+ * struct cfg80211_csa_settings - channel switch settings
+ *
+ * Used for channel switch
+ *
+ * @chandef: defines the channel to use after the switch
+ * @beacon_csa: beacon data while performing the switch
+ * @counter_offsets_beacon: offsets of the counters within the beacon (tail)
+ * @counter_offsets_presp: offsets of the counters within the probe response
+ * @n_counter_offsets_beacon: number of CSA counters in the beacon (tail)
+ * @n_counter_offsets_presp: number of CSA counters in the probe response
+ * @beacon_after: beacon data to be used on the new channel
+ * @radar_required: whether radar detection is required on the new channel
+ * @block_tx: whether transmissions should be blocked while changing
+ * @count: number of beacons until switch
+ */
+struct cfg80211_csa_settings {
+ struct cfg80211_chan_def chandef;
+ struct cfg80211_beacon_data beacon_csa;
+ const u16 *counter_offsets_beacon;
+ const u16 *counter_offsets_presp;
+ unsigned int n_counter_offsets_beacon;
+ unsigned int n_counter_offsets_presp;
+ struct cfg80211_beacon_data beacon_after;
+ bool radar_required;
+ bool block_tx;
+ u8 count;
+};
+
+/**
+ * struct cfg80211_color_change_settings - color change settings
+ *
+ * Used for bss color change
+ *
+ * @beacon_color_change: beacon data while performing the color countdown
+ * @counter_offset_beacon: offset of the counter within the beacon (tail)
+ * @counter_offset_presp: offset of the counter within the probe response
+ * @beacon_next: beacon data to be used after the color change
+ * @count: number of beacons until the color change
+ * @color: the color used after the change
+ */
+struct cfg80211_color_change_settings {
+ struct cfg80211_beacon_data beacon_color_change;
+ u16 counter_offset_beacon;
+ u16 counter_offset_presp;
+ struct cfg80211_beacon_data beacon_next;
+ u8 count;
+ u8 color;
+};
+
+/**
+ * struct iface_combination_params - input parameters for interface combinations
+ *
+ * Used to pass interface combination parameters
+ *
+ * @num_different_channels: the number of different channels we want
+ * to use for verification
+ * @radar_detect: a bitmap where each bit corresponds to a channel
+ * width where radar detection is needed, as in the definition of
+ * &struct ieee80211_iface_combination.@radar_detect_widths
+ * @iftype_num: array with the number of interfaces of each interface
+ * type. The index is the interface type as specified in &enum
+ * nl80211_iftype.
+ * @new_beacon_int: set this to the beacon interval of a new interface
+ * that's not operating yet, if such is to be checked as part of
+ * the verification
+ */
+struct iface_combination_params {
+ int num_different_channels;
+ u8 radar_detect;
+ int iftype_num[NUM_NL80211_IFTYPES];
+ u32 new_beacon_int;
+};
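+
+/*
+ * Fill sketch (illustrative): asking whether one AP plus two stations on
+ * two different channels would be acceptable, e.g. via
+ * cfg80211_check_combinations():
+ *
+ *	struct iface_combination_params params = {
+ *		.num_different_channels = 2,
+ *	};
+ *
+ *	params.iftype_num[NL80211_IFTYPE_AP] = 1;
+ *	params.iftype_num[NL80211_IFTYPE_STATION] = 2;
+ */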
+
+/**
+ * enum station_parameters_apply_mask - station parameter values to apply
+ * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
+ * @STATION_PARAM_APPLY_CAPABILITY: apply new capability
+ * @STATION_PARAM_APPLY_PLINK_STATE: apply new plink state
+ * @STATION_PARAM_APPLY_STA_TXPOWER: apply tx power for STA
+ *
+ * Not all station parameters have in-band "no change" signalling;
+ * for those that don't, these flags are used.
+ */
+enum station_parameters_apply_mask {
+ STATION_PARAM_APPLY_UAPSD = BIT(0),
+ STATION_PARAM_APPLY_CAPABILITY = BIT(1),
+ STATION_PARAM_APPLY_PLINK_STATE = BIT(2),
+};
+
+/**
+ * struct sta_txpwr - station txpower configuration
+ *
+ * Used to configure txpower for station.
+ *
+ * @power: tx power (in dBm) to be used for sending data traffic. If tx power
+ * is not provided, the default per-interface tx power setting is used.
+ * The driver should pick the lowest tx power, either the per-interface
+ * or the per-station one.
+ * @type: the TPC type. If %type is NL80211_TX_POWER_LIMITED, the tx power
+ * will be less than or equal to the value specified from userspace,
+ * whereas NL80211_TX_POWER_AUTOMATIC indicates the default tx power.
+ * NL80211_TX_POWER_FIXED is not a valid configuration option for
+ * per-peer TPC.
+ */
+struct sta_txpwr {
+ s16 power;
+ enum nl80211_tx_power_setting type;
+};
+
+/**
+ * struct link_station_parameters - link station parameters
+ *
+ * Used to change and create a new link station.
+ *
+ * @mld_mac: MAC address of the station
+ * @link_id: the link id (-1 for non-MLD station)
+ * @link_mac: MAC address of the link
+ * @supported_rates: supported rates in IEEE 802.11 format
+ * (or NULL for no change)
+ * @supported_rates_len: number of supported rates
+ * @ht_capa: HT capabilities of station
+ * @vht_capa: VHT capabilities of station
+ * @opmode_notif: operating mode field from Operating Mode Notification
+ * @opmode_notif_used: whether the operating mode field is used
+ * @he_capa: HE capabilities of station
+ * @he_capa_len: the length of the HE capabilities
+ * @txpwr: transmit power for an associated station
+ * @txpwr_set: txpwr field is set
+ * @he_6ghz_capa: HE 6 GHz Band capabilities of station
+ * @eht_capa: EHT capabilities of station
+ * @eht_capa_len: the length of the EHT capabilities
+ */
+struct link_station_parameters {
+ const u8 *mld_mac;
+ int link_id;
+ const u8 *link_mac;
+ const u8 *supported_rates;
+ u8 supported_rates_len;
+ const struct ieee80211_ht_cap *ht_capa;
+ const struct ieee80211_vht_cap *vht_capa;
+ u8 opmode_notif;
+ bool opmode_notif_used;
+ const struct ieee80211_he_cap_elem *he_capa;
+ u8 he_capa_len;
+ struct sta_txpwr txpwr;
+ bool txpwr_set;
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
+ const struct ieee80211_eht_cap_elem *eht_capa;
+ u8 eht_capa_len;
+};
+
+/**
+ * struct link_station_del_parameters - link station deletion parameters
+ *
+ * Used to delete a link station entry (or all stations).
+ *
+ * @mld_mac: MAC address of the station
+ * @link_id: the link id
+ */
+struct link_station_del_parameters {
+ const u8 *mld_mac;
+ u32 link_id;
+};
+
+/**
+ * struct station_parameters - station parameters
+ *
+ * Used to change and create a new station.
+ *
+ * @vlan: vlan interface station should belong to
+ * @sta_flags_mask: station flags that changed
+ * (bitmask of BIT(%NL80211_STA_FLAG_...))
+ * @sta_flags_set: station flags values
+ * (bitmask of BIT(%NL80211_STA_FLAG_...))
+ * @listen_interval: listen interval or -1 for no change
+ * @aid: AID or zero for no change
+ * @vlan_id: VLAN ID for station (if nonzero)
+ * @peer_aid: mesh peer AID or zero for no change
+ * @plink_action: plink action to take
+ * @plink_state: set the peer link state for a station
+ * @uapsd_queues: bitmap of queues configured for uapsd. same format
+ * as the AC bitmap in the QoS info field
+ * @max_sp: max Service Period. same format as the MAX_SP in the
+ * QoS info field (but already shifted down)
+ * @sta_modify_mask: bitmap indicating which parameters changed
+ * (for those that don't have a natural "no change" value),
+ * see &enum station_parameters_apply_mask
+ * @local_pm: local link-specific mesh power save mode (no change when set
+ * to unknown)
+ * @capability: station capability
+ * @ext_capab: extended capabilities of the station
+ * @ext_capab_len: number of extended capabilities
+ * @supported_channels: supported channels in IEEE 802.11 format
+ * @supported_channels_len: number of supported channels
+ * @supported_oper_classes: supported oper classes in IEEE 802.11 format
+ * @supported_oper_classes_len: number of supported operating classes
+ * @support_p2p_ps: whether the station supports the P2P PS mechanism
+ * @airtime_weight: airtime scheduler weight for this station
+ * @link_sta_params: link related params.
+ */
+struct station_parameters {
+ struct net_device *vlan;
+ u32 sta_flags_mask, sta_flags_set;
+ u32 sta_modify_mask;
+ int listen_interval;
+ u16 aid;
+ u16 vlan_id;
+ u16 peer_aid;
+ u8 plink_action;
+ u8 plink_state;
+ u8 uapsd_queues;
+ u8 max_sp;
+ enum nl80211_mesh_power_mode local_pm;
+ u16 capability;
+ const u8 *ext_capab;
+ u8 ext_capab_len;
+ const u8 *supported_channels;
+ u8 supported_channels_len;
+ const u8 *supported_oper_classes;
+ u8 supported_oper_classes_len;
+ int support_p2p_ps;
+ u16 airtime_weight;
+ struct link_station_parameters link_sta_params;
+};
+
+/**
+ * struct station_del_parameters - station deletion parameters
+ *
+ * Used to delete a station entry (or all stations).
+ *
+ * @mac: MAC address of the station to remove or NULL to remove all stations
+ * @subtype: Management frame subtype to use for indicating removal
+ * (10 = Disassociation, 12 = Deauthentication)
+ * @reason_code: Reason code for the Disassociation/Deauthentication frame
+ */
+struct station_del_parameters {
+ const u8 *mac;
+ u8 subtype;
+ u16 reason_code;
+};
+
+/**
+ * enum cfg80211_station_type - the type of station being modified
+ * @CFG80211_STA_AP_CLIENT: client of an AP interface
+ * @CFG80211_STA_AP_CLIENT_UNASSOC: client of an AP interface that is still
+ * unassociated (updating properties for this type of client is permitted)
+ * @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has
+ * the AP MLME in the device
+ * @CFG80211_STA_AP_STA: AP station on managed interface
+ * @CFG80211_STA_IBSS: IBSS station
+ * @CFG80211_STA_TDLS_PEER_SETUP: TDLS peer on managed interface (dummy entry
+ * while TDLS setup is in progress, it moves out of this state when
+ * being marked authorized; use this only if TDLS with external setup is
+ * supported/used)
+ * @CFG80211_STA_TDLS_PEER_ACTIVE: TDLS peer on managed interface (active
+ * entry that is operating, has been marked authorized by userspace)
+ * @CFG80211_STA_MESH_PEER_KERNEL: peer on mesh interface (kernel managed)
+ * @CFG80211_STA_MESH_PEER_USER: peer on mesh interface (user managed)
+ */
+enum cfg80211_station_type {
+ CFG80211_STA_AP_CLIENT,
+ CFG80211_STA_AP_CLIENT_UNASSOC,
+ CFG80211_STA_AP_MLME_CLIENT,
+ CFG80211_STA_AP_STA,
+ CFG80211_STA_IBSS,
+ CFG80211_STA_TDLS_PEER_SETUP,
+ CFG80211_STA_TDLS_PEER_ACTIVE,
+ CFG80211_STA_MESH_PEER_KERNEL,
+ CFG80211_STA_MESH_PEER_USER,
+};
+
+/**
+ * cfg80211_check_station_change - validate parameter changes
+ * @wiphy: the wiphy this operates on
+ * @params: the new parameters for a station
+ * @statype: the type of station being modified
+ *
+ * Utility function for the @change_station driver method. Call this function
+ * with the appropriate station type looking up the station (and checking that
+ * it exists). It will verify whether the station change is acceptable, and if
+ * not will return an error code. Note that it may modify the parameters for
+ * backward compatibility reasons, so don't use them before calling this.
+ */
+int cfg80211_check_station_change(struct wiphy *wiphy,
+ struct station_parameters *params,
+ enum cfg80211_station_type statype);
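+
+/*
+ * Usage sketch (illustrative, inside a hypothetical change_station
+ * implementation): validate before applying, since the function may also
+ * adjust *params for backward compatibility.
+ *
+ *	err = cfg80211_check_station_change(wiphy, params,
+ *					    CFG80211_STA_AP_CLIENT);
+ *	if (err)
+ *		return err;
+ *	// *params may be applied to the station now
+ */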
+
+/**
+ * enum rate_info_flags - bitrate info flags
+ *
+ * Used by the driver to indicate the specific rate transmission
+ * type for 802.11 transmissions.
+ *
+ * @RATE_INFO_FLAGS_MCS: mcs field filled with HT MCS
+ * @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS
+ * @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
+ * @RATE_INFO_FLAGS_DMG: 60GHz MCS
+ * @RATE_INFO_FLAGS_HE_MCS: HE MCS information
+ * @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode
+ * @RATE_INFO_FLAGS_EXTENDED_SC_DMG: 60GHz extended SC MCS
+ * @RATE_INFO_FLAGS_EHT_MCS: EHT MCS information
+ */
+enum rate_info_flags {
+ RATE_INFO_FLAGS_MCS = BIT(0),
+ RATE_INFO_FLAGS_VHT_MCS = BIT(1),
+ RATE_INFO_FLAGS_SHORT_GI = BIT(2),
+ RATE_INFO_FLAGS_DMG = BIT(3),
+ RATE_INFO_FLAGS_HE_MCS = BIT(4),
+ RATE_INFO_FLAGS_EDMG = BIT(5),
+ RATE_INFO_FLAGS_EXTENDED_SC_DMG = BIT(6),
+ RATE_INFO_FLAGS_EHT_MCS = BIT(7),
+};
+
+/**
+ * enum rate_info_bw - rate bandwidth information
+ *
+ * Used by the driver to indicate the rate bandwidth.
+ *
+ * @RATE_INFO_BW_5: 5 MHz bandwidth
+ * @RATE_INFO_BW_10: 10 MHz bandwidth
+ * @RATE_INFO_BW_20: 20 MHz bandwidth
+ * @RATE_INFO_BW_40: 40 MHz bandwidth
+ * @RATE_INFO_BW_80: 80 MHz bandwidth
+ * @RATE_INFO_BW_160: 160 MHz bandwidth
+ * @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation
+ * @RATE_INFO_BW_320: 320 MHz bandwidth
+ * @RATE_INFO_BW_EHT_RU: bandwidth determined by EHT RU allocation
+ */
+enum rate_info_bw {
+ RATE_INFO_BW_20 = 0,
+ RATE_INFO_BW_5,
+ RATE_INFO_BW_10,
+ RATE_INFO_BW_40,
+ RATE_INFO_BW_80,
+ RATE_INFO_BW_160,
+ RATE_INFO_BW_HE_RU,
+ RATE_INFO_BW_320,
+ RATE_INFO_BW_EHT_RU,
+};
+
+/**
+ * struct rate_info - bitrate information
+ *
+ * Information about a receiving or transmitting bitrate
+ *
+ * @flags: bitflag of flags from &enum rate_info_flags
+ * @mcs: mcs index if struct describes an HT/VHT/HE rate
+ * @legacy: bitrate in 100kbit/s for 802.11abg
+ * @nss: number of streams (VHT & HE only)
+ * @bw: bandwidth (from &enum rate_info_bw)
+ * @he_gi: HE guard interval (from &enum nl80211_he_gi)
+ * @he_dcm: HE DCM value
+ * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc,
+ * only valid if bw is %RATE_INFO_BW_HE_RU)
+ * @n_bonded_ch: In case of EDMG the number of bonded channels (1-4)
+ * @eht_gi: EHT guard interval (from &enum nl80211_eht_gi)
+ * @eht_ru_alloc: EHT RU allocation (from &enum nl80211_eht_ru_alloc,
+ * only valid if bw is %RATE_INFO_BW_EHT_RU)
+ */
+struct rate_info {
+ u8 flags;
+ u8 mcs;
+ u16 legacy;
+ u8 nss;
+ u8 bw;
+ u8 he_gi;
+ u8 he_dcm;
+ u8 he_ru_alloc;
+ u8 n_bonded_ch;
+ u8 eht_gi;
+ u8 eht_ru_alloc;
+};
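+
+/*
+ * Fill sketch (illustrative): reporting a 2-stream VHT MCS 7 rate on
+ * 80 MHz with short guard interval:
+ *
+ *	struct rate_info ri = {
+ *		.flags = RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_SHORT_GI,
+ *		.mcs = 7,
+ *		.nss = 2,
+ *		.bw = RATE_INFO_BW_80,
+ *	};
+ */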
+
+/**
+ * enum bss_param_flags - BSS parameter flags
+ *
+ * Used in the @flags field of &struct sta_bss_parameters to indicate
+ * which features of the BSS are enabled.
+ *
+ * @BSS_PARAM_FLAGS_CTS_PROT: whether CTS protection is enabled
+ * @BSS_PARAM_FLAGS_SHORT_PREAMBLE: whether short preamble is enabled
+ * @BSS_PARAM_FLAGS_SHORT_SLOT_TIME: whether short slot time is enabled
+ */
+enum bss_param_flags {
+ BSS_PARAM_FLAGS_CTS_PROT = 1<<0,
+ BSS_PARAM_FLAGS_SHORT_PREAMBLE = 1<<1,
+ BSS_PARAM_FLAGS_SHORT_SLOT_TIME = 1<<2,
+};
+
+/**
+ * struct sta_bss_parameters - BSS parameters for the attached station
+ *
+ * Information about the currently associated BSS
+ *
+ * @flags: bitflag of flags from &enum bss_param_flags
+ * @dtim_period: DTIM period for the BSS
+ * @beacon_interval: beacon interval
+ */
+struct sta_bss_parameters {
+ u8 flags;
+ u8 dtim_period;
+ u16 beacon_interval;
+};
+
+/**
+ * struct cfg80211_txq_stats - TXQ statistics for this TID
+ * @filled: bitmap of flags using the bits of &enum nl80211_txq_stats to
+ * indicate the relevant values in this struct are filled
+ * @backlog_bytes: total number of bytes currently backlogged
+ * @backlog_packets: total number of packets currently backlogged
+ * @flows: number of new flows seen
+ * @drops: total number of packets dropped
+ * @ecn_marks: total number of packets marked with ECN CE
+ * @overlimit: number of drops due to queue space overflow
+ * @overmemory: number of drops due to memory limit overflow
+ * @collisions: number of hash collisions
+ * @tx_bytes: total number of bytes dequeued
+ * @tx_packets: total number of packets dequeued
+ * @max_flows: maximum number of flows supported
+ */
+struct cfg80211_txq_stats {
+ u32 filled;
+ u32 backlog_bytes;
+ u32 backlog_packets;
+ u32 flows;
+ u32 drops;
+ u32 ecn_marks;
+ u32 overlimit;
+ u32 overmemory;
+ u32 collisions;
+ u32 tx_bytes;
+ u32 tx_packets;
+ u32 max_flows;
+};
+
+/**
+ * struct cfg80211_tid_stats - per-TID statistics
+ * @filled: bitmap of flags using the bits of &enum nl80211_tid_stats to
+ * indicate the relevant values in this struct are filled
+ * @rx_msdu: number of received MSDUs
+ * @tx_msdu: number of (attempted) transmitted MSDUs
+ * @tx_msdu_retries: number of retries (not counting the first) for
+ * transmitted MSDUs
+ * @tx_msdu_failed: number of failed transmitted MSDUs
+ * @txq_stats: TXQ statistics
+ */
+struct cfg80211_tid_stats {
+ u32 filled;
+ u64 rx_msdu;
+ u64 tx_msdu;
+ u64 tx_msdu_retries;
+ u64 tx_msdu_failed;
+ struct cfg80211_txq_stats txq_stats;
+};
+
+#define IEEE80211_MAX_CHAINS 4
+
+/**
+ * struct station_info - station information
+ *
+ * Station information filled by the driver for get_station() and dump_station().
+ *
+ * @filled: bitflag of flags using the bits of &enum nl80211_sta_info to
+ * indicate the relevant values in this struct for them
+ * @connected_time: time (in secs) since the station last connected
+ * @inactive_time: time since last station activity (tx/rx) in milliseconds
+ * @assoc_at: boottime (ns) of the last association
+ * @rx_bytes: bytes (size of MPDUs) received from this station
+ * @tx_bytes: bytes (size of MPDUs) transmitted to this station
+ * @llid: mesh local link id
+ * @plid: mesh peer link id
+ * @plink_state: mesh peer link state
+ * @signal: The signal strength, type depends on the wiphy's signal_type.
+ * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
+ * @signal_avg: Average signal strength, type depends on the wiphy's signal_type.
+ * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
+ * @chains: bitmask for filled values in @chain_signal, @chain_signal_avg
+ * @chain_signal: per-chain signal strength of last received packet in dBm
+ * @chain_signal_avg: per-chain signal strength average in dBm
+ * @txrate: current unicast bitrate from this station
+ * @rxrate: current unicast bitrate to this station
+ * @rx_packets: packets (MSDUs & MMPDUs) received from this station
+ * @tx_packets: packets (MSDUs & MMPDUs) transmitted to this station
+ * @tx_retries: cumulative retry counts (MPDUs)
+ * @tx_failed: number of failed transmissions (MPDUs) (retries exceeded, no ACK)
+ * @rx_dropped_misc: Dropped for unspecified reasons.
+ * @bss_param: current BSS parameters
+ * @generation: generation number for nl80211 dumps.
+ * This number should increase every time the list of stations
+ * changes, i.e. when a station is added or removed, so that
+ * userspace can tell whether it got a consistent snapshot.
+ * @assoc_req_ies: IEs from (Re)Association Request.
+ * This is used only when in AP mode with drivers that do not use
+ * user space MLME/SME implementation. The information is provided for
+ * the cfg80211_new_sta() calls to notify user space of the IEs.
+ * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
+ * @sta_flags: station flags mask & values
+ * @beacon_loss_count: Number of times beacon loss event has triggered.
+ * @t_offset: Time offset of the station relative to this host.
+ * @local_pm: local mesh STA power save mode
+ * @peer_pm: peer mesh STA power save mode
+ * @nonpeer_pm: non-peer mesh STA power save mode
+ * @expected_throughput: expected throughput in kbps (including 802.11 headers)
+ * towards this station.
+ * @rx_beacon: number of beacons received from this peer
+ * @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
+ * from this peer
+ * @connected_to_gate: true if mesh STA has a path to mesh gate
+ * @rx_duration: aggregate PPDU duration (usecs) for all the frames from a peer
+ * @tx_duration: aggregate PPDU duration (usecs) for all the frames to a peer
+ * @airtime_weight: current airtime scheduling weight
+ * @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
+ * (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
+ * Note that this doesn't use the @filled bit, but is used if non-NULL.
+ * @ack_signal: signal strength (in dBm) of the last ACK frame.
+ * @avg_ack_signal: average RSSI value of the ACK frames for the MSDUs that
+ * have been sent.
+ * @rx_mpdu_count: number of MPDUs received from this station
+ * @fcs_err_count: number of packets (MPDUs) received from this station with
+ * an FCS error. This counter should be incremented only when TA of the
+ * received packet with an FCS error matches the peer MAC address.
+ * @airtime_link_metric: mesh airtime link metric.
+ * @connected_to_as: true if mesh STA has a path to authentication server
+ */
+struct station_info {
+ u64 filled;
+ u32 connected_time;
+ u32 inactive_time;
+ u64 assoc_at;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ u16 llid;
+ u16 plid;
+ u8 plink_state;
+ s8 signal;
+ s8 signal_avg;
+
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+ s8 chain_signal_avg[IEEE80211_MAX_CHAINS];
+
+ struct rate_info txrate;
+ struct rate_info rxrate;
+ u32 rx_packets;
+ u32 tx_packets;
+ u32 tx_retries;
+ u32 tx_failed;
+ u32 rx_dropped_misc;
+ struct sta_bss_parameters bss_param;
+ struct nl80211_sta_flag_update sta_flags;
+
+ int generation;
+
+ const u8 *assoc_req_ies;
+ size_t assoc_req_ies_len;
+
+ u32 beacon_loss_count;
+ s64 t_offset;
+ enum nl80211_mesh_power_mode local_pm;
+ enum nl80211_mesh_power_mode peer_pm;
+ enum nl80211_mesh_power_mode nonpeer_pm;
+
+ u32 expected_throughput;
+
+ u64 tx_duration;
+ u64 rx_duration;
+ u64 rx_beacon;
+ u8 rx_beacon_signal_avg;
+ u8 connected_to_gate;
+
+ struct cfg80211_tid_stats *pertid;
+ s8 ack_signal;
+ s8 avg_ack_signal;
+
+ u16 airtime_weight;
+
+ u32 rx_mpdu_count;
+ u32 fcs_err_count;
+
+ u32 airtime_link_metric;
+
+ u8 connected_to_as;
+};
+
+/**
+ * struct cfg80211_sar_sub_specs - sub specs limit
+ * @power: power limitation, in units of 0.25 dBm
+ * @freq_range_index: index the power limitation applies to
+ */
+struct cfg80211_sar_sub_specs {
+ s32 power;
+ u32 freq_range_index;
+};
+
+/**
+ * struct cfg80211_sar_specs - sar limit specs
+ * @type: the SAR limit type, e.g. power in units of 0.25 dBm
+ * @num_sub_specs: number of sar sub specs
+ * @sub_specs: memory to hold the sar sub specs
+ */
+struct cfg80211_sar_specs {
+ enum nl80211_sar_type type;
+ u32 num_sub_specs;
+ struct cfg80211_sar_sub_specs sub_specs[];
+};
+
+
+/**
+ * struct cfg80211_sar_freq_ranges - sar frequency ranges
+ * @start_freq: start range edge frequency
+ * @end_freq: end range edge frequency
+ */
+struct cfg80211_sar_freq_ranges {
+ u32 start_freq;
+ u32 end_freq;
+};
+
+/**
+ * struct cfg80211_sar_capa - sar limit capability
+ * @type: the SAR limit type, e.g. power in units of 0.25 dBm
+ * @num_freq_ranges: number of frequency ranges
+ * @freq_ranges: memory to hold the freq ranges.
+ *
+ * Note: the WLAN driver may append new ranges or split an existing
+ * range into smaller ones and then append them.
+ */
+struct cfg80211_sar_capa {
+ enum nl80211_sar_type type;
+ u32 num_freq_ranges;
+ const struct cfg80211_sar_freq_ranges *freq_ranges;
+};
+
+#if IS_ENABLED(CONFIG_CFG80211)
+/**
+ * cfg80211_get_station - retrieve information about a given station
+ * @dev: the device where the station is supposed to be connected to
+ * @mac_addr: the mac address of the station of interest
+ * @sinfo: pointer to the structure to fill with the information
+ *
+ * Returns 0 on success, in which case sinfo is filled with the available
+ * information; otherwise returns a negative error code, in which case the
+ * content of sinfo has to be considered undefined.
+ */
+int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
+ struct station_info *sinfo);
+#else
+static inline int cfg80211_get_station(struct net_device *dev,
+ const u8 *mac_addr,
+ struct station_info *sinfo)
+{
+ return -ENOENT;
+}
+#endif
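+
+/*
+ * Usage sketch (illustrative): callers should treat sinfo as undefined on
+ * failure and check @filled before using optional fields:
+ *
+ *	struct station_info sinfo = {};
+ *
+ *	if (!cfg80211_get_station(dev, peer_mac, &sinfo) &&
+ *	    (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)))
+ *		; // sinfo.signal is valid (dBm for CFG80211_SIGNAL_TYPE_MBM)
+ */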
+
+/**
+ * enum monitor_flags - monitor flags
+ *
+ * Monitor interface configuration flags. Note that these must be the bits
+ * according to the nl80211 flags.
+ *
+ * @MONITOR_FLAG_CHANGED: set if the flags were changed
+ * @MONITOR_FLAG_FCSFAIL: pass frames with bad FCS
+ * @MONITOR_FLAG_PLCPFAIL: pass frames with bad PLCP
+ * @MONITOR_FLAG_CONTROL: pass control frames
+ * @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering
+ * @MONITOR_FLAG_COOK_FRAMES: report frames after processing
+ * @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address
+ */
+enum monitor_flags {
+ MONITOR_FLAG_CHANGED = 1<<__NL80211_MNTR_FLAG_INVALID,
+ MONITOR_FLAG_FCSFAIL = 1<<NL80211_MNTR_FLAG_FCSFAIL,
+ MONITOR_FLAG_PLCPFAIL = 1<<NL80211_MNTR_FLAG_PLCPFAIL,
+ MONITOR_FLAG_CONTROL = 1<<NL80211_MNTR_FLAG_CONTROL,
+ MONITOR_FLAG_OTHER_BSS = 1<<NL80211_MNTR_FLAG_OTHER_BSS,
+ MONITOR_FLAG_COOK_FRAMES = 1<<NL80211_MNTR_FLAG_COOK_FRAMES,
+ MONITOR_FLAG_ACTIVE = 1<<NL80211_MNTR_FLAG_ACTIVE,
+};
+
+/**
+ * enum mpath_info_flags - mesh path information flags
+ *
+ * Used by the driver to indicate which info in &struct mpath_info it has filled
+ * in during get_mpath() or dump_mpath().
+ *
+ * @MPATH_INFO_FRAME_QLEN: @frame_qlen filled
+ * @MPATH_INFO_SN: @sn filled
+ * @MPATH_INFO_METRIC: @metric filled
+ * @MPATH_INFO_EXPTIME: @exptime filled
+ * @MPATH_INFO_DISCOVERY_TIMEOUT: @discovery_timeout filled
+ * @MPATH_INFO_DISCOVERY_RETRIES: @discovery_retries filled
+ * @MPATH_INFO_FLAGS: @flags filled
+ * @MPATH_INFO_HOP_COUNT: @hop_count filled
+ * @MPATH_INFO_PATH_CHANGE: @path_change_count filled
+ */
+enum mpath_info_flags {
+ MPATH_INFO_FRAME_QLEN = BIT(0),
+ MPATH_INFO_SN = BIT(1),
+ MPATH_INFO_METRIC = BIT(2),
+ MPATH_INFO_EXPTIME = BIT(3),
+ MPATH_INFO_DISCOVERY_TIMEOUT = BIT(4),
+ MPATH_INFO_DISCOVERY_RETRIES = BIT(5),
+ MPATH_INFO_FLAGS = BIT(6),
+ MPATH_INFO_HOP_COUNT = BIT(7),
+ MPATH_INFO_PATH_CHANGE = BIT(8),
+};
+
+/**
+ * struct mpath_info - mesh path information
+ *
+ * Mesh path information filled by driver for get_mpath() and dump_mpath().
+ *
+ * @filled: bitfield of flags from &enum mpath_info_flags
+ * @frame_qlen: number of queued frames for this destination
+ * @sn: target sequence number
+ * @metric: metric (cost) of this mesh path
+ * @exptime: expiration time for the mesh path from now, in msecs
+ * @flags: mesh path flags
+ * @discovery_timeout: total mesh path discovery timeout, in msecs
+ * @discovery_retries: mesh path discovery retries
+ * @generation: generation number for nl80211 dumps.
+ * This number should increase every time the list of mesh paths
+ * changes, i.e. when a station is added or removed, so that
+ * userspace can tell whether it got a consistent snapshot.
+ * @hop_count: hops to destination
+ * @path_change_count: total number of path changes to destination
+ */
+struct mpath_info {
+ u32 filled;
+ u32 frame_qlen;
+ u32 sn;
+ u32 metric;
+ u32 exptime;
+ u32 discovery_timeout;
+ u8 discovery_retries;
+ u8 flags;
+ u8 hop_count;
+ u32 path_change_count;
+
+ int generation;
+};
+
+/**
+ * struct bss_parameters - BSS parameters
+ *
+ * Used to change BSS parameters (mainly for AP mode).
+ *
+ * @use_cts_prot: Whether to use CTS protection
+ * (0 = no, 1 = yes, -1 = do not change)
+ * @use_short_preamble: Whether the use of short preambles is allowed
+ * (0 = no, 1 = yes, -1 = do not change)
+ * @use_short_slot_time: Whether the use of short slot time is allowed
+ * (0 = no, 1 = yes, -1 = do not change)
+ * @basic_rates: basic rates in IEEE 802.11 format
+ * (or NULL for no change)
+ * @basic_rates_len: number of basic rates
+ * @ap_isolate: do not forward packets between connected stations
+ * (0 = no, 1 = yes, -1 = do not change)
+ * @ht_opmode: HT Operation mode
+ * (u16 = opmode, -1 = do not change)
+ * @p2p_ctwindow: P2P CT Window (-1 = no change)
+ * @p2p_opp_ps: P2P opportunistic PS (-1 = no change)
+ */
+struct bss_parameters {
+ int use_cts_prot;
+ int use_short_preamble;
+ int use_short_slot_time;
+ const u8 *basic_rates;
+ u8 basic_rates_len;
+ int ap_isolate;
+ int ht_opmode;
+ s8 p2p_ctwindow, p2p_opp_ps;
+};
+
+/**
+ * struct mesh_config - 802.11s mesh configuration
+ *
+ * These parameters can be changed while the mesh is active.
+ *
+ * @dot11MeshRetryTimeout: the initial retry timeout in millisecond units used
+ * by the Mesh Peering Open message
+ * @dot11MeshConfirmTimeout: the initial retry timeout in millisecond units
+ * used by the Mesh Peering Open message
+ * @dot11MeshHoldingTimeout: the confirm timeout in millisecond units used by
+ * the mesh peering management to close a mesh peering
+ * @dot11MeshMaxPeerLinks: the maximum number of peer links allowed on this
+ * mesh interface
+ * @dot11MeshMaxRetries: the maximum number of peer link open retries that can
+ * be sent to establish a new peer link instance in a mesh
+ * @dot11MeshTTL: the value of TTL field set at a source mesh STA
+ * @element_ttl: the value of TTL field set at a mesh STA for path selection
+ * elements
+ * @auto_open_plinks: whether we should automatically open peer links when we
+ * detect compatible mesh peers
+ * @dot11MeshNbrOffsetMaxNeighbor: the maximum number of neighbors to
+ * synchronize to for 11s default synchronization method
+ * @dot11MeshHWMPmaxPREQretries: the number of action frames containing a PREQ
+ * that an originator mesh STA can send to a particular path target
+ * @path_refresh_time: how frequently to refresh mesh paths in milliseconds
+ * @min_discovery_timeout: the minimum length of time to wait until giving up on
+ * a path discovery in milliseconds
+ * @dot11MeshHWMPactivePathTimeout: the time (in TUs) for which mesh STAs
+ * receiving a PREQ shall consider the forwarding information from the
+ * root to be valid. (TU = time unit)
+ * @dot11MeshHWMPpreqMinInterval: the minimum interval of time (in TUs) during
+ * which a mesh STA can send only one action frame containing a PREQ
+ * element
+ * @dot11MeshHWMPperrMinInterval: the minimum interval of time (in TUs) during
+ * which a mesh STA can send only one Action frame containing a PERR
+ * element
+ * @dot11MeshHWMPnetDiameterTraversalTime: the interval of time (in TUs) that
+ * it takes for an HWMP information element to propagate across the mesh
+ * @dot11MeshHWMPRootMode: the configuration of a mesh STA as root mesh STA
+ * @dot11MeshHWMPRannInterval: the interval of time (in TUs) between root
+ * announcements are transmitted
+ * @dot11MeshGateAnnouncementProtocol: whether to advertise that this mesh
+ * station has access to a broader network beyond the MBSS. (This is
+ * misnamed in draft 12.0: dot11MeshGateAnnouncementProtocol set to true
+ * only means that the station will announce to others that it's a mesh
+ * gate, but not necessarily using the gate announcement protocol. Still
+ * keeping the same nomenclature to be in sync with the spec.)
+ * @dot11MeshForwarding: whether the Mesh STA is forwarding or non-forwarding
+ * entity (default is TRUE - forwarding entity)
+ * @rssi_threshold: the threshold for average signal strength of candidate
+ * station to establish a peer link
+ * @ht_opmode: mesh HT protection mode
+ *
+ * @dot11MeshHWMPactivePathToRootTimeout: The time (in TUs) for which mesh STAs
+ * receiving a proactive PREQ shall consider the forwarding information to
+ * the root mesh STA to be valid.
+ *
+ * @dot11MeshHWMProotInterval: The interval of time (in TUs) between
+ * transmissions of proactive PREQs.
+ * @dot11MeshHWMPconfirmationInterval: The minimum interval of time (in TUs)
+ * during which a mesh STA can send only one Action frame containing
+ * a PREQ element for root path confirmation.
+ * @power_mode: The default mesh power save mode which will be the initial
+ * setting for new peer links.
+ * @dot11MeshAwakeWindowDuration: The duration in TUs the STA will remain awake
+ * after transmitting its beacon.
+ * @plink_timeout: If no tx activity is seen from a STA we've established
+ * peering with for longer than this time (in seconds), then remove it
+ * from the STA's list of peers. Default is 30 minutes.
+ * @dot11MeshConnectedToAuthServer: if set to true then this mesh STA
+ * will advertise that it is connected to an authentication server
+ * in the mesh formation field.
+ * @dot11MeshConnectedToMeshGate: if set to true, advertise that this STA is
+ * connected to a mesh gate in mesh formation info. If false, the
+ * value in mesh formation is determined by the presence of root paths
+ * in the mesh path table
+ * @dot11MeshNolearn: Try to avoid multi-hop path discovery (e.g. PREQ/PREP
+ * for HWMP) if the destination is a direct neighbor. Note that this might
+ * not be the optimal decision as a multi-hop route might be better. So
+ * if using this setting you will likely also want to disable
+ * dot11MeshForwarding and use another mesh routing protocol on top.
+ */
+struct mesh_config {
+ u16 dot11MeshRetryTimeout;
+ u16 dot11MeshConfirmTimeout;
+ u16 dot11MeshHoldingTimeout;
+ u16 dot11MeshMaxPeerLinks;
+ u8 dot11MeshMaxRetries;
+ u8 dot11MeshTTL;
+ u8 element_ttl;
+ bool auto_open_plinks;
+ u32 dot11MeshNbrOffsetMaxNeighbor;
+ u8 dot11MeshHWMPmaxPREQretries;
+ u32 path_refresh_time;
+ u16 min_discovery_timeout;
+ u32 dot11MeshHWMPactivePathTimeout;
+ u16 dot11MeshHWMPpreqMinInterval;
+ u16 dot11MeshHWMPperrMinInterval;
+ u16 dot11MeshHWMPnetDiameterTraversalTime;
+ u8 dot11MeshHWMPRootMode;
+ bool dot11MeshConnectedToMeshGate;
+ bool dot11MeshConnectedToAuthServer;
+ u16 dot11MeshHWMPRannInterval;
+ bool dot11MeshGateAnnouncementProtocol;
+ bool dot11MeshForwarding;
+ s32 rssi_threshold;
+ u16 ht_opmode;
+ u32 dot11MeshHWMPactivePathToRootTimeout;
+ u16 dot11MeshHWMProotInterval;
+ u16 dot11MeshHWMPconfirmationInterval;
+ enum nl80211_mesh_power_mode power_mode;
+ u16 dot11MeshAwakeWindowDuration;
+ u32 plink_timeout;
+ bool dot11MeshNolearn;
+};
+
+/**
+ * struct mesh_setup - 802.11s mesh setup configuration
+ * @chandef: defines the channel to use
+ * @mesh_id: the mesh ID
+ * @mesh_id_len: length of the mesh ID, at least 1 and at most 32 bytes
+ * @sync_method: which synchronization method to use
+ * @path_sel_proto: which path selection protocol to use
+ * @path_metric: which metric to use
+ * @auth_id: which authentication method this mesh is using
+ * @ie: vendor information elements (optional)
+ * @ie_len: length of vendor information elements
+ * @is_authenticated: this mesh requires authentication
+ * @is_secure: this mesh uses security
+ * @user_mpm: userspace handles all MPM functions
+ * @dtim_period: DTIM period to use
+ * @beacon_interval: beacon interval to use
+ * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
+ * @basic_rates: basic rates to use when creating the mesh
+ * @beacon_rate: bitrate to be used for beacons
+ * @userspace_handles_dfs: whether user space controls DFS operation, i.e.
+ * changes the channel when a radar is detected. This is required
+ * to operate on DFS channels.
+ * @control_port_over_nl80211: TRUE if userspace expects to exchange control
+ * port frames over NL80211 instead of the network interface.
+ *
+ * These parameters are fixed when the mesh is created.
+ */
+struct mesh_setup {
+ struct cfg80211_chan_def chandef;
+ const u8 *mesh_id;
+ u8 mesh_id_len;
+ u8 sync_method;
+ u8 path_sel_proto;
+ u8 path_metric;
+ u8 auth_id;
+ const u8 *ie;
+ u8 ie_len;
+ bool is_authenticated;
+ bool is_secure;
+ bool user_mpm;
+ u8 dtim_period;
+ u16 beacon_interval;
+ int mcast_rate[NUM_NL80211_BANDS];
+ u32 basic_rates;
+ struct cfg80211_bitrate_mask beacon_rate;
+ bool userspace_handles_dfs;
+ bool control_port_over_nl80211;
+};
+
+/**
+ * struct ocb_setup - 802.11p OCB mode setup configuration
+ * @chandef: defines the channel to use
+ *
+ * These parameters are fixed when connecting to the network
+ */
+struct ocb_setup {
+ struct cfg80211_chan_def chandef;
+};
+
+/**
+ * struct ieee80211_txq_params - TX queue parameters
+ * @ac: AC identifier
+ * @txop: Maximum burst time in units of 32 usecs, 0 meaning disabled
+ * @cwmin: Minimum contention window [a value of the form 2^n-1 in the range
+ * 1..32767]
+ * @cwmax: Maximum contention window [a value of the form 2^n-1 in the range
+ * 1..32767]
+ * @aifs: Arbitration interframe space [0..255]
+ * @link_id: link_id or -1 for non-MLD
+ */
+struct ieee80211_txq_params {
+ enum nl80211_ac ac;
+ u16 txop;
+ u16 cwmin;
+ u16 cwmax;
+ u8 aifs;
+ int link_id;
+};
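+
+/*
+ * Sketch (illustrative): "a value of the form 2^n-1" means all low bits
+ * set, which can be checked by verifying that the value plus one is a
+ * power of two:
+ *
+ *    static bool cw_valid(u16 cw)
+ *    {
+ *        return cw >= 1 && cw <= 32767 && is_power_of_2(cw + 1);
+ *    }
+ */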
+
+/**
+ * DOC: Scanning and BSS list handling
+ *
+ * The scanning process itself is fairly simple, but cfg80211 offers quite
+ * a bit of helper functionality. To start a scan, the scan operation will
+ * be invoked with a scan definition. This scan definition contains the
+ * channels to scan, and the SSIDs to send probe requests for (including the
+ * wildcard, if desired). A passive scan is indicated by having no SSIDs to
+ * probe. Additionally, a scan request may contain extra information elements
+ * that should be added to the probe request. The IEs are guaranteed to be
+ * well-formed, and will not exceed the maximum length the driver advertised
+ * in the wiphy structure.
+ *
+ * When scanning finds a BSS, cfg80211 needs to be notified of that, because
+ * it is responsible for maintaining the BSS list; the driver should not
+ * maintain a list itself. For this notification, various functions exist.
+ *
+ * Since drivers do not maintain a BSS list, there are also a number of
+ * functions to search for a BSS and obtain information about it from the
+ * BSS structure cfg80211 maintains. The BSS list is also made available
+ * to userspace.
+ */
+
+/**
+ * struct cfg80211_ssid - SSID description
+ * @ssid: the SSID
+ * @ssid_len: length of the ssid
+ */
+struct cfg80211_ssid {
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+};
+
+/**
+ * struct cfg80211_scan_info - information about completed scan
+ * @scan_start_tsf: scan start time in terms of the TSF of the BSS that the
+ * wireless device that requested the scan is connected to. If this
+ * information is not available, this field is left zero.
+ * @tsf_bssid: the BSSID according to which %scan_start_tsf is set.
+ * @aborted: set to true if the scan was aborted for any reason,
+ * userspace will be notified of that
+ */
+struct cfg80211_scan_info {
+ u64 scan_start_tsf;
+ u8 tsf_bssid[ETH_ALEN] __aligned(2);
+ bool aborted;
+};
+
+/**
+ * struct cfg80211_scan_6ghz_params - relevant for 6 GHz only
+ *
+ * @short_ssid: short ssid to scan for
+ * @bssid: bssid to scan for
+ * @channel_idx: idx of the channel in the channel array in the scan request
+ * to which the above info is relevant
+ * @unsolicited_probe: the AP transmits an unsolicited probe response every 20 TUs
+ * @short_ssid_valid: @short_ssid is valid and can be used
+ * @psc_no_listen: when set, and the channel is a PSC channel, no need to wait
+ * 20 TUs before starting to send probe requests.
+ */
+struct cfg80211_scan_6ghz_params {
+ u32 short_ssid;
+ u32 channel_idx;
+ u8 bssid[ETH_ALEN];
+ bool unsolicited_probe;
+ bool short_ssid_valid;
+ bool psc_no_listen;
+};
+
+/**
+ * struct cfg80211_scan_request - scan request description
+ *
+ * @ssids: SSIDs to scan for (active scan only)
+ * @n_ssids: number of SSIDs
+ * @channels: channels to scan on.
+ * @n_channels: total number of channels to scan
+ * @scan_width: channel width for scanning
+ * @ie: optional information element(s) to add into Probe Request or %NULL
+ * @ie_len: length of ie in octets
+ * @duration: how long to listen on each channel, in TUs. If
+ * %duration_mandatory is not set, this is the maximum dwell time and
+ * the actual dwell time may be shorter.
+ * @duration_mandatory: if set, the scan duration must be as specified by the
+ * %duration field.
+ * @flags: bit field of flags controlling operation
+ * @rates: bitmap of rates to advertise for each band
+ * @wiphy: the wiphy this was for
+ * @scan_start: time (in jiffies) when the scan started
+ * @wdev: the wireless device to scan for
+ * @info: (internal) information about completed scan
+ * @notified: (internal) scan request was notified as done or aborted
+ * @no_cck: used to send probe requests at non-CCK rate in the 2 GHz band
+ * @mac_addr: MAC address used with randomisation
+ * @mac_addr_mask: MAC address mask used with randomisation, bits that
+ * are 0 in the mask should be randomised, bits that are 1 should
+ * be taken from the @mac_addr
+ * @scan_6ghz: relevant for split scan request only,
+ * true if this is the second scan request
+ * @n_6ghz_params: number of 6 GHz params
+ * @scan_6ghz_params: 6 GHz params
+ * @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
+ */
+struct cfg80211_scan_request {
+ struct cfg80211_ssid *ssids;
+ int n_ssids;
+ u32 n_channels;
+ enum nl80211_bss_scan_width scan_width;
+ const u8 *ie;
+ size_t ie_len;
+ u16 duration;
+ bool duration_mandatory;
+ u32 flags;
+
+ u32 rates[NUM_NL80211_BANDS];
+
+ struct wireless_dev *wdev;
+
+ u8 mac_addr[ETH_ALEN] __aligned(2);
+ u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+ u8 bssid[ETH_ALEN] __aligned(2);
+
+ /* internal */
+ struct wiphy *wiphy;
+ unsigned long scan_start;
+ struct cfg80211_scan_info info;
+ bool notified;
+ bool no_cck;
+ bool scan_6ghz;
+ u32 n_6ghz_params;
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params;
+
+ /* keep last */
+ struct ieee80211_channel *channels[];
+};
+
+static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
+{
+ int i;
+
+ get_random_bytes(buf, ETH_ALEN);
+ for (i = 0; i < ETH_ALEN; i++) {
+ buf[i] &= ~mask[i];
+ buf[i] |= addr[i] & mask[i];
+ }
+}
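+
+/*
+ * Sketch (illustrative): since @channels is a flexible array, a request
+ * for n_channels channels is allocated with struct_size(); a driver
+ * honouring %NL80211_SCAN_FLAG_RANDOM_ADDR can then derive the source
+ * address for probe requests with get_random_mask_addr():
+ *
+ *    struct cfg80211_scan_request *req;
+ *    u8 src[ETH_ALEN];
+ *
+ *    req = kzalloc(struct_size(req, channels, n_channels), GFP_KERNEL);
+ *    if (!req)
+ *        return -ENOMEM;
+ *    ...
+ *    get_random_mask_addr(src, req->mac_addr, req->mac_addr_mask);
+ */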
+
+/**
+ * struct cfg80211_match_set - sets of attributes to match
+ *
+ * @ssid: SSID to be matched; may be zero-length in case of BSSID match
+ * or no match (RSSI only)
+ * @bssid: BSSID to be matched; may be all-zero BSSID in case of SSID match
+ * or no match (RSSI only)
+ * @rssi_thold: don't report scan results below this threshold (in s32 dBm)
+ * @per_band_rssi_thold: Minimum RSSI threshold for each band, applied to
+ * filter out received scan results. Drivers advertise support for
+ * band-specific RSSI-based filtering through the feature capability
+ * %NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD. These band-
+ * specific RSSI thresholds take precedence over rssi_thold, if specified.
+ * If not specified for a band, the rssi_thold of the corresponding
+ * matchset is used.
+ */
+struct cfg80211_match_set {
+ struct cfg80211_ssid ssid;
+ u8 bssid[ETH_ALEN];
+ s32 rssi_thold;
+ s32 per_band_rssi_thold[NUM_NL80211_BANDS];
+};
+
+/**
+ * struct cfg80211_sched_scan_plan - scan plan for scheduled scan
+ *
+ * @interval: interval between scheduled scan iterations. In seconds.
+ * @iterations: number of scan iterations in this scan plan. Zero means
+ * infinite loop.
+ * The last scan plan will always have this parameter set to zero,
+ * all other scan plans will have a finite number of iterations.
+ */
+struct cfg80211_sched_scan_plan {
+ u32 interval;
+ u32 iterations;
+};
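+
+/*
+ * Example (illustrative): "scan every 60 seconds for 5 iterations, then
+ * every 600 seconds forever" is expressed as two plans, with only the
+ * last one having @iterations set to zero:
+ *
+ *    struct cfg80211_sched_scan_plan plans[] = {
+ *        { .interval = 60,  .iterations = 5 },
+ *        { .interval = 600, .iterations = 0 },
+ *    };
+ */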
+
+/**
+ * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment.
+ *
+ * @band: band of BSS which should match for RSSI level adjustment.
+ * @delta: value of RSSI level adjustment.
+ */
+struct cfg80211_bss_select_adjust {
+ enum nl80211_band band;
+ s8 delta;
+};
+
+/**
+ * struct cfg80211_sched_scan_request - scheduled scan request description
+ *
+ * @reqid: identifies this request.
+ * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
+ * @n_ssids: number of SSIDs
+ * @n_channels: total number of channels to scan
+ * @scan_width: channel width for scanning
+ * @ie: optional information element(s) to add into Probe Request or %NULL
+ * @ie_len: length of ie in octets
+ * @flags: bit field of flags controlling operation
+ * @match_sets: sets of parameters to be matched for a scan result
+ * entry to be considered valid and to be passed to the host
+ * (others are filtered out).
+ * If omitted, all results are passed.
+ * @n_match_sets: number of match sets
+ * @report_results: indicates that results were reported for this request
+ * @wiphy: the wiphy this was for
+ * @dev: the interface
+ * @scan_start: start time of the scheduled scan
+ * @channels: channels to scan
+ * @min_rssi_thold: for drivers only supporting a single threshold, this
+ * contains the minimum over all matchsets
+ * @mac_addr: MAC address used with randomisation
+ * @mac_addr_mask: MAC address mask used with randomisation, bits that
+ * are 0 in the mask should be randomised, bits that are 1 should
+ * be taken from the @mac_addr
+ * @scan_plans: scan plans to be executed in this scheduled scan. Lowest
+ * index must be executed first.
+ * @n_scan_plans: number of scan plans, at least 1.
+ * @rcu_head: RCU callback used to free the struct
+ * @owner_nlportid: netlink portid of owner (if this is a request owned
+ * by a particular socket)
+ * @nl_owner_dead: netlink owner socket was closed - this request must be freed
+ * @list: for keeping list of requests.
+ * @delay: delay in seconds to use before starting the first scan
+ * cycle. The driver may ignore this parameter and start
+ * immediately (or at any other time), if this feature is not
+ * supported.
+ * @relative_rssi_set: Indicates whether @relative_rssi is set or not.
+ * @relative_rssi: Relative RSSI threshold in dB to restrict scan result
+ * reporting in connected state to cases where a matching BSS is determined
+ * to have better or slightly worse RSSI than the current connected BSS.
+ * The relative RSSI threshold values are ignored in disconnected state.
+ * @rssi_adjust: delta dB of RSSI preference to be given to the BSSs that belong
+ * to the specified band while deciding whether a better BSS is reported
+ * using @relative_rssi. If delta is a negative number, the BSSs that
+ * belong to the specified band will be penalized by delta dB in relative
+ * comparisons.
+ */
+struct cfg80211_sched_scan_request {
+ u64 reqid;
+ struct cfg80211_ssid *ssids;
+ int n_ssids;
+ u32 n_channels;
+ enum nl80211_bss_scan_width scan_width;
+ const u8 *ie;
+ size_t ie_len;
+ u32 flags;
+ struct cfg80211_match_set *match_sets;
+ int n_match_sets;
+ s32 min_rssi_thold;
+ u32 delay;
+ struct cfg80211_sched_scan_plan *scan_plans;
+ int n_scan_plans;
+
+ u8 mac_addr[ETH_ALEN] __aligned(2);
+ u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+
+ bool relative_rssi_set;
+ s8 relative_rssi;
+ struct cfg80211_bss_select_adjust rssi_adjust;
+
+ /* internal */
+ struct wiphy *wiphy;
+ struct net_device *dev;
+ unsigned long scan_start;
+ bool report_results;
+ struct rcu_head rcu_head;
+ u32 owner_nlportid;
+ bool nl_owner_dead;
+ struct list_head list;
+
+ /* keep last */
+ struct ieee80211_channel *channels[];
+};
+
+/**
+ * enum cfg80211_signal_type - signal type
+ *
+ * @CFG80211_SIGNAL_TYPE_NONE: no signal strength information available
+ * @CFG80211_SIGNAL_TYPE_MBM: signal strength in mBm (100*dBm)
+ * @CFG80211_SIGNAL_TYPE_UNSPEC: signal strength, increasing from 0 through 100
+ */
+enum cfg80211_signal_type {
+ CFG80211_SIGNAL_TYPE_NONE,
+ CFG80211_SIGNAL_TYPE_MBM,
+ CFG80211_SIGNAL_TYPE_UNSPEC,
+};
+
+/**
+ * struct cfg80211_inform_bss - BSS inform data
+ * @chan: channel the frame was received on
+ * @scan_width: scan width that was used
+ * @signal: signal strength value, according to the wiphy's
+ * signal type
+ * @boottime_ns: timestamp (CLOCK_BOOTTIME) when the information was
+ * received; should match the time when the frame was actually
+ * received by the device (not just by the host, in case it was
+ * buffered on the device) and be accurate to about 10ms.
+ * If the frame isn't buffered, just passing the return value of
+ * ktime_get_boottime_ns() is likely appropriate.
+ * @parent_tsf: the time at the start of reception of the first octet of the
+ * timestamp field of the frame. The time is the TSF of the BSS specified
+ * by %parent_bssid.
+ * @parent_bssid: the BSS according to which %parent_tsf is set. This is set to
+ * the BSS that requested the scan in which the beacon/probe was received.
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
+ */
+struct cfg80211_inform_bss {
+ struct ieee80211_channel *chan;
+ enum nl80211_bss_scan_width scan_width;
+ s32 signal;
+ u64 boottime_ns;
+ u64 parent_tsf;
+ u8 parent_bssid[ETH_ALEN] __aligned(2);
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+};
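+
+/*
+ * Usage sketch (illustrative): a driver fills this struct and passes it,
+ * together with the received frame, to cfg80211_inform_bss_frame_data()
+ * so cfg80211 can update its BSS list; rx_channel and rx_signal are
+ * placeholders for driver-specific values:
+ *
+ *    struct cfg80211_inform_bss data = {
+ *        .chan = rx_channel,
+ *        .scan_width = NL80211_BSS_CHAN_WIDTH_20,
+ *        .signal = rx_signal,
+ *        .boottime_ns = ktime_get_boottime_ns(),
+ *    };
+ *    struct cfg80211_bss *bss;
+ *
+ *    bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len,
+ *                                         GFP_KERNEL);
+ *    if (bss)
+ *        cfg80211_put_bss(wiphy, bss);
+ */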
+
+/**
+ * struct cfg80211_bss_ies - BSS entry IE data
+ * @tsf: TSF contained in the frame that carried these IEs
+ * @rcu_head: internal use, for freeing
+ * @len: length of the IEs
+ * @from_beacon: these IEs are known to come from a beacon
+ * @data: IE data
+ */
+struct cfg80211_bss_ies {
+ u64 tsf;
+ struct rcu_head rcu_head;
+ int len;
+ bool from_beacon;
+ u8 data[];
+};
+
+/**
+ * struct cfg80211_bss - BSS description
+ *
+ * This structure describes a BSS (which may also be a mesh network)
+ * for use in scan results and similar.
+ *
+ * @channel: channel this BSS is on
+ * @scan_width: width of the control channel
+ * @bssid: BSSID of the BSS
+ * @beacon_interval: the beacon interval as from the frame
+ * @capability: the capability field in host byte order
+ * @ies: the information elements (Note that there is no guarantee that these
+ * are well-formed!); this is a pointer to either the beacon_ies or
+ * proberesp_ies depending on whether a Probe Response frame has been
+ * received. It is always non-%NULL.
+ * @beacon_ies: the information elements from the last Beacon frame
+ * (implementation note: if @hidden_beacon_bss is set this struct doesn't
+ * own the beacon_ies, but they're just pointers to the ones from the
+ * @hidden_beacon_bss struct)
+ * @proberesp_ies: the information elements from the last Probe Response frame
+ * @hidden_beacon_bss: in case this BSS struct represents a probe response from
+ * a BSS that hides the SSID in its beacon, this points to the BSS struct
+ * that holds the beacon data. @beacon_ies is still valid, of course, and
+ * points to the same data as hidden_beacon_bss->beacon_ies in that case.
+ * @transmitted_bss: pointer to the transmitted BSS, if this is a
+ * non-transmitted one (multi-BSSID support)
+ * @nontrans_list: list of non-transmitted BSS, if this is a transmitted one
+ * (multi-BSSID support)
+ * @signal: signal strength value (type depends on the wiphy's signal_type)
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
+ * @bssid_index: index in the multiple BSS set
+ * @max_bssid_indicator: max number of members in the BSS set
+ * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
+ */
+struct cfg80211_bss {
+ struct ieee80211_channel *channel;
+ enum nl80211_bss_scan_width scan_width;
+
+ const struct cfg80211_bss_ies __rcu *ies;
+ const struct cfg80211_bss_ies __rcu *beacon_ies;
+ const struct cfg80211_bss_ies __rcu *proberesp_ies;
+
+ struct cfg80211_bss *hidden_beacon_bss;
+ struct cfg80211_bss *transmitted_bss;
+ struct list_head nontrans_list;
+
+ s32 signal;
+
+ u16 beacon_interval;
+ u16 capability;
+
+ u8 bssid[ETH_ALEN];
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+
+ u8 bssid_index;
+ u8 max_bssid_indicator;
+
+ u8 priv[] __aligned(sizeof(void *));
+};
+
+/**
+ * ieee80211_bss_get_elem - find element with given ID
+ * @bss: the bss to search
+ * @id: the element ID
+ *
+ * Note that the return value is an RCU-protected pointer, so
+ * rcu_read_lock() must be held when calling this function.
+ * Return: %NULL if not found.
+ */
+const struct element *ieee80211_bss_get_elem(struct cfg80211_bss *bss, u8 id);
+
+/**
+ * ieee80211_bss_get_ie - find IE with given ID
+ * @bss: the bss to search
+ * @id: the element ID
+ *
+ * Note that the return value is an RCU-protected pointer, so
+ * rcu_read_lock() must be held when calling this function.
+ * Return: %NULL if not found.
+ */
+static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id)
+{
+ return (const void *)ieee80211_bss_get_elem(bss, id);
+}
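+
+/*
+ * Usage sketch (illustrative): because the returned pointer is
+ * RCU-protected, hold rcu_read_lock() and copy out what is needed
+ * before unlocking:
+ *
+ *    const struct element *elem;
+ *
+ *    rcu_read_lock();
+ *    elem = ieee80211_bss_get_elem(bss, WLAN_EID_SSID);
+ *    if (elem && elem->datalen <= IEEE80211_MAX_SSID_LEN)
+ *        memcpy(ssid, elem->data, elem->datalen);
+ *    rcu_read_unlock();
+ */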
+
+/**
+ * struct cfg80211_auth_request - Authentication request data
+ *
+ * This structure provides information needed to complete IEEE 802.11
+ * authentication.
+ *
+ * @bss: The BSS to authenticate with, the callee must obtain a reference
+ * to it if it needs to keep it.
+ * @auth_type: Authentication type (algorithm)
+ * @ie: Extra IEs to add to Authentication frame or %NULL
+ * @ie_len: Length of ie buffer in octets
+ * @key_len: length of WEP key for shared key authentication
+ * @key_idx: index of WEP key for shared key authentication
+ * @key: WEP key for shared key authentication
+ * @auth_data: Fields and elements in Authentication frames. This contains
+ * the authentication frame body (non-IE and IE data), excluding the
+ * Authentication algorithm number, i.e., starting at the Authentication
+ * transaction sequence number field.
+ * @auth_data_len: Length of auth_data buffer in octets
+ * @link_id: if >= 0, indicates authentication should be done as an MLD,
+ * the interface address is included as the MLD address and the
+ * necessary link (with the given link_id) will be created (and
+ * given an MLD address) by the driver
+ * @ap_mld_addr: AP MLD address in case of authentication request with
+ * an AP MLD, valid iff @link_id >= 0
+ */
+struct cfg80211_auth_request {
+ struct cfg80211_bss *bss;
+ const u8 *ie;
+ size_t ie_len;
+ enum nl80211_auth_type auth_type;
+ const u8 *key;
+ u8 key_len;
+ s8 key_idx;
+ const u8 *auth_data;
+ size_t auth_data_len;
+ s8 link_id;
+ const u8 *ap_mld_addr;
+};
+
+/**
+ * struct cfg80211_assoc_link - per-link information for MLO association
+ * @bss: the BSS pointer, see also &struct cfg80211_assoc_request::bss;
+ * if this is %NULL for a link, that link is not requested
+ * @elems: extra elements for the per-STA profile for this link
+ * @elems_len: length of the elements
+ */
+struct cfg80211_assoc_link {
+ struct cfg80211_bss *bss;
+ const u8 *elems;
+ size_t elems_len;
+};
+
+/**
+ * enum cfg80211_assoc_req_flags - Override default behaviour in association.
+ *
+ * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n)
+ * @ASSOC_REQ_DISABLE_VHT: Disable VHT
+ * @ASSOC_REQ_USE_RRM: Declare RRM capability in this association
+ * @CONNECT_REQ_EXTERNAL_AUTH_SUPPORT: User space indicates external
+ * authentication capability. Drivers can offload authentication to
+ * userspace if this flag is set. Only applicable for cfg80211_connect()
+ * request (connect callback).
+ * @ASSOC_REQ_DISABLE_HE: Disable HE
+ * @ASSOC_REQ_DISABLE_EHT: Disable EHT
+ * @CONNECT_REQ_MLO_SUPPORT: Userspace indicates support for handling MLD links.
+ * Drivers shall disable MLO features for the current association if this
+ * flag is not set.
+ */
+enum cfg80211_assoc_req_flags {
+ ASSOC_REQ_DISABLE_HT = BIT(0),
+ ASSOC_REQ_DISABLE_VHT = BIT(1),
+ ASSOC_REQ_USE_RRM = BIT(2),
+ CONNECT_REQ_EXTERNAL_AUTH_SUPPORT = BIT(3),
+ ASSOC_REQ_DISABLE_HE = BIT(4),
+ ASSOC_REQ_DISABLE_EHT = BIT(5),
+ CONNECT_REQ_MLO_SUPPORT = BIT(6),
+};
+
+/**
+ * struct cfg80211_assoc_request - (Re)Association request data
+ *
+ * This structure provides information needed to complete IEEE 802.11
+ * (re)association.
+ * @bss: The BSS to associate with. If the call is successful the driver is
+ * given a reference that it must give back to cfg80211_send_rx_assoc()
+ * or to cfg80211_assoc_timeout(). To ensure proper refcounting, new
+ * association requests while already associating must be rejected.
+ * This also applies to the @links.bss parameter, which is used instead
+ * of this one (it is %NULL) for MLO associations.
+ * @ie: Extra IEs to add to (Re)Association Request frame or %NULL
+ * @ie_len: Length of ie buffer in octets
+ * @use_mfp: Use management frame protection (IEEE 802.11w) in this association
+ * @crypto: crypto settings
+ * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used
+ * to indicate a request to reassociate within the ESS instead of a request
+ * to do the initial association with the ESS. When included, this is set to
+ * the BSSID of the current association, i.e., to the value that is
+ * included in the Current AP address field of the Reassociation Request
+ * frame.
+ * @flags: See &enum cfg80211_assoc_req_flags
+ * @ht_capa: HT capability overrides. Values set in ht_capa_mask
+ * will be used in ht_capa. Unsupported values will be ignored.
+ * @ht_capa_mask: The bits of ht_capa which are to be used.
+ * @vht_capa: VHT capability override
+ * @vht_capa_mask: VHT capability mask indicating which fields to use
+ * @fils_kek: FILS KEK for protecting (Re)Association Request/Response frame or
+ * %NULL if FILS is not used.
+ * @fils_kek_len: Length of fils_kek in octets
+ * @fils_nonces: FILS nonces (part of AAD) for protecting (Re)Association
+ * Request/Response frame or %NULL if FILS is not used. This field starts
+ * with 16 octets of STA Nonce followed by 16 octets of AP Nonce.
+ * @s1g_capa: S1G capability override
+ * @s1g_capa_mask: S1G capability override mask
+ * @links: per-link information for MLO connections
+ * @link_id: >= 0 for MLO connections, where links are given, and indicates
+ * the link on which the association request should be sent
+ * @ap_mld_addr: AP MLD address in case of MLO association request,
+ * valid iff @link_id >= 0
+ */
+struct cfg80211_assoc_request {
+ struct cfg80211_bss *bss;
+ const u8 *ie, *prev_bssid;
+ size_t ie_len;
+ struct cfg80211_crypto_settings crypto;
+ bool use_mfp;
+ u32 flags;
+ struct ieee80211_ht_cap ht_capa;
+ struct ieee80211_ht_cap ht_capa_mask;
+ struct ieee80211_vht_cap vht_capa, vht_capa_mask;
+ const u8 *fils_kek;
+ size_t fils_kek_len;
+ const u8 *fils_nonces;
+ struct ieee80211_s1g_cap s1g_capa, s1g_capa_mask;
+ struct cfg80211_assoc_link links[IEEE80211_MLD_MAX_NUM_LINKS];
+ const u8 *ap_mld_addr;
+ s8 link_id;
+};
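+
+/*
+ * Sketch (illustrative): for MLO, @bss is %NULL and the requested links
+ * are the entries in @links with a non-%NULL bss; drv_setup_link() is a
+ * hypothetical driver helper:
+ *
+ *    int link_id;
+ *
+ *    for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ *        if (!req->links[link_id].bss)
+ *            continue;
+ *        drv_setup_link(req, link_id);
+ *    }
+ */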
+
+/**
+ * struct cfg80211_deauth_request - Deauthentication request data
+ *
+ * This structure provides information needed to complete IEEE 802.11
+ * deauthentication.
+ *
+ * @bssid: the BSSID or AP MLD address to deauthenticate from
+ * @ie: Extra IEs to add to Deauthentication frame or %NULL
+ * @ie_len: Length of ie buffer in octets
+ * @reason_code: The reason code for the deauthentication
+ * @local_state_change: if set, change local state only and
+ * do not send a Deauthentication frame
+ */
+struct cfg80211_deauth_request {
+ const u8 *bssid;
+ const u8 *ie;
+ size_t ie_len;
+ u16 reason_code;
+ bool local_state_change;
+};
+
+/**
+ * struct cfg80211_disassoc_request - Disassociation request data
+ *
+ * This structure provides information needed to complete IEEE 802.11
+ * disassociation.
+ *
+ * @ap_addr: the BSSID or AP MLD address to disassociate from
+ * @ie: Extra IEs to add to Disassociation frame or %NULL
+ * @ie_len: Length of ie buffer in octets
+ * @reason_code: The reason code for the disassociation
+ * @local_state_change: This is a request for a local state only, i.e., no
+ * Disassociation frame is to be transmitted.
+ */
+struct cfg80211_disassoc_request {
+ const u8 *ap_addr;
+ const u8 *ie;
+ size_t ie_len;
+ u16 reason_code;
+ bool local_state_change;
+};
+
+/**
+ * struct cfg80211_ibss_params - IBSS parameters
+ *
+ * This structure defines the IBSS parameters for the join_ibss()
+ * method.
+ *
+ * @ssid: The SSID, will always be non-null.
+ * @ssid_len: The length of the SSID, will always be non-zero.
+ * @bssid: Fixed BSSID requested, may be %NULL; if set, do not
+ * search for IBSSs with a different BSSID.
+ * @chandef: defines the channel to use if no other IBSS to join can be found
+ * @channel_fixed: The channel should be fixed -- do not search for
+ * IBSSs to join on other channels.
+ * @ie: information element(s) to include in the beacon
+ * @ie_len: length of @ie in octets
+ * @beacon_interval: beacon interval to use
+ * @privacy: this is a protected network, keys will be configured
+ * after joining
+ * @control_port: whether user space controls IEEE 802.1X port, i.e.,
+ * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
+ * required to assume that the port is unauthorized until authorized by
+ * user space. Otherwise, port is marked authorized by default.
+ * @control_port_over_nl80211: TRUE if userspace expects to exchange control
+ * port frames over NL80211 instead of the network interface.
+ * @userspace_handles_dfs: whether user space controls DFS operation, i.e.
+ * changes the channel when a radar is detected. This is required
+ * to operate on DFS channels.
+ * @basic_rates: bitmap of basic rates to use when creating the IBSS
+ * @mcast_rate: per-band multicast rate index + 1 (0: disabled)
+ * @ht_capa: HT capability overrides. Values set in ht_capa_mask
+ * will be used in ht_capa. Unsupported values will be ignored.
+ * @ht_capa_mask: The bits of ht_capa which are to be used.
+ * @wep_keys: static WEP keys, if not NULL points to an array of
+ * CFG80211_MAX_WEP_KEYS WEP keys
+ * @wep_tx_key: key index (0..3) of the default TX static WEP key
+ */
+struct cfg80211_ibss_params {
+ const u8 *ssid;
+ const u8 *bssid;
+ struct cfg80211_chan_def chandef;
+ const u8 *ie;
+ u8 ssid_len, ie_len;
+ u16 beacon_interval;
+ u32 basic_rates;
+ bool channel_fixed;
+ bool privacy;
+ bool control_port;
+ bool control_port_over_nl80211;
+ bool userspace_handles_dfs;
+ int mcast_rate[NUM_NL80211_BANDS];
+ struct ieee80211_ht_cap ht_capa;
+ struct ieee80211_ht_cap ht_capa_mask;
+ struct key_params *wep_keys;
+ int wep_tx_key;
+};
+
+/**
+ * struct cfg80211_bss_selection - connection parameters for BSS selection.
+ *
+ * @behaviour: requested BSS selection behaviour.
+ * @param: parameters for the requested behaviour.
+ * @band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF.
+ * @adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST.
+ */
+struct cfg80211_bss_selection {
+ enum nl80211_bss_select_attr behaviour;
+ union {
+ enum nl80211_band band_pref;
+ struct cfg80211_bss_select_adjust adjust;
+ } param;
+};
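+
+/*
+ * Sketch (illustrative): which member of @param is valid depends on
+ * @behaviour; prefer_band() and adjust_rssi() are hypothetical:
+ *
+ *    switch (sel->behaviour) {
+ *    case NL80211_BSS_SELECT_ATTR_BAND_PREF:
+ *        prefer_band(sel->param.band_pref);
+ *        break;
+ *    case NL80211_BSS_SELECT_ATTR_RSSI_ADJUST:
+ *        adjust_rssi(sel->param.adjust.band, sel->param.adjust.delta);
+ *        break;
+ *    default:
+ *        break;
+ *    }
+ */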
+
+/**
+ * struct cfg80211_connect_params - Connection parameters
+ *
+ * This structure provides information needed to complete IEEE 802.11
+ * authentication and association.
+ *
+ * @channel: The channel to use or %NULL if not specified (auto-select based
+ * on scan results)
+ * @channel_hint: The channel of the recommended BSS for initial connection or
+ * %NULL if not specified
+ * @bssid: The AP BSSID or %NULL if not specified (auto-select based on scan
+ * results)
+ * @bssid_hint: The recommended AP BSSID for initial connection to the BSS or
+ * %NULL if not specified. Unlike the @bssid parameter, the driver is
+ * allowed to ignore this @bssid_hint if it has knowledge of a better BSS
+ * to use.
+ * @ssid: SSID
+ * @ssid_len: Length of ssid in octets
+ * @auth_type: Authentication type (algorithm)
+ * @ie: IEs for association request
+ * @ie_len: Length of @ie in octets
+ * @privacy: indicates whether privacy-enabled APs should be used
+ * @mfp: indicates whether management frame protection is used
+ * @crypto: crypto settings
+ * @key_len: length of WEP key for shared key authentication
+ * @key_idx: index of WEP key for shared key authentication
+ * @key: WEP key for shared key authentication
+ * @flags: See &enum cfg80211_assoc_req_flags
+ * @bg_scan_period: Background scan period in seconds
+ * or -1 to indicate that default value is to be used.
+ * @ht_capa: HT capability overrides. Values set in ht_capa_mask
+ * will be used in ht_capa. Unsupported values will be ignored.
+ * @ht_capa_mask: The bits of ht_capa which are to be used.
+ * @vht_capa: VHT Capability overrides
+ * @vht_capa_mask: The bits of vht_capa which are to be used.
+ * @pbss: if set, connect to a PCP instead of AP. Valid for DMG
+ * networks.
+ * @bss_select: criteria to be used for BSS selection.
+ * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used
+ * to indicate a request to reassociate within the ESS instead of a request
+ * to do the initial association with the ESS. When included, this is set to
+ * the BSSID of the current association, i.e., to the value that is
+ * included in the Current AP address field of the Reassociation Request
+ * frame.
+ * @fils_erp_username: EAP re-authentication protocol (ERP) username part of the
+ * NAI or %NULL if not specified. This is used to construct FILS wrapped
+ * data IE.
+ * @fils_erp_username_len: Length of @fils_erp_username in octets.
+ * @fils_erp_realm: EAP re-authentication protocol (ERP) realm part of NAI or
+ * %NULL if not specified. This specifies the domain name of the ER server and
+ * is used to construct FILS wrapped data IE.
+ * @fils_erp_realm_len: Length of @fils_erp_realm in octets.
+ * @fils_erp_next_seq_num: The next sequence number to use in the FILS ERP
+ * messages. This is also used to construct FILS wrapped data IE.
+ * @fils_erp_rrk: ERP re-authentication Root Key (rRK) used to derive additional
+ * keys in FILS or %NULL if not specified.
+ * @fils_erp_rrk_len: Length of @fils_erp_rrk in octets.
+ * @want_1x: indicates user-space supports and wants to use 802.1X driver
+ * offload of 4-way handshake.
+ * @edmg: define the EDMG channels.
+ * This may specify multiple channels and bonding options for the driver
+ * to choose from, based on BSS configuration.
+ */
+struct cfg80211_connect_params {
+ struct ieee80211_channel *channel;
+ struct ieee80211_channel *channel_hint;
+ const u8 *bssid;
+ const u8 *bssid_hint;
+ const u8 *ssid;
+ size_t ssid_len;
+ enum nl80211_auth_type auth_type;
+ const u8 *ie;
+ size_t ie_len;
+ bool privacy;
+ enum nl80211_mfp mfp;
+ struct cfg80211_crypto_settings crypto;
+ const u8 *key;
+ u8 key_len, key_idx;
+ u32 flags;
+ int bg_scan_period;
+ struct ieee80211_ht_cap ht_capa;
+ struct ieee80211_ht_cap ht_capa_mask;
+ struct ieee80211_vht_cap vht_capa;
+ struct ieee80211_vht_cap vht_capa_mask;
+ bool pbss;
+ struct cfg80211_bss_selection bss_select;
+ const u8 *prev_bssid;
+ const u8 *fils_erp_username;
+ size_t fils_erp_username_len;
+ const u8 *fils_erp_realm;
+ size_t fils_erp_realm_len;
+ u16 fils_erp_next_seq_num;
+ const u8 *fils_erp_rrk;
+ size_t fils_erp_rrk_len;
+ bool want_1x;
+ struct ieee80211_edmg edmg;
+};
+
+/**
+ * enum cfg80211_connect_params_changed - Connection parameters being updated
+ *
+ * This enum indicates which connect parameters have to be updated as
+ * part of an update_connect_params() call.
+ *
+ * @UPDATE_ASSOC_IES: Indicates whether association request IEs are updated
+ * @UPDATE_FILS_ERP_INFO: Indicates that FILS connection parameters (realm,
+ * username, erp sequence number and rrk) are updated
+ * @UPDATE_AUTH_TYPE: Indicates that authentication type is updated
+ */
+enum cfg80211_connect_params_changed {
+ UPDATE_ASSOC_IES = BIT(0),
+ UPDATE_FILS_ERP_INFO = BIT(1),
+ UPDATE_AUTH_TYPE = BIT(2),
+};
+
+/**
+ * enum wiphy_params_flags - set_wiphy_params bitfield values
+ * @WIPHY_PARAM_RETRY_SHORT: wiphy->retry_short has changed
+ * @WIPHY_PARAM_RETRY_LONG: wiphy->retry_long has changed
+ * @WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed
+ * @WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed
+ * @WIPHY_PARAM_COVERAGE_CLASS: coverage class changed
+ * @WIPHY_PARAM_DYN_ACK: dynack has been enabled
+ * @WIPHY_PARAM_TXQ_LIMIT: TXQ packet limit has been changed
+ * @WIPHY_PARAM_TXQ_MEMORY_LIMIT: TXQ memory limit has been changed
+ * @WIPHY_PARAM_TXQ_QUANTUM: TXQ scheduler quantum
+ */
+enum wiphy_params_flags {
+ WIPHY_PARAM_RETRY_SHORT = 1 << 0,
+ WIPHY_PARAM_RETRY_LONG = 1 << 1,
+ WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2,
+ WIPHY_PARAM_RTS_THRESHOLD = 1 << 3,
+ WIPHY_PARAM_COVERAGE_CLASS = 1 << 4,
+ WIPHY_PARAM_DYN_ACK = 1 << 5,
+ WIPHY_PARAM_TXQ_LIMIT = 1 << 6,
+ WIPHY_PARAM_TXQ_MEMORY_LIMIT = 1 << 7,
+ WIPHY_PARAM_TXQ_QUANTUM = 1 << 8,
+};
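+
+/*
+ * Usage sketch (illustrative): the set_wiphy_params() op receives these
+ * flags OR'ed together and should apply only the fields marked changed;
+ * the drv_set_*() helpers are hypothetical:
+ *
+ *    static int drv_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+ *    {
+ *        if (changed & WIPHY_PARAM_RTS_THRESHOLD)
+ *            drv_set_rts(wiphy->rts_threshold);
+ *        if (changed & WIPHY_PARAM_RETRY_SHORT)
+ *            drv_set_retry(wiphy->retry_short);
+ *        return 0;
+ *    }
+ */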
+
+#define IEEE80211_DEFAULT_AIRTIME_WEIGHT 256
+
+/* The per TXQ device queue limit in airtime */
+#define IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L 5000
+#define IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H 12000
+
+/* The per interface airtime threshold to switch to lower queue limit */
+#define IEEE80211_AQL_THRESHOLD 24000
+
+/**
+ * struct cfg80211_pmksa - PMK Security Association
+ *
+ * This structure is passed to the set/del_pmksa() method for PMKSA
+ * caching.
+ *
+ * @bssid: The AP's BSSID (may be %NULL).
+ * @pmkid: The identifier to refer a PMKSA.
+ * @pmk: The PMK for the PMKSA identified by @pmkid. This is used for key
+ * derivation by a FILS STA. Otherwise, %NULL.
+ * @pmk_len: Length of the @pmk. The length of @pmk can differ depending on
+ * the hash algorithm used to generate this.
+ * @ssid: SSID to specify the ESS within which a PMKSA is valid when using FILS
+ * cache identifier (may be %NULL).
+ * @ssid_len: Length of the @ssid in octets.
+ * @cache_id: 2-octet cache identifier advertised by a FILS AP identifying the
+ * scope of PMKSA. This is valid only if @ssid_len is non-zero (may be
+ * %NULL).
+ * @pmk_lifetime: Maximum lifetime for PMKSA in seconds
+ * (dot11RSNAConfigPMKLifetime) or 0 if not specified.
+ * The configured PMKSA must not be used for PMKSA caching after
+ * expiration and any keys derived from this PMK become invalid on
+ * expiration, i.e., the current association must be dropped if the PMK
+ * used for it expires.
+ * @pmk_reauth_threshold: Threshold time for reauthentication (percentage of
+ * PMK lifetime, dot11RSNAConfigPMKReauthThreshold) or 0 if not specified.
+ * Drivers are expected to trigger a full authentication instead of using
+ * this PMKSA for caching when reassociating to a new BSS after this
+ * threshold to generate a new PMK before the current one expires.
+ */
+struct cfg80211_pmksa {
+ const u8 *bssid;
+ const u8 *pmkid;
+ const u8 *pmk;
+ size_t pmk_len;
+ const u8 *ssid;
+ size_t ssid_len;
+ const u8 *cache_id;
+ u32 pmk_lifetime;
+ u8 pmk_reauth_threshold;
+};
+
+/**
+ * struct cfg80211_pkt_pattern - packet pattern
+ * @mask: bitmask indicating which pattern bytes to match and which to
+ * ignore, one bit per byte, in the same format as nl80211
+ * @pattern: bytes to match where bitmask is 1
+ * @pattern_len: length of pattern (in bytes)
+ * @pkt_offset: packet offset (in bytes)
+ *
+ * Internal note: @mask and @pattern are allocated in one chunk of
+ * memory, free @mask only!
+ */
+struct cfg80211_pkt_pattern {
+ const u8 *mask, *pattern;
+ int pattern_len;
+ int pkt_offset;
+};
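+
+/*
+ * Sketch (illustrative): with one mask bit per pattern byte (lowest bit
+ * first), a packet matches when every byte whose mask bit is set equals
+ * the corresponding pattern byte; bounds checking is omitted here:
+ *
+ *    static bool pkt_matches(const struct cfg80211_pkt_pattern *p,
+ *                            const u8 *pkt)
+ *    {
+ *        int i;
+ *
+ *        for (i = 0; i < p->pattern_len; i++) {
+ *            if (!(p->mask[i / 8] & BIT(i % 8)))
+ *                continue;
+ *            if (pkt[p->pkt_offset + i] != p->pattern[i])
+ *                return false;
+ *        }
+ *        return true;
+ *    }
+ */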
+
+/**
+ * struct cfg80211_wowlan_tcp - TCP connection parameters
+ *
+ * @sock: (internal) socket for source port allocation
+ * @src: source IP address
+ * @dst: destination IP address
+ * @dst_mac: destination MAC address
+ * @src_port: source port
+ * @dst_port: destination port
+ * @payload_len: data payload length
+ * @payload: data payload buffer
+ * @payload_seq: payload sequence stamping configuration
+ * @data_interval: interval at which to send data packets
+ * @wake_len: wakeup payload match length
+ * @wake_data: wakeup payload match data
+ * @wake_mask: wakeup payload match mask
+ * @tokens_size: length of the tokens buffer
+ * @payload_tok: payload token usage configuration
+ */
+struct cfg80211_wowlan_tcp {
+ struct socket *sock;
+ __be32 src, dst;
+ u16 src_port, dst_port;
+ u8 dst_mac[ETH_ALEN];
+ int payload_len;
+ const u8 *payload;
+ struct nl80211_wowlan_tcp_data_seq payload_seq;
+ u32 data_interval;
+ u32 wake_len;
+ const u8 *wake_data, *wake_mask;
+ u32 tokens_size;
+ /* must be last, variable member */
+ struct nl80211_wowlan_tcp_data_token payload_tok;
+};
+
+/**
+ * struct cfg80211_wowlan - Wake on Wireless-LAN support info
+ *
+ * This structure defines the enabled WoWLAN triggers for the device.
+ * @any: wake up on any activity -- special trigger if device continues
+ * operating as normal during suspend
+ * @disconnect: wake up if getting disconnected
+ * @magic_pkt: wake up on receiving magic packet
+ * @patterns: wake up on receiving packet matching a pattern
+ * @n_patterns: number of patterns
+ * @gtk_rekey_failure: wake up on GTK rekey failure
+ * @eap_identity_req: wake up on EAP identity request packet
+ * @four_way_handshake: wake up on 4-way handshake
+ * @rfkill_release: wake up when rfkill is released
+ * @tcp: TCP connection establishment/wakeup parameters, see nl80211.h.
+ * NULL if not configured.
+ * @nd_config: configuration for the scan to be used for net detect wake.
+ */
+struct cfg80211_wowlan {
+ bool any, disconnect, magic_pkt, gtk_rekey_failure,
+ eap_identity_req, four_way_handshake,
+ rfkill_release;
+ struct cfg80211_pkt_pattern *patterns;
+ struct cfg80211_wowlan_tcp *tcp;
+ int n_patterns;
+ struct cfg80211_sched_scan_request *nd_config;
+};
+
+/**
+ * struct cfg80211_coalesce_rules - Coalesce rule parameters
+ *
+ * This structure defines coalesce rule for the device.
+ * @delay: maximum coalescing delay in msecs.
+ * @condition: condition for packet coalescence.
+ * see &enum nl80211_coalesce_condition.
+ * @patterns: array of packet patterns
+ * @n_patterns: number of patterns
+ */
+struct cfg80211_coalesce_rules {
+ int delay;
+ enum nl80211_coalesce_condition condition;
+ struct cfg80211_pkt_pattern *patterns;
+ int n_patterns;
+};
+
+/**
+ * struct cfg80211_coalesce - Packet coalescing settings
+ *
+ * This structure defines coalescing settings.
+ * @rules: array of coalesce rules
+ * @n_rules: number of rules
+ */
+struct cfg80211_coalesce {
+ struct cfg80211_coalesce_rules *rules;
+ int n_rules;
+};
+
+/**
+ * struct cfg80211_wowlan_nd_match - information about the match
+ *
+ * @ssid: SSID of the match that triggered the wake up
+ * @n_channels: Number of channels where the match occurred. This
+ * value may be zero if the driver can't report the channels.
+ * @channels: center frequencies of the channels where a match
+ * occurred (in MHz)
+ */
+struct cfg80211_wowlan_nd_match {
+ struct cfg80211_ssid ssid;
+ int n_channels;
+ u32 channels[];
+};
+
+/**
+ * struct cfg80211_wowlan_nd_info - net detect wake up information
+ *
+ * @n_matches: Number of match information instances provided in
+ * @matches. This value may be zero if the driver can't provide
+ * match information.
+ * @matches: Array of pointers to matches containing information about
+ * the matches that triggered the wake up.
+ */
+struct cfg80211_wowlan_nd_info {
+ int n_matches;
+ struct cfg80211_wowlan_nd_match *matches[];
+};
+
+/**
+ * struct cfg80211_wowlan_wakeup - wakeup report
+ * @disconnect: woke up by getting disconnected
+ * @magic_pkt: woke up by receiving magic packet
+ * @gtk_rekey_failure: woke up by GTK rekey failure
+ * @eap_identity_req: woke up by EAP identity request packet
+ * @four_way_handshake: woke up by 4-way handshake
+ * @rfkill_release: woke up by rfkill being released
+ * @pattern_idx: pattern that caused wakeup, -1 if not due to pattern
+ * @packet_present_len: copied wakeup packet data
+ * @packet_len: original wakeup packet length
+ * @packet: The packet causing the wakeup, if any.
+ * @packet_80211: For pattern match, magic packet and other data frame
+ * triggers, an 802.3 frame should be reported; for disconnect due to
+ * deauth, an 802.11 frame. This indicates which it is.
+ * @tcp_match: TCP wakeup packet received
+ * @tcp_connlost: TCP connection lost or failed to establish
+ * @tcp_nomoretokens: TCP data ran out of tokens
+ * @net_detect: if not %NULL, woke up because of net detect
+ */
+struct cfg80211_wowlan_wakeup {
+ bool disconnect, magic_pkt, gtk_rekey_failure,
+ eap_identity_req, four_way_handshake,
+ rfkill_release, packet_80211,
+ tcp_match, tcp_connlost, tcp_nomoretokens;
+ s32 pattern_idx;
+ u32 packet_present_len, packet_len;
+ const void *packet;
+ struct cfg80211_wowlan_nd_info *net_detect;
+};
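+
+/*
+ * Usage sketch (illustrative): after resume, a driver reports why it
+ * woke up via cfg80211_report_wowlan_wakeup(), here for a match on
+ * pattern 0; pkt_data/pkt_len are placeholders:
+ *
+ *    struct cfg80211_wowlan_wakeup wakeup = {
+ *        .pattern_idx = 0,
+ *        .packet = pkt_data,
+ *        .packet_len = pkt_len,
+ *        .packet_present_len = pkt_len,
+ *        .packet_80211 = false,
+ *    };
+ *
+ *    cfg80211_report_wowlan_wakeup(wdev, &wakeup, GFP_KERNEL);
+ */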
+
+/**
+ * struct cfg80211_gtk_rekey_data - rekey data
+ * @kek: key encryption key (@kek_len bytes)
+ * @kck: key confirmation key (@kck_len bytes)
+ * @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes)
+ * @kek_len: length of kek
+ * @kck_len: length of kck
+ * @akm: akm (oui, id)
+ */
+struct cfg80211_gtk_rekey_data {
+ const u8 *kek, *kck, *replay_ctr;
+ u32 akm;
+ u8 kek_len, kck_len;
+};
+
+/**
+ * struct cfg80211_update_ft_ies_params - FT IE Information
+ *
+ * This structure provides information needed to update the fast transition IE
+ *
+ * @md: The Mobility Domain ID, 2 Octet value
+ * @ie: Fast Transition IEs
+ * @ie_len: Length of ft_ie in octets
+ */
+struct cfg80211_update_ft_ies_params {
+ u16 md;
+ const u8 *ie;
+ size_t ie_len;
+};
+
+/**
+ * struct cfg80211_mgmt_tx_params - mgmt tx parameters
+ *
+ * This structure provides information needed to transmit a mgmt frame
+ *
+ * @chan: channel to use
+ * @offchan: indicates whether off channel operation is required
+ * @wait: duration for ROC
+ * @buf: buffer to transmit
+ * @len: buffer length
+ * @no_cck: don't use cck rates for this frame
+ * @dont_wait_for_ack: tells the low level not to wait for an ack
+ * @n_csa_offsets: length of csa_offsets array
+ * @csa_offsets: array of all the csa offsets in the frame
+ * @link_id: for MLO, the link ID to transmit on, -1 if not given; note
+ * that the link ID isn't validated (much), it's in range but the
+ * link might not exist (or be used by the receiver STA)
+ */
+struct cfg80211_mgmt_tx_params {
+ struct ieee80211_channel *chan;
+ bool offchan;
+ unsigned int wait;
+ const u8 *buf;
+ size_t len;
+ bool no_cck;
+ bool dont_wait_for_ack;
+ int n_csa_offsets;
+ const u16 *csa_offsets;
+ int link_id;
+};
+
+/**
+ * struct cfg80211_dscp_exception - DSCP exception
+ *
+ * @dscp: DSCP value that does not adhere to the user priority range definition
+ * @up: user priority value to which the corresponding DSCP value belongs
+ */
+struct cfg80211_dscp_exception {
+ u8 dscp;
+ u8 up;
+};
+
+/**
+ * struct cfg80211_dscp_range - DSCP range definition for user priority
+ *
+ * @low: lowest DSCP value of this user priority range, inclusive
+ * @high: highest DSCP value of this user priority range, inclusive
+ */
+struct cfg80211_dscp_range {
+ u8 low;
+ u8 high;
+};
+
+/* QoS Map Set element length defined in IEEE Std 802.11-2012, 8.4.2.97 */
+#define IEEE80211_QOS_MAP_MAX_EX 21
+#define IEEE80211_QOS_MAP_LEN_MIN 16
+#define IEEE80211_QOS_MAP_LEN_MAX \
+ (IEEE80211_QOS_MAP_LEN_MIN + 2 * IEEE80211_QOS_MAP_MAX_EX)
+
+/**
+ * struct cfg80211_qos_map - QoS Map Information
+ *
+ * This struct defines the Interworking QoS map setting for DSCP values
+ *
+ * @num_des: number of DSCP exceptions (0..21)
+ * @dscp_exception: optionally up to maximum of 21 DSCP exceptions from
+ * the user priority DSCP range definition
+ * @up: DSCP range definition for a particular user priority
+ */
+struct cfg80211_qos_map {
+ u8 num_des;
+ struct cfg80211_dscp_exception dscp_exception[IEEE80211_QOS_MAP_MAX_EX];
+ struct cfg80211_dscp_range up[8];
+};
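+
+/*
+ * Sketch (illustrative): the QoS Map Set element carries 2 octets per
+ * DSCP exception followed by 16 octets for the eight UP ranges, so the
+ * element length follows directly from @num_des:
+ *
+ *    static u8 qos_map_len(const struct cfg80211_qos_map *map)
+ *    {
+ *        return IEEE80211_QOS_MAP_LEN_MIN + 2 * map->num_des;
+ *    }
+ */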
+
+/**
+ * struct cfg80211_nan_conf - NAN configuration
+ *
+ * This struct defines NAN configuration parameters
+ *
+ * @master_pref: master preference (1 - 255)
+ * @bands: operating bands, a bitmap of &enum nl80211_band values.
+ * For instance, for NL80211_BAND_2GHZ, bit 0 would be set
+ * (i.e. BIT(NL80211_BAND_2GHZ)).
+ */
+struct cfg80211_nan_conf {
+ u8 master_pref;
+ u8 bands;
+};
+
+/**
+ * enum cfg80211_nan_conf_changes - indicates changed fields in NAN
+ * configuration
+ *
+ * @CFG80211_NAN_CONF_CHANGED_PREF: master preference
+ * @CFG80211_NAN_CONF_CHANGED_BANDS: operating bands
+ */
+enum cfg80211_nan_conf_changes {
+ CFG80211_NAN_CONF_CHANGED_PREF = BIT(0),
+ CFG80211_NAN_CONF_CHANGED_BANDS = BIT(1),
+};
+
+/**
+ * struct cfg80211_nan_func_filter - a NAN function Rx / Tx filter
+ *
+ * @filter: the content of the filter
+ * @len: the length of the filter
+ */
+struct cfg80211_nan_func_filter {
+ const u8 *filter;
+ u8 len;
+};
+
+/**
+ * struct cfg80211_nan_func - a NAN function
+ *
+ * @type: &enum nl80211_nan_function_type
+ * @service_id: the service ID of the function
+ * @publish_type: &nl80211_nan_publish_type
+ * @close_range: if true, the range should be limited. Threshold is
+ * implementation specific.
+ * @publish_bcast: if true, the solicited publish should be broadcast
+ * @subscribe_active: if true, the subscribe is active
+ * @followup_id: the instance ID for follow up
+ * @followup_reqid: the requestor instance ID for follow up
+ * @followup_dest: MAC address of the recipient of the follow up
+ * @ttl: time to live counter in DW.
+ * @serv_spec_info: Service Specific Info
+ * @serv_spec_info_len: Service Specific Info length
+ * @srf_include: if true, SRF is inclusive
+ * @srf_bf: Bloom Filter
+ * @srf_bf_len: Bloom Filter length
+ * @srf_bf_idx: Bloom Filter index
+ * @srf_macs: SRF MAC addresses
+ * @srf_num_macs: number of MAC addresses in SRF
+ * @rx_filters: rx filters that are matched with corresponding peer's tx_filter
+ * @tx_filters: filters that should be transmitted in the SDF.
+ * @num_rx_filters: length of &rx_filters.
+ * @num_tx_filters: length of &tx_filters.
+ * @instance_id: driver allocated id of the function.
+ * @cookie: unique NAN function identifier.
+ */
+struct cfg80211_nan_func {
+ enum nl80211_nan_function_type type;
+ u8 service_id[NL80211_NAN_FUNC_SERVICE_ID_LEN];
+ u8 publish_type;
+ bool close_range;
+ bool publish_bcast;
+ bool subscribe_active;
+ u8 followup_id;
+ u8 followup_reqid;
+ struct mac_address followup_dest;
+ u32 ttl;
+ const u8 *serv_spec_info;
+ u8 serv_spec_info_len;
+ bool srf_include;
+ const u8 *srf_bf;
+ u8 srf_bf_len;
+ u8 srf_bf_idx;
+ struct mac_address *srf_macs;
+ int srf_num_macs;
+ struct cfg80211_nan_func_filter *rx_filters;
+ struct cfg80211_nan_func_filter *tx_filters;
+ u8 num_tx_filters;
+ u8 num_rx_filters;
+ u8 instance_id;
+ u64 cookie;
+};
+
+/**
+ * struct cfg80211_pmk_conf - PMK configuration
+ *
+ * @aa: authenticator address
+ * @pmk_len: PMK length in bytes.
+ * @pmk: the PMK material
+ * @pmk_r0_name: PMK-R0 Name. NULL if not applicable (i.e., the PMK
+ * is not PMK-R0). When pmk_r0_name is not NULL, the pmk field
+ * holds PMK-R0.
+ */
+struct cfg80211_pmk_conf {
+ const u8 *aa;
+ u8 pmk_len;
+ const u8 *pmk;
+ const u8 *pmk_r0_name;
+};
+
+/**
+ * struct cfg80211_external_auth_params - Trigger External authentication.
+ *
+ * Commonly used across the external auth request and event interfaces.
+ *
+ * @action: action type / trigger for external authentication. Only significant
+ * for the authentication request event interface (driver to user space).
+ * @bssid: BSSID of the peer with which the authentication has
+ * to happen. Used by both the authentication request event and
+ * authentication response command interface.
+ * @ssid: SSID of the AP. Used by both the authentication request event and
+ * authentication response command interface.
+ * @key_mgmt_suite: AKM suite of the respective authentication. Used by the
+ * authentication request event interface.
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful authentication,
+ * use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space cannot give you
+ * the real status code for failures. Used only for the authentication
+ * response command interface (user space to driver).
+ * @pmkid: The identifier to refer a PMKSA.
+ */
+struct cfg80211_external_auth_params {
+ enum nl80211_external_auth_action action;
+ u8 bssid[ETH_ALEN] __aligned(2);
+ struct cfg80211_ssid ssid;
+ unsigned int key_mgmt_suite;
+ u16 status;
+ const u8 *pmkid;
+};
+
+/**
+ * struct cfg80211_ftm_responder_stats - FTM responder statistics
+ *
+ * @filled: bitflag of flags using the bits of &enum nl80211_ftm_stats to
+ * indicate the relevant values in this struct for them
+ * @success_num: number of FTM sessions in which all frames were successfully
+ * answered
+ * @partial_num: number of FTM sessions in which part of frames were
+ * successfully answered
+ * @failed_num: number of failed FTM sessions
+ * @asap_num: number of ASAP FTM sessions
+ * @non_asap_num: number of non-ASAP FTM sessions
+ * @total_duration_ms: total session durations - gives an indication
+ * of how much time the responder was busy
+ * @unknown_triggers_num: number of unknown FTM triggers - triggers from
+ * initiators that didn't successfully finish the negotiation phase with
+ * the responder
+ * @reschedule_requests_num: number of FTM reschedule requests - initiator asks
+ * for a new scheduling although it already has a scheduled FTM slot
+ * @out_of_window_triggers_num: total FTM triggers out of scheduled window
+ */
+struct cfg80211_ftm_responder_stats {
+ u32 filled;
+ u32 success_num;
+ u32 partial_num;
+ u32 failed_num;
+ u32 asap_num;
+ u32 non_asap_num;
+ u64 total_duration_ms;
+ u32 unknown_triggers_num;
+ u32 reschedule_requests_num;
+ u32 out_of_window_triggers_num;
+};
+
+/**
+ * struct cfg80211_pmsr_ftm_result - FTM result
+ * @failure_reason: if this measurement failed (PMSR status is
+ * %NL80211_PMSR_STATUS_FAILURE), this gives a more precise
+ * reason than just "failure"
+ * @burst_index: if reporting partial results, this is the index
+ * in [0 .. num_bursts-1] of the burst that's being reported
+ * @num_ftmr_attempts: number of FTM request frames transmitted
+ * @num_ftmr_successes: number of FTM request frames acked
+ * @busy_retry_time: if failure_reason is %NL80211_PMSR_FTM_FAILURE_PEER_BUSY,
+ * fill this to indicate in how many seconds a retry is deemed possible
+ * by the responder
+ * @num_bursts_exp: actual number of bursts exponent negotiated
+ * @burst_duration: actual burst duration negotiated
+ * @ftms_per_burst: actual FTMs per burst negotiated
+ * @lci_len: length of LCI information (if present)
+ * @civicloc_len: length of civic location information (if present)
+ * @lci: LCI data (may be %NULL)
+ * @civicloc: civic location data (may be %NULL)
+ * @rssi_avg: average RSSI over FTM action frames reported
+ * @rssi_spread: spread of the RSSI over FTM action frames reported
+ * @tx_rate: bitrate for transmitted FTM action frame response
+ * @rx_rate: bitrate of received FTM action frame
+ * @rtt_avg: average of RTTs measured (must have either this or @dist_avg)
+ * @rtt_variance: variance of RTTs measured (note that standard deviation is
+ * the square root of the variance)
+ * @rtt_spread: spread of the RTTs measured
+ * @dist_avg: average of distances (mm) measured
+ * (must have either this or @rtt_avg)
+ * @dist_variance: variance of distances measured (see also @rtt_variance)
+ * @dist_spread: spread of distances measured (see also @rtt_spread)
+ * @num_ftmr_attempts_valid: @num_ftmr_attempts is valid
+ * @num_ftmr_successes_valid: @num_ftmr_successes is valid
+ * @rssi_avg_valid: @rssi_avg is valid
+ * @rssi_spread_valid: @rssi_spread is valid
+ * @tx_rate_valid: @tx_rate is valid
+ * @rx_rate_valid: @rx_rate is valid
+ * @rtt_avg_valid: @rtt_avg is valid
+ * @rtt_variance_valid: @rtt_variance is valid
+ * @rtt_spread_valid: @rtt_spread is valid
+ * @dist_avg_valid: @dist_avg is valid
+ * @dist_variance_valid: @dist_variance is valid
+ * @dist_spread_valid: @dist_spread is valid
+ */
+struct cfg80211_pmsr_ftm_result {
+ const u8 *lci;
+ const u8 *civicloc;
+ unsigned int lci_len;
+ unsigned int civicloc_len;
+ enum nl80211_peer_measurement_ftm_failure_reasons failure_reason;
+ u32 num_ftmr_attempts, num_ftmr_successes;
+ s16 burst_index;
+ u8 busy_retry_time;
+ u8 num_bursts_exp;
+ u8 burst_duration;
+ u8 ftms_per_burst;
+ s32 rssi_avg;
+ s32 rssi_spread;
+ struct rate_info tx_rate, rx_rate;
+ s64 rtt_avg;
+ s64 rtt_variance;
+ s64 rtt_spread;
+ s64 dist_avg;
+ s64 dist_variance;
+ s64 dist_spread;
+
+ u16 num_ftmr_attempts_valid:1,
+ num_ftmr_successes_valid:1,
+ rssi_avg_valid:1,
+ rssi_spread_valid:1,
+ tx_rate_valid:1,
+ rx_rate_valid:1,
+ rtt_avg_valid:1,
+ rtt_variance_valid:1,
+ rtt_spread_valid:1,
+ dist_avg_valid:1,
+ dist_variance_valid:1,
+ dist_spread_valid:1;
+};
+
+/**
+ * struct cfg80211_pmsr_result - peer measurement result
+ * @addr: address of the peer
+ * @host_time: host time (use ktime_get_boottime(), adjusted to the time
+ * when the measurement was made)
+ * @ap_tsf: AP's TSF at measurement time
+ * @status: status of the measurement
+ * @final: if reporting partial results, mark this as the last one; if not
+ * reporting partial results always set this flag
+ * @ap_tsf_valid: indicates the @ap_tsf value is valid
+ * @type: type of the measurement reported, note that we only support reporting
+ * one type at a time, but you can report multiple results separately and
+ * they're all aggregated for userspace.
+ * @ftm: FTM result
+ */
+struct cfg80211_pmsr_result {
+ u64 host_time, ap_tsf;
+ enum nl80211_peer_measurement_status status;
+
+ u8 addr[ETH_ALEN];
+
+ u8 final:1,
+ ap_tsf_valid:1;
+
+ enum nl80211_peer_measurement_type type;
+
+ union {
+ struct cfg80211_pmsr_ftm_result ftm;
+ };
+};
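+
+/*
+ * Example (illustrative sketch, not part of the original header): a driver
+ * reporting a single, final FTM result for one peer and then completing the
+ * request via the cfg80211_pmsr_report()/cfg80211_pmsr_complete() helpers;
+ * the mydrv_ prefix and surrounding context are hypothetical.
+ *
+ *	static void mydrv_report_ftm(struct wireless_dev *wdev,
+ *				     struct cfg80211_pmsr_request *req,
+ *				     const u8 *peer, s64 rtt_avg)
+ *	{
+ *		struct cfg80211_pmsr_result res = {
+ *			.type = NL80211_PMSR_TYPE_FTM,
+ *			.status = NL80211_PMSR_STATUS_SUCCESS,
+ *			.host_time = ktime_get_boottime_ns(),
+ *			.final = 1,
+ *			.ftm.rtt_avg = rtt_avg,
+ *			.ftm.rtt_avg_valid = 1,
+ *		};
+ *
+ *		memcpy(res.addr, peer, ETH_ALEN);
+ *		cfg80211_pmsr_report(wdev, req, &res, GFP_KERNEL);
+ *		cfg80211_pmsr_complete(wdev, req, GFP_KERNEL);
+ *	}
+ */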
+
+/**
+ * struct cfg80211_pmsr_ftm_request_peer - FTM request data
+ * @requested: indicates FTM is requested
+ * @preamble: frame preamble to use
+ * @burst_period: burst period to use
+ * @asap: indicates to use ASAP mode
+ * @num_bursts_exp: number of bursts exponent
+ * @burst_duration: burst duration
+ * @ftms_per_burst: number of FTMs per burst
+ * @ftmr_retries: number of retries for FTM request
+ * @request_lci: request LCI information
+ * @request_civicloc: request civic location information
+ * @trigger_based: use trigger based ranging for the measurement
+ * If neither @trigger_based nor @non_trigger_based is set,
+ * EDCA based ranging will be used.
+ * @non_trigger_based: use non trigger based ranging for the measurement
+ * If neither @trigger_based nor @non_trigger_based is set,
+ * EDCA based ranging will be used.
+ * @lmr_feedback: negotiate for I2R LMR feedback. Only valid if either
+ * @trigger_based or @non_trigger_based is set.
+ * @bss_color: the bss color of the responder. Optional. Set to zero to
+ * indicate the driver should set the BSS color. Only valid if
+ * @non_trigger_based or @trigger_based is set.
+ *
+ * See also nl80211 for the respective attribute documentation.
+ */
+struct cfg80211_pmsr_ftm_request_peer {
+ enum nl80211_preamble preamble;
+ u16 burst_period;
+ u8 requested:1,
+ asap:1,
+ request_lci:1,
+ request_civicloc:1,
+ trigger_based:1,
+ non_trigger_based:1,
+ lmr_feedback:1;
+ u8 num_bursts_exp;
+ u8 burst_duration;
+ u8 ftms_per_burst;
+ u8 ftmr_retries;
+ u8 bss_color;
+};
+
+/**
+ * struct cfg80211_pmsr_request_peer - peer data for a peer measurement request
+ * @addr: MAC address
+ * @chandef: channel to use
+ * @report_ap_tsf: report the associated AP's TSF
+ * @ftm: FTM data, see &struct cfg80211_pmsr_ftm_request_peer
+ */
+struct cfg80211_pmsr_request_peer {
+ u8 addr[ETH_ALEN];
+ struct cfg80211_chan_def chandef;
+ u8 report_ap_tsf:1;
+ struct cfg80211_pmsr_ftm_request_peer ftm;
+};
+
+/**
+ * struct cfg80211_pmsr_request - peer measurement request
+ * @cookie: cookie, set by cfg80211
+ * @nl_portid: netlink portid - used by cfg80211
+ * @drv_data: driver data for this request, if required for aborting; it is
+ *	not freed or otherwise touched by cfg80211
+ * @mac_addr: MAC address used for (randomised) request
+ * @mac_addr_mask: MAC address mask used for randomisation, bits that
+ * are 0 in the mask should be randomised, bits that are 1 should
+ * be taken from the @mac_addr
+ * @list: used by cfg80211 to hold on to the request
+ * @timeout: timeout (in milliseconds) for the whole operation, if
+ * zero it means there's no timeout
+ * @n_peers: number of peers to do measurements with
+ * @peers: per-peer measurement request data
+ */
+struct cfg80211_pmsr_request {
+ u64 cookie;
+ void *drv_data;
+ u32 n_peers;
+ u32 nl_portid;
+
+ u32 timeout;
+
+ u8 mac_addr[ETH_ALEN] __aligned(2);
+ u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+
+ struct list_head list;
+
+ struct cfg80211_pmsr_request_peer peers[];
+};
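+
+/*
+ * Example (illustrative sketch): iterating the trailing @peers array in a
+ * driver's @start_pmsr handler; mydrv_queue_ftm() is a hypothetical helper
+ * that schedules the actual measurement.
+ *
+ *	static int mydrv_start_pmsr(struct wiphy *wiphy,
+ *				    struct wireless_dev *wdev,
+ *				    struct cfg80211_pmsr_request *req)
+ *	{
+ *		u32 i;
+ *
+ *		for (i = 0; i < req->n_peers; i++) {
+ *			struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+ *
+ *			if (!peer->ftm.requested)
+ *				return -EOPNOTSUPP;
+ *			mydrv_queue_ftm(wdev, req, peer);
+ *		}
+ *
+ *		return 0;
+ *	}
+ */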
+
+/**
+ * struct cfg80211_update_owe_info - OWE Information
+ *
+ * This structure provides information needed for the drivers to offload OWE
+ * (Opportunistic Wireless Encryption) processing to the user space.
+ *
+ * Commonly used across update_owe_info request and event interfaces.
+ *
+ * @peer: MAC address of the peer device for which the OWE processing
+ * has to be done.
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful OWE info
+ * processing, use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space
+ * cannot give you the real status code for failures. Used only for
+ * OWE update request command interface (user space to driver).
+ * @ie: IEs obtained from the peer or constructed by the user space. These are
+ * the IEs of the remote peer in the event from the host driver and
+ * the constructed IEs by the user space in the request interface.
+ * @ie_len: Length of IEs in octets.
+ */
+struct cfg80211_update_owe_info {
+ u8 peer[ETH_ALEN] __aligned(2);
+ u16 status;
+ const u8 *ie;
+ size_t ie_len;
+};
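+
+/*
+ * Example (illustrative sketch): a driver passing a peer's DH IEs up to
+ * user space with cfg80211_update_owe_info_event(); netdev, peer_addr,
+ * peer_ies and peer_ies_len are hypothetical driver-side variables.
+ *
+ *	struct cfg80211_update_owe_info owe = {
+ *		.ie = peer_ies,
+ *		.ie_len = peer_ies_len,
+ *	};
+ *
+ *	memcpy(owe.peer, peer_addr, ETH_ALEN);
+ *	cfg80211_update_owe_info_event(netdev, &owe, GFP_KERNEL);
+ */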
+
+/**
+ * struct mgmt_frame_regs - management frame registrations data
+ * @global_stypes: bitmap of management frame subtypes registered
+ * for the entire device
+ * @interface_stypes: bitmap of management frame subtypes registered
+ * for the given interface
+ * @global_mcast_stypes: mcast RX is needed globally for these subtypes
+ * @interface_mcast_stypes: mcast RX is needed on this interface
+ * for these subtypes
+ */
+struct mgmt_frame_regs {
+ u32 global_stypes, interface_stypes;
+ u32 global_mcast_stypes, interface_mcast_stypes;
+};
+
+/**
+ * struct cfg80211_ops - backend description for wireless configuration
+ *
+ * This struct is registered by fullmac card drivers and/or wireless stacks
+ * in order to handle configuration requests on their interfaces.
+ *
+ * All callbacks except where otherwise noted should return 0
+ * on success or a negative error code.
+ *
+ * All operations are invoked with the wiphy mutex held. The RTNL may be
+ * held in addition (due to wireless extensions) but this cannot be relied
+ * upon except in cases where documented below. Note that due to ordering,
+ * the RTNL also cannot be acquired in any handlers.
+ *
+ * @suspend: wiphy device needs to be suspended. The variable @wow will
+ * be %NULL or contain the enabled Wake-on-Wireless triggers that are
+ * configured for the device.
+ * @resume: wiphy device needs to be resumed
+ * @set_wakeup: Called when WoWLAN is enabled/disabled, use this callback
+ * to call device_set_wakeup_enable() to enable/disable wakeup from
+ * the device.
+ *
+ * @add_virtual_intf: create a new virtual interface with the given name,
+ * must set the struct wireless_dev's iftype. Beware: You must create
+ * the new netdev in the wiphy's network namespace! Returns the struct
+ * wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must
+ * also set the address member in the wdev.
+ * This additionally holds the RTNL to be able to do netdev changes.
+ *
+ * @del_virtual_intf: remove the virtual interface
+ * This additionally holds the RTNL to be able to do netdev changes.
+ *
+ * @change_virtual_intf: change type/configuration of virtual interface,
+ * keep the struct wireless_dev's iftype updated.
+ * This additionally holds the RTNL to be able to do netdev changes.
+ *
+ * @add_intf_link: Add a new MLO link to the given interface. Note that
+ * the wdev->link[] data structure has been updated, so the new link
+ * address is available.
+ * @del_intf_link: Remove an MLO link from the given interface.
+ *
+ * @add_key: add a key with the given parameters. @mac_addr will be %NULL
+ * when adding a group key. @link_id will be -1 for non-MLO connection.
+ * For MLO connection, @link_id will be >= 0 for group key and -1 for
+ * pairwise key, @mac_addr will be peer's MLD address for MLO pairwise key.
+ *
+ * @get_key: get information about the key with the given parameters.
+ * @mac_addr will be %NULL when requesting information for a group
+ * key. All pointers given to the @callback function need not be valid
+ * after it returns. This function should return an error if it is
+ * not possible to retrieve the key, -ENOENT if it doesn't exist.
+ * @link_id will be -1 for non-MLO connection. For MLO connection,
+ * @link_id will be >= 0 for group key and -1 for pairwise key, @mac_addr
+ * will be peer's MLD address for MLO pairwise key.
+ *
+ * @del_key: remove a key given the @mac_addr (%NULL for a group key)
+ * and @key_index, return -ENOENT if the key doesn't exist. @link_id will
+ * be -1 for non-MLO connection. For MLO connection, @link_id will be >= 0
+ * for group key and -1 for pairwise key, @mac_addr will be peer's MLD
+ * address for MLO pairwise key.
+ *
+ * @set_default_key: set the default key on an interface. @link_id will be >= 0
+ * for MLO connection and -1 for non-MLO connection.
+ *
+ * @set_default_mgmt_key: set the default management frame key on an interface.
+ * @link_id will be >= 0 for MLO connection and -1 for non-MLO connection.
+ *
+ * @set_default_beacon_key: set the default Beacon frame key on an interface.
+ * @link_id will be >= 0 for MLO connection and -1 for non-MLO connection.
+ *
+ * @set_rekey_data: give the data necessary for GTK rekeying to the driver
+ *
+ * @start_ap: Start acting in AP mode defined by the parameters.
+ * @change_beacon: Change the beacon parameters for an access point mode
+ * interface. This should reject the call when AP mode wasn't started.
+ * @stop_ap: Stop being an AP, including stopping beaconing.
+ *
+ * @add_station: Add a new station.
+ * @del_station: Remove a station
+ * @change_station: Modify a given station. Note that flag changes are only
+ *	loosely validated in cfg80211; in particular, the auth/assoc/authorized
+ *	flags might come to the driver in invalid combinations -- make sure to
+ *	check them, also against the existing state! Drivers must call
+ *	cfg80211_check_station_change() to validate the information.
+ * @get_station: get station information for the station identified by @mac
+ * @dump_station: dump station callback -- resume dump at index @idx
+ *
+ * @add_mpath: add a fixed mesh path
+ * @del_mpath: delete a given mesh path
+ * @change_mpath: change a given mesh path
+ * @get_mpath: get a mesh path for the given parameters
+ * @dump_mpath: dump mesh path callback -- resume dump at index @idx
+ * @get_mpp: get a mesh proxy path for the given parameters
+ * @dump_mpp: dump mesh proxy path callback -- resume dump at index @idx
+ * @join_mesh: join the mesh network with the specified parameters
+ * (invoked with the wireless_dev mutex held)
+ * @leave_mesh: leave the current mesh network
+ * (invoked with the wireless_dev mutex held)
+ *
+ * @get_mesh_config: Get the current mesh configuration
+ *
+ * @update_mesh_config: Update mesh parameters on a running mesh.
+ * The mask is a bitfield which tells us which parameters to
+ * set, and which to leave alone.
+ *
+ * @change_bss: Modify parameters for a given BSS.
+ *
+ * @set_txq_params: Set TX queue parameters
+ *
+ * @libertas_set_mesh_channel: Only for backward compatibility for libertas,
+ * as it doesn't implement join_mesh and needs to set the channel to
+ * join the mesh instead.
+ *
+ * @set_monitor_channel: Set the monitor mode channel for the device. If other
+ * interfaces are active this callback should reject the configuration.
+ * If no interfaces are active or the device is down, the channel should
+ * be stored for when a monitor interface becomes active.
+ *
+ * @scan: Request to do a scan. If returning zero, the scan request is given
+ *	to the driver, and will be valid until passed to cfg80211_scan_done().
+ * For scan results, call cfg80211_inform_bss(); you can call this outside
+ * the scan/scan_done bracket too.
+ * @abort_scan: Tell the driver to abort an ongoing scan. The driver shall
+ * indicate the status of the scan through cfg80211_scan_done().
+ *
+ * @auth: Request to authenticate with the specified peer
+ * (invoked with the wireless_dev mutex held)
+ * @assoc: Request to (re)associate with the specified peer
+ * (invoked with the wireless_dev mutex held)
+ * @deauth: Request to deauthenticate from the specified peer
+ * (invoked with the wireless_dev mutex held)
+ * @disassoc: Request to disassociate from the specified peer
+ * (invoked with the wireless_dev mutex held)
+ *
+ * @connect: Connect to the ESS with the specified parameters. When connected,
+ * call cfg80211_connect_result()/cfg80211_connect_bss() with status code
+ * %WLAN_STATUS_SUCCESS. If the connection fails for some reason, call
+ * cfg80211_connect_result()/cfg80211_connect_bss() with the status code
+ * from the AP or cfg80211_connect_timeout() if no frame with status code
+ * was received.
+ * The driver is allowed to roam to other BSSes within the ESS when the
+ * other BSS matches the connect parameters. When such roaming is initiated
+ * by the driver, the driver is expected to verify that the target matches
+ * the configured security parameters and to use Reassociation Request
+ * frame instead of Association Request frame.
+ * The connect function can also be used to request the driver to perform a
+ * specific roam when connected to an ESS. In that case, the prev_bssid
+ * parameter is set to the BSSID of the currently associated BSS as an
+ * indication of requesting reassociation.
+ * In both the driver-initiated and new connect() call initiated roaming
+ * cases, the result of roaming is indicated with a call to
+ * cfg80211_roamed(). (invoked with the wireless_dev mutex held)
+ * @update_connect_params: Update the connect parameters while connected to a
+ * BSS. The updated parameters can be used by driver/firmware for
+ * subsequent BSS selection (roaming) decisions and to form the
+ * Authentication/(Re)Association Request frames. This call does not
+ * request an immediate disassociation or reassociation with the current
+ * BSS, i.e., this impacts only subsequent (re)associations. The bits in
+ * changed are defined in &enum cfg80211_connect_params_changed.
+ * (invoked with the wireless_dev mutex held)
+ * @disconnect: Disconnect from the BSS/ESS or stop connection attempts if
+ * connection is in progress. Once done, call cfg80211_disconnected() in
+ * case connection was already established (invoked with the
+ * wireless_dev mutex held), otherwise call cfg80211_connect_timeout().
+ *
+ * @join_ibss: Join the specified IBSS (or create if necessary). Once done, call
+ * cfg80211_ibss_joined(), also call that function when changing BSSID due
+ * to a merge.
+ * (invoked with the wireless_dev mutex held)
+ * @leave_ibss: Leave the IBSS.
+ * (invoked with the wireless_dev mutex held)
+ *
+ * @set_mcast_rate: Set the specified multicast rate (only if vif is in ADHOC or
+ * MESH mode)
+ *
+ * @set_wiphy_params: Notify that wiphy parameters have changed;
+ * @changed bitfield (see &enum wiphy_params_flags) describes which values
+ * have changed. The actual parameter values are available in
+ * struct wiphy. If returning an error, no value should be changed.
+ *
+ * @set_tx_power: set the transmit power according to the parameters,
+ * the power passed is in mBm, to get dBm use MBM_TO_DBM(). The
+ * wdev may be %NULL if power was set for the wiphy, and will
+ * always be %NULL unless the driver supports per-vif TX power
+ * (as advertised by the nl80211 feature flag.)
+ * @get_tx_power: store the current TX power into the dbm variable;
+ * return 0 if successful
+ *
+ * @rfkill_poll: polls the hw rfkill line, use cfg80211 reporting
+ * functions to adjust rfkill hw state
+ *
+ * @dump_survey: get site survey information.
+ *
+ * @remain_on_channel: Request the driver to remain awake on the specified
+ * channel for the specified duration to complete an off-channel
+ * operation (e.g., public action frame exchange). When the driver is
+ * ready on the requested channel, it must indicate this with an event
+ * notification by calling cfg80211_ready_on_channel().
+ * @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation.
+ * This allows the operation to be terminated prior to timeout based on
+ * the duration value.
+ * @mgmt_tx: Transmit a management frame.
+ * @mgmt_tx_cancel_wait: Cancel the wait time from transmitting a management
+ * frame on another channel
+ *
+ * @testmode_cmd: run a test mode command; @wdev may be %NULL
+ * @testmode_dump: Implement a test mode dump. The cb->args[2] and up may be
+ * used by the function, but 0 and 1 must not be touched. Additionally,
+ * returning error codes other than -ENOBUFS and -ENOENT will terminate the
+ * dump and return to userspace with an error, so be careful. If any data
+ * was passed in from userspace then the data/len arguments will be present
+ * and point to the data contained in %NL80211_ATTR_TESTDATA.
+ *
+ * @set_bitrate_mask: set the bitrate mask configuration
+ *
+ * @set_pmksa: Cache a PMKID for a BSSID. This is mostly useful for fullmac
+ * devices running firmware capable of generating the (re)association
+ * RSN IE. It allows for faster roaming between WPA2 BSSIDs.
+ * @del_pmksa: Delete a cached PMKID.
+ * @flush_pmksa: Flush all cached PMKIDs.
+ * @set_power_mgmt: Configure WLAN power management. A timeout value of -1
+ * allows the driver to adjust the dynamic ps timeout value.
+ * @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold.
+ * After configuration, the driver should (soon) send an event indicating
+ * the current level is above/below the configured threshold; this may
+ * need some care when the configuration is changed (without first being
+ * disabled.)
+ * @set_cqm_rssi_range_config: Configure two RSSI thresholds in the
+ * connection quality monitor. An event is to be sent only when the
+ * signal level is found to be outside the two values. The driver should
+ * set %NL80211_EXT_FEATURE_CQM_RSSI_LIST if this method is implemented.
+ * If it is provided then there's no point providing @set_cqm_rssi_config.
+ * @set_cqm_txe_config: Configure connection quality monitor TX error
+ * thresholds.
+ * @sched_scan_start: Tell the driver to start a scheduled scan.
+ * @sched_scan_stop: Tell the driver to stop an ongoing scheduled scan with
+ * given request id. This call must stop the scheduled scan and be ready
+ * for starting a new one before it returns, i.e. @sched_scan_start may be
+ * called immediately after that again and should not fail in that case.
+ * The driver should not call cfg80211_sched_scan_stopped() for a requested
+ * stop (when this method returns 0).
+ *
+ * @update_mgmt_frame_registrations: Notify the driver that management frame
+ * registrations were updated. The callback is allowed to sleep.
+ *
+ * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
+ * Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
+ * reject TX/RX mask combinations they cannot support by returning -EINVAL
+ * (also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX).
+ *
+ * @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant).
+ *
+ * @tdls_mgmt: Transmit a TDLS management frame.
+ * @tdls_oper: Perform a high-level TDLS operation (e.g. TDLS link setup).
+ *
+ * @probe_client: probe an associated client, must return a cookie that it
+ * later passes to cfg80211_probe_status().
+ *
+ * @set_noack_map: Set the NoAck Map for the TIDs.
+ *
+ * @get_channel: Get the current operating channel for the virtual interface.
+ * For monitor interfaces, it should return %NULL unless there's a single
+ * current monitoring channel.
+ *
+ * @start_p2p_device: Start the given P2P device.
+ * @stop_p2p_device: Stop the given P2P device.
+ *
+ * @set_mac_acl: Sets MAC address control list in AP and P2P GO mode.
+ * Parameters include ACL policy, an array of MAC address of stations
+ * and the number of MAC addresses. If there is already a list in driver
+ * this new list replaces the existing one. The driver has to clear its ACL
+ * when the number of MAC address entries passed is 0. Drivers which
+ * advertise the support for MAC based ACL have to implement this callback.
+ *
+ * @start_radar_detection: Start radar detection in the driver.
+ *
+ * @end_cac: End running CAC, probably because a related CAC
+ * was finished on another phy.
+ *
+ * @update_ft_ies: Provide updated Fast BSS Transition information to the
+ * driver. If the SME is in the driver/firmware, this information can be
+ * used in building Authentication and Reassociation Request frames.
+ *
+ * @crit_proto_start: Indicates a critical protocol needs more link reliability
+ * for a given duration (milliseconds). The protocol is provided so the
+ * driver can take the most appropriate actions.
+ * @crit_proto_stop: Indicates critical protocol no longer needs increased link
+ * reliability. This operation can not fail.
+ * @set_coalesce: Set coalesce parameters.
+ *
+ * @channel_switch: initiate channel-switch procedure (with CSA). The driver
+ *	is responsible for verifying whether the switch is possible. Since this
+ *	is inherently tricky, the driver may decide to disconnect an interface
+ *	later with cfg80211_stop_iface(). This doesn't mean the driver can
+ *	accept everything; it should do its best to verify requests and reject
+ *	them as soon as possible.
+ *
+ * @set_qos_map: Set QoS mapping information to the driver
+ *
+ * @set_ap_chanwidth: Set the AP (including P2P GO) mode channel width for the
+ * given interface. This is used e.g. for dynamic HT 20/40 MHz channel width
+ * changes during the lifetime of the BSS.
+ *
+ * @add_tx_ts: validate (if admitted_time is 0) or add a TX TS to the device
+ * with the given parameters; action frame exchange has been handled by
+ * userspace so this just has to modify the TX path to take the TS into
+ * account.
+ * If the admitted time is 0 just validate the parameters to make sure
+ * the session can be created at all; it is valid to just always return
+ * success for that but that may result in inefficient behaviour (handshake
+ * with the peer followed by immediate teardown when the addition is later
+ * rejected)
+ * @del_tx_ts: remove an existing TX TS
+ *
+ * @join_ocb: join the OCB network with the specified parameters
+ * (invoked with the wireless_dev mutex held)
+ * @leave_ocb: leave the current OCB network
+ * (invoked with the wireless_dev mutex held)
+ *
+ * @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver
+ * is responsible for continually initiating channel-switching operations
+ * and returning to the base channel for communication with the AP.
+ * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both
+ * peers must be on the base channel when the call completes.
+ * @start_nan: Start the NAN interface.
+ * @stop_nan: Stop the NAN interface.
+ * @add_nan_func: Add a NAN function. Returns negative value on failure.
+ * On success @nan_func ownership is transferred to the driver and
+ * it may access it outside of the scope of this function. The driver
+ * should free the @nan_func when no longer needed by calling
+ * cfg80211_free_nan_func().
+ * On success the driver should assign an instance_id in the
+ * provided @nan_func.
+ * @del_nan_func: Delete a NAN function.
+ * @nan_change_conf: changes NAN configuration. The changed parameters must
+ * be specified in @changes (using &enum cfg80211_nan_conf_changes);
+ * all other parameters must be ignored.
+ *
+ * @set_multicast_to_unicast: configure multicast to unicast conversion for BSS
+ *
+ * @get_txq_stats: Get TXQ stats for interface or phy. If wdev is %NULL, this
+ * function should return phy stats, and interface stats otherwise.
+ *
+ * @set_pmk: configure the PMK to be used for offloaded 802.1X 4-Way handshake.
+ * If not deleted through @del_pmk the PMK remains valid until disconnect
+ * upon which the driver should clear it.
+ * (invoked with the wireless_dev mutex held)
+ * @del_pmk: delete the previously configured PMK for the given authenticator.
+ * (invoked with the wireless_dev mutex held)
+ *
+ * @external_auth: indicates result of offloaded authentication processing from
+ * user space
+ *
+ * @tx_control_port: TX a control port frame (EAPoL). The noencrypt parameter
+ * tells the driver that the frame should not be encrypted.
+ *
+ * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available.
+ * Statistics should be cumulative, currently no way to reset is provided.
+ * @start_pmsr: start peer measurement (e.g. FTM)
+ * @abort_pmsr: abort peer measurement
+ *
+ * @update_owe_info: Provide updated OWE info to driver. Driver implementing SME
+ * but offloading OWE processing to the user space will get the updated
+ * DH IE through this interface.
+ *
+ * @probe_mesh_link: Probe a direct mesh peer's link quality by sending a data
+ *	frame, overruling the HWMP path selection algorithm.
+ * @set_tid_config: TID specific configuration, this can be peer or BSS specific
+ * This callback may sleep.
+ * @reset_tid_config: Reset TID specific configuration for the peer, for the
+ * given TIDs. This callback may sleep.
+ *
+ * @set_sar_specs: Update the SAR (TX power) settings.
+ *
+ * @color_change: Initiate a color change.
+ *
+ * @set_fils_aad: Set FILS AAD data to the AP driver so that the driver can use
+ * those to decrypt (Re)Association Request and encrypt (Re)Association
+ * Response frame.
+ *
+ * @set_radar_background: Configure dedicated offchannel chain available for
+ * radar/CAC detection on some hw. This chain can't be used to transmit
+ * or receive frames and it is bound to a running wdev.
+ * Background radar/CAC detection makes it possible to avoid the CAC
+ * downtime when switching to a different channel during CAC detection
+ * on the selected radar channel.
+ * The caller is expected to set chandef pointer to NULL in order to
+ * disable background CAC/radar detection.
+ * @add_link_station: Add a link to a station.
+ * @mod_link_station: Modify a link of a station.
+ * @del_link_station: Remove a link of a station.
+ */
+struct cfg80211_ops {
+ int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
+ int (*resume)(struct wiphy *wiphy);
+ void (*set_wakeup)(struct wiphy *wiphy, bool enabled);
+
+ struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy,
+ const char *name,
+ unsigned char name_assign_type,
+ enum nl80211_iftype type,
+ struct vif_params *params);
+ int (*del_virtual_intf)(struct wiphy *wiphy,
+ struct wireless_dev *wdev);
+ int (*change_virtual_intf)(struct wiphy *wiphy,
+ struct net_device *dev,
+ enum nl80211_iftype type,
+ struct vif_params *params);
+
+ int (*add_intf_link)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ unsigned int link_id);
+ void (*del_intf_link)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ unsigned int link_id);
+
+ int (*add_key)(struct wiphy *wiphy, struct net_device *netdev,
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params);
+ int (*get_key)(struct wiphy *wiphy, struct net_device *netdev,
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, void *cookie,
+ void (*callback)(void *cookie, struct key_params*));
+ int (*del_key)(struct wiphy *wiphy, struct net_device *netdev,
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr);
+ int (*set_default_key)(struct wiphy *wiphy,
+ struct net_device *netdev, int link_id,
+ u8 key_index, bool unicast, bool multicast);
+ int (*set_default_mgmt_key)(struct wiphy *wiphy,
+ struct net_device *netdev, int link_id,
+ u8 key_index);
+ int (*set_default_beacon_key)(struct wiphy *wiphy,
+ struct net_device *netdev,
+ int link_id,
+ u8 key_index);
+
+ int (*start_ap)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ap_settings *settings);
+ int (*change_beacon)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_beacon_data *info);
+ int (*stop_ap)(struct wiphy *wiphy, struct net_device *dev,
+ unsigned int link_id);
+
+ int (*add_station)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac,
+ struct station_parameters *params);
+ int (*del_station)(struct wiphy *wiphy, struct net_device *dev,
+ struct station_del_parameters *params);
+ int (*change_station)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac,
+ struct station_parameters *params);
+ int (*get_station)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac, struct station_info *sinfo);
+ int (*dump_station)(struct wiphy *wiphy, struct net_device *dev,
+ int idx, u8 *mac, struct station_info *sinfo);
+
+ int (*add_mpath)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *dst, const u8 *next_hop);
+ int (*del_mpath)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *dst);
+ int (*change_mpath)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *dst, const u8 *next_hop);
+ int (*get_mpath)(struct wiphy *wiphy, struct net_device *dev,
+ u8 *dst, u8 *next_hop, struct mpath_info *pinfo);
+ int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev,
+ int idx, u8 *dst, u8 *next_hop,
+ struct mpath_info *pinfo);
+ int (*get_mpp)(struct wiphy *wiphy, struct net_device *dev,
+ u8 *dst, u8 *mpp, struct mpath_info *pinfo);
+ int (*dump_mpp)(struct wiphy *wiphy, struct net_device *dev,
+ int idx, u8 *dst, u8 *mpp,
+ struct mpath_info *pinfo);
+ int (*get_mesh_config)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct mesh_config *conf);
+ int (*update_mesh_config)(struct wiphy *wiphy,
+ struct net_device *dev, u32 mask,
+ const struct mesh_config *nconf);
+ int (*join_mesh)(struct wiphy *wiphy, struct net_device *dev,
+ const struct mesh_config *conf,
+ const struct mesh_setup *setup);
+ int (*leave_mesh)(struct wiphy *wiphy, struct net_device *dev);
+
+ int (*join_ocb)(struct wiphy *wiphy, struct net_device *dev,
+ struct ocb_setup *setup);
+ int (*leave_ocb)(struct wiphy *wiphy, struct net_device *dev);
+
+ int (*change_bss)(struct wiphy *wiphy, struct net_device *dev,
+ struct bss_parameters *params);
+
+ int (*set_txq_params)(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_txq_params *params);
+
+ int (*libertas_set_mesh_channel)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct ieee80211_channel *chan);
+
+ int (*set_monitor_channel)(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef);
+
+ int (*scan)(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request);
+ void (*abort_scan)(struct wiphy *wiphy, struct wireless_dev *wdev);
+
+ int (*auth)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_auth_request *req);
+ int (*assoc)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_assoc_request *req);
+ int (*deauth)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_deauth_request *req);
+ int (*disassoc)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_disassoc_request *req);
+
+ int (*connect)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+ int (*update_connect_params)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_connect_params *sme,
+ u32 changed);
+ int (*disconnect)(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code);
+
+ int (*join_ibss)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ibss_params *params);
+ int (*leave_ibss)(struct wiphy *wiphy, struct net_device *dev);
+
+ int (*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev,
+ int rate[NUM_NL80211_BANDS]);
+
+ int (*set_wiphy_params)(struct wiphy *wiphy, u32 changed);
+
+ int (*set_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ enum nl80211_tx_power_setting type, int mbm);
+ int (*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int *dbm);
+
+ void (*rfkill_poll)(struct wiphy *wiphy);
+
+#ifdef CONFIG_NL80211_TESTMODE
+ int (*testmode_cmd)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ void *data, int len);
+ int (*testmode_dump)(struct wiphy *wiphy, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ void *data, int len);
+#endif
+
+ int (*set_bitrate_mask)(struct wiphy *wiphy,
+ struct net_device *dev,
+ unsigned int link_id,
+ const u8 *peer,
+ const struct cfg80211_bitrate_mask *mask);
+
+ int (*dump_survey)(struct wiphy *wiphy, struct net_device *netdev,
+ int idx, struct survey_info *info);
+
+ int (*set_pmksa)(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa);
+ int (*del_pmksa)(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa);
+ int (*flush_pmksa)(struct wiphy *wiphy, struct net_device *netdev);
+
+ int (*remain_on_channel)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct ieee80211_channel *chan,
+ unsigned int duration,
+ u64 *cookie);
+ int (*cancel_remain_on_channel)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u64 cookie);
+
+ int (*mgmt_tx)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params,
+ u64 *cookie);
+ int (*mgmt_tx_cancel_wait)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u64 cookie);
+
+ int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev,
+ bool enabled, int timeout);
+
+ int (*set_cqm_rssi_config)(struct wiphy *wiphy,
+ struct net_device *dev,
+ s32 rssi_thold, u32 rssi_hyst);
+
+ int (*set_cqm_rssi_range_config)(struct wiphy *wiphy,
+ struct net_device *dev,
+ s32 rssi_low, s32 rssi_high);
+
+ int (*set_cqm_txe_config)(struct wiphy *wiphy,
+ struct net_device *dev,
+ u32 rate, u32 pkts, u32 intvl);
+
+ void (*update_mgmt_frame_registrations)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct mgmt_frame_regs *upd);
+
+ int (*set_antenna)(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant);
+ int (*get_antenna)(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant);
+
+ int (*sched_scan_start)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request);
+ int (*sched_scan_stop)(struct wiphy *wiphy, struct net_device *dev,
+ u64 reqid);
+
+ int (*set_rekey_data)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_gtk_rekey_data *data);
+
+ int (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, u32 peer_capability,
+ bool initiator, const u8 *buf, size_t len);
+ int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, enum nl80211_tdls_operation oper);
+
+ int (*probe_client)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u64 *cookie);
+
+ int (*set_noack_map)(struct wiphy *wiphy,
+ struct net_device *dev,
+ u16 noack_map);
+
+ int (*get_channel)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ unsigned int link_id,
+ struct cfg80211_chan_def *chandef);
+
+ int (*start_p2p_device)(struct wiphy *wiphy,
+ struct wireless_dev *wdev);
+ void (*stop_p2p_device)(struct wiphy *wiphy,
+ struct wireless_dev *wdev);
+
+ int (*set_mac_acl)(struct wiphy *wiphy, struct net_device *dev,
+ const struct cfg80211_acl_data *params);
+
+ int (*start_radar_detection)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_chan_def *chandef,
+ u32 cac_time_ms);
+ void (*end_cac)(struct wiphy *wiphy,
+ struct net_device *dev);
+ int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_update_ft_ies_params *ftie);
+ int (*crit_proto_start)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ enum nl80211_crit_proto_id protocol,
+ u16 duration);
+ void (*crit_proto_stop)(struct wiphy *wiphy,
+ struct wireless_dev *wdev);
+ int (*set_coalesce)(struct wiphy *wiphy,
+ struct cfg80211_coalesce *coalesce);
+
+ int (*channel_switch)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_csa_settings *params);
+
+ int (*set_qos_map)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_qos_map *qos_map);
+
+ int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev,
+ unsigned int link_id,
+ struct cfg80211_chan_def *chandef);
+
+ int (*add_tx_ts)(struct wiphy *wiphy, struct net_device *dev,
+ u8 tsid, const u8 *peer, u8 user_prio,
+ u16 admitted_time);
+ int (*del_tx_ts)(struct wiphy *wiphy, struct net_device *dev,
+ u8 tsid, const u8 *peer);
+
+ int (*tdls_channel_switch)(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *addr, u8 oper_class,
+ struct cfg80211_chan_def *chandef);
+ void (*tdls_cancel_channel_switch)(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *addr);
+ int (*start_nan)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_nan_conf *conf);
+ void (*stop_nan)(struct wiphy *wiphy, struct wireless_dev *wdev);
+ int (*add_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_nan_func *nan_func);
+ void (*del_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ u64 cookie);
+ int (*nan_change_conf)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_nan_conf *conf,
+ u32 changes);
+
+ int (*set_multicast_to_unicast)(struct wiphy *wiphy,
+ struct net_device *dev,
+ const bool enabled);
+
+ int (*get_txq_stats)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_txq_stats *txqstats);
+
+ int (*set_pmk)(struct wiphy *wiphy, struct net_device *dev,
+ const struct cfg80211_pmk_conf *conf);
+ int (*del_pmk)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *aa);
+ int (*external_auth)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_external_auth_params *params);
+
+ int (*tx_control_port)(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *buf, size_t len,
+ const u8 *dest, const __be16 proto,
+ const bool noencrypt, int link_id,
+ u64 *cookie);
+
+ int (*get_ftm_responder_stats)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_ftm_responder_stats *ftm_stats);
+
+ int (*start_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *request);
+ void (*abort_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *request);
+ int (*update_owe_info)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_update_owe_info *owe_info);
+ int (*probe_mesh_link)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *buf, size_t len);
+ int (*set_tid_config)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_tid_config *tid_conf);
+ int (*reset_tid_config)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 tids);
+ int (*set_sar_specs)(struct wiphy *wiphy,
+ struct cfg80211_sar_specs *sar);
+ int (*color_change)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_color_change_settings *params);
+ int (*set_fils_aad)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_fils_aad *fils_aad);
+ int (*set_radar_background)(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef);
+ int (*add_link_station)(struct wiphy *wiphy, struct net_device *dev,
+ struct link_station_parameters *params);
+ int (*mod_link_station)(struct wiphy *wiphy, struct net_device *dev,
+ struct link_station_parameters *params);
+ int (*del_link_station)(struct wiphy *wiphy, struct net_device *dev,
+ struct link_station_del_parameters *params);
+};
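+
+/*
+ * Example (illustrative sketch): a minimal fullmac driver wiring up a
+ * &struct cfg80211_ops and registering a wiphy. Only a few handlers are
+ * shown; the mydrv_* callbacks and struct mydrv_priv are hypothetical,
+ * and error unwinding (wiphy_free()) is omitted for brevity.
+ *
+ *	static const struct cfg80211_ops mydrv_cfg_ops = {
+ *		.scan = mydrv_scan,
+ *		.connect = mydrv_connect,
+ *		.disconnect = mydrv_disconnect,
+ *	};
+ *
+ *	static int mydrv_setup_wiphy(struct device *dev)
+ *	{
+ *		struct wiphy *wiphy;
+ *
+ *		wiphy = wiphy_new(&mydrv_cfg_ops, sizeof(struct mydrv_priv));
+ *		if (!wiphy)
+ *			return -ENOMEM;
+ *
+ *		set_wiphy_dev(wiphy, dev);
+ *		wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ *
+ *		return wiphy_register(wiphy);
+ *	}
+ */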
+
+/*
+ * wireless hardware and networking interfaces structures
+ * and registration/helper functions
+ */
+
+/**
+ * enum wiphy_flags - wiphy capability flags
+ *
+ * @WIPHY_FLAG_SPLIT_SCAN_6GHZ: if set to true, the scan request will be split
+ * into two, first for legacy bands and second for UHB.
+ * @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this
+ * wiphy at all
+ * @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled
+ * by default -- this flag will be set depending on the kernel's default
+ * on wiphy_new(), but can be changed by the driver if it has a good
+ * reason to override the default
+ * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station
+ * on a VLAN interface). This flag also serves an extra purpose of
+ * supporting 4ADDR AP mode on devices which do not support AP/VLAN iftype.
+ * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station
+ * @WIPHY_FLAG_CONTROL_PORT_PROTOCOL: This device supports setting the
+ * control port protocol ethertype. The device also honours the
+ * control_port_no_encrypt flag.
+ * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN.
+ * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing
+ * auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH.
+ * @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the
+ * firmware.
+ * @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP.
+ * @WIPHY_FLAG_SUPPORTS_TDLS: The device supports TDLS (802.11z) operation.
+ * @WIPHY_FLAG_TDLS_EXTERNAL_SETUP: The device does not handle TDLS (802.11z)
+ * link setup/discovery operations internally. Setup, discovery and
+ * teardown packets should be sent through the @NL80211_CMD_TDLS_MGMT
+ * command. When this flag is not set, @NL80211_CMD_TDLS_OPER should be
+ * used for asking the driver/firmware to perform a TDLS operation.
+ * @WIPHY_FLAG_HAVE_AP_SME: device integrates AP SME
+ * @WIPHY_FLAG_REPORTS_OBSS: the device will report beacons from other BSSes
+ * when there are virtual interfaces in AP mode by calling
+ * cfg80211_report_obss_beacon().
+ * @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD: When operating as an AP, the device
+ * responds to probe-requests in hardware.
+ * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX.
+ * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call.
+ * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
+ * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
+ * beaconing mode (AP, IBSS, Mesh, ...).
+ * @WIPHY_FLAG_HAS_STATIC_WEP: The device supports static WEP key installation
+ * before connection.
+ * @WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK: The device supports bigger kek and kck keys
+ * @WIPHY_FLAG_SUPPORTS_MLO: This is a temporary flag gating the MLO APIs,
+ * in order to not have them reachable in normal drivers, until we have
+ * complete feature/interface combinations/etc. advertisement. No driver
+ * should set this flag for now.
+ */
+enum wiphy_flags {
+ WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0),
+ WIPHY_FLAG_SUPPORTS_MLO = BIT(1),
+ WIPHY_FLAG_SPLIT_SCAN_6GHZ = BIT(2),
+ WIPHY_FLAG_NETNS_OK = BIT(3),
+ WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4),
+ WIPHY_FLAG_4ADDR_AP = BIT(5),
+ WIPHY_FLAG_4ADDR_STATION = BIT(6),
+ WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7),
+ WIPHY_FLAG_IBSS_RSN = BIT(8),
+ WIPHY_FLAG_MESH_AUTH = BIT(10),
+ /* use hole at 11 */
+ /* use hole at 12 */
+ WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13),
+ WIPHY_FLAG_AP_UAPSD = BIT(14),
+ WIPHY_FLAG_SUPPORTS_TDLS = BIT(15),
+ WIPHY_FLAG_TDLS_EXTERNAL_SETUP = BIT(16),
+ WIPHY_FLAG_HAVE_AP_SME = BIT(17),
+ WIPHY_FLAG_REPORTS_OBSS = BIT(18),
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19),
+ WIPHY_FLAG_OFFCHAN_TX = BIT(20),
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
+ WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22),
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23),
+ WIPHY_FLAG_HAS_STATIC_WEP = BIT(24),
+};
+
+/**
+ * struct ieee80211_iface_limit - limit on certain interface types
+ * @max: maximum number of interfaces of these types
+ * @types: interface types (bits)
+ */
+struct ieee80211_iface_limit {
+ u16 max;
+ u16 types;
+};
+
+/**
+ * struct ieee80211_iface_combination - possible interface combination
+ *
+ * With this structure the driver can describe which interface
+ * combinations it supports concurrently.
+ *
+ * Examples:
+ *
+ * 1. Allow #STA <= 1, #AP <= 1, matching BI, channels = 1, 2 total:
+ *
+ * .. code-block:: c
+ *
+ * struct ieee80211_iface_limit limits1[] = {
+ * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
+ * { .max = 1, .types = BIT(NL80211_IFTYPE_AP), },
+ * };
+ * struct ieee80211_iface_combination combination1 = {
+ * .limits = limits1,
+ * .n_limits = ARRAY_SIZE(limits1),
+ * .max_interfaces = 2,
+ * .beacon_int_infra_match = true,
+ * };
+ *
+ *
+ * 2. Allow #{AP, P2P-GO} <= 8, channels = 1, 8 total:
+ *
+ * .. code-block:: c
+ *
+ * struct ieee80211_iface_limit limits2[] = {
+ * { .max = 8, .types = BIT(NL80211_IFTYPE_AP) |
+ * BIT(NL80211_IFTYPE_P2P_GO), },
+ * };
+ * struct ieee80211_iface_combination combination2 = {
+ * .limits = limits2,
+ * .n_limits = ARRAY_SIZE(limits2),
+ * .max_interfaces = 8,
+ * .num_different_channels = 1,
+ * };
+ *
+ *
+ * 3. Allow #STA <= 1, #{P2P-client,P2P-GO} <= 3 on two channels, 4 total.
+ *
+ * This allows for an infrastructure connection and three P2P connections.
+ *
+ * .. code-block:: c
+ *
+ * struct ieee80211_iface_limit limits3[] = {
+ * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
+ * { .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) |
+ * BIT(NL80211_IFTYPE_P2P_CLIENT), },
+ * };
+ * struct ieee80211_iface_combination combination3 = {
+ * .limits = limits3,
+ * .n_limits = ARRAY_SIZE(limits3),
+ * .max_interfaces = 4,
+ * .num_different_channels = 2,
+ * };
+ *
+ */
+struct ieee80211_iface_combination {
+ /**
+ * @limits:
+ * limits for the given interface types
+ */
+ const struct ieee80211_iface_limit *limits;
+
+ /**
+ * @num_different_channels:
+ * can use up to this many different channels
+ */
+ u32 num_different_channels;
+
+ /**
+ * @max_interfaces:
+ * maximum number of interfaces in total allowed in this group
+ */
+ u16 max_interfaces;
+
+ /**
+ * @n_limits:
+ * number of limitations
+ */
+ u8 n_limits;
+
+ /**
+ * @beacon_int_infra_match:
+ * In this combination, the beacon intervals between infrastructure
+ * and AP types must match. This is required only in special cases.
+ */
+ bool beacon_int_infra_match;
+
+ /**
+ * @radar_detect_widths:
+ * bitmap of channel widths supported for radar detection
+ */
+ u8 radar_detect_widths;
+
+ /**
+ * @radar_detect_regions:
+ * bitmap of regions supported for radar detection
+ */
+ u8 radar_detect_regions;
+
+ /**
+ * @beacon_int_min_gcd:
+ * This interface combination supports different beacon intervals.
+ *
+ * = 0
+ * all beacon intervals for the different interfaces must be the same.
+ * > 0
+ * any beacon interval for an interface that is part of this combination
+ * AND the GCD of all beacon intervals from beaconing interfaces of this
+ * combination must be greater than or equal to this value.
+ */
+ u32 beacon_int_min_gcd;
+};
+
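+/**
+ * struct ieee80211_txrx_stypes - management frame subtype bitmaps
+ * @tx: bitmap of management frame subtypes that can be transmitted
+ * @rx: bitmap of management frame subtypes that can be registered for RX
+ */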
+struct ieee80211_txrx_stypes {
+ u16 tx, rx;
+};
+
+/**
+ * enum wiphy_wowlan_support_flags - WoWLAN support flags
+ * @WIPHY_WOWLAN_ANY: supports wakeup for the special "any"
+ * trigger that keeps the device operating as-is and
+ * wakes up the host on any activity, for example a
+ * received packet that passed filtering; note that the
+ * packet should be preserved in that case
+ * @WIPHY_WOWLAN_MAGIC_PKT: supports wakeup on magic packet
+ * (see nl80211.h)
+ * @WIPHY_WOWLAN_DISCONNECT: supports wakeup on disconnect
+ * @WIPHY_WOWLAN_SUPPORTS_GTK_REKEY: supports GTK rekeying while asleep
+ * @WIPHY_WOWLAN_GTK_REKEY_FAILURE: supports wakeup on GTK rekey failure
+ * @WIPHY_WOWLAN_EAP_IDENTITY_REQ: supports wakeup on EAP identity request
+ * @WIPHY_WOWLAN_4WAY_HANDSHAKE: supports wakeup on 4-way handshake failure
+ * @WIPHY_WOWLAN_RFKILL_RELEASE: supports wakeup on RF-kill release
+ * @WIPHY_WOWLAN_NET_DETECT: supports wakeup on network detection
+ */
+enum wiphy_wowlan_support_flags {
+ WIPHY_WOWLAN_ANY = BIT(0),
+ WIPHY_WOWLAN_MAGIC_PKT = BIT(1),
+ WIPHY_WOWLAN_DISCONNECT = BIT(2),
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY = BIT(3),
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE = BIT(4),
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ = BIT(5),
+ WIPHY_WOWLAN_4WAY_HANDSHAKE = BIT(6),
+ WIPHY_WOWLAN_RFKILL_RELEASE = BIT(7),
+ WIPHY_WOWLAN_NET_DETECT = BIT(8),
+};
+
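+/**
+ * struct wiphy_wowlan_tcp_support - WoWLAN TCP wakeup support data
+ * @tok: TCP data token feature support data (may be %NULL)
+ * @data_payload_max: maximum data payload size
+ * @data_interval_max: maximum data interval
+ * @wake_payload_max: maximum wake data payload size
+ * @seq: TCP sequence number generation supported
+ */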
+struct wiphy_wowlan_tcp_support {
+ const struct nl80211_wowlan_tcp_data_token_feature *tok;
+ u32 data_payload_max;
+ u32 data_interval_max;
+ u32 wake_payload_max;
+ bool seq;
+};
+
+/**
+ * struct wiphy_wowlan_support - WoWLAN support data
+ * @flags: see &enum wiphy_wowlan_support_flags
+ * @n_patterns: number of supported wakeup patterns
+ * (see nl80211.h for the pattern definition)
+ * @pattern_max_len: maximum length of each pattern
+ * @pattern_min_len: minimum length of each pattern
+ * @max_pkt_offset: maximum Rx packet offset
+ * @max_nd_match_sets: maximum number of matchsets for net-detect,
+ * similar, but not necessarily identical, to max_match_sets for
+ * scheduled scans.
+ * See &struct cfg80211_sched_scan_request.@match_sets for more
+ * details.
+ * @tcp: TCP wakeup support information
+ */
+struct wiphy_wowlan_support {
+ u32 flags;
+ int n_patterns;
+ int pattern_max_len;
+ int pattern_min_len;
+ int max_pkt_offset;
+ int max_nd_match_sets;
+ const struct wiphy_wowlan_tcp_support *tcp;
+};
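+
+/*
+ * Example (illustrative sketch): advertising a basic set of WoWLAN triggers
+ * via the wiphy's @wowlan pointer (available when CONFIG_PM is enabled);
+ * the values here are made up.
+ *
+ *	static const struct wiphy_wowlan_support mydrv_wowlan = {
+ *		.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ *		.n_patterns = 4,
+ *		.pattern_min_len = 1,
+ *		.pattern_max_len = 64,
+ *	};
+ *
+ *	wiphy->wowlan = &mydrv_wowlan;
+ */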
+
+/**
+ * struct wiphy_coalesce_support - coalesce support data
+ * @n_rules: maximum number of coalesce rules
+ * @max_delay: maximum supported coalescing delay in msecs
+ * @n_patterns: number of supported patterns in a rule
+ * (see nl80211.h for the pattern definition)
+ * @pattern_max_len: maximum length of each pattern
+ * @pattern_min_len: minimum length of each pattern
+ * @max_pkt_offset: maximum Rx packet offset
+ */
+struct wiphy_coalesce_support {
+ int n_rules;
+ int max_delay;
+ int n_patterns;
+ int pattern_max_len;
+ int pattern_min_len;
+ int max_pkt_offset;
+};
+
+/**
+ * enum wiphy_vendor_command_flags - validation flags for vendor commands
+ * @WIPHY_VENDOR_CMD_NEED_WDEV: vendor command requires wdev
+ * @WIPHY_VENDOR_CMD_NEED_NETDEV: vendor command requires netdev
+ * @WIPHY_VENDOR_CMD_NEED_RUNNING: interface/wdev must be up & running
+ * (must be combined with %_WDEV or %_NETDEV)
+ */
+enum wiphy_vendor_command_flags {
+ WIPHY_VENDOR_CMD_NEED_WDEV = BIT(0),
+ WIPHY_VENDOR_CMD_NEED_NETDEV = BIT(1),
+ WIPHY_VENDOR_CMD_NEED_RUNNING = BIT(2),
+};
+
+/**
+ * enum wiphy_opmode_flag - Station's ht/vht operation mode information flags
+ *
+ * @STA_OPMODE_MAX_BW_CHANGED: Max Bandwidth changed
+ * @STA_OPMODE_SMPS_MODE_CHANGED: SMPS mode changed
+ * @STA_OPMODE_N_SS_CHANGED: max N_SS (number of spatial streams) changed
+ *
+ */
+enum wiphy_opmode_flag {
+ STA_OPMODE_MAX_BW_CHANGED = BIT(0),
+ STA_OPMODE_SMPS_MODE_CHANGED = BIT(1),
+ STA_OPMODE_N_SS_CHANGED = BIT(2),
+};
+
+/**
+ * struct sta_opmode_info - Station's ht/vht operation mode information
+ * @changed: contains value from &enum wiphy_opmode_flag
+ * @smps_mode: New SMPS mode value from &enum nl80211_smps_mode of a station
+ * @bw: new max bandwidth value from &enum nl80211_chan_width of a station
+ * @rx_nss: new rx_nss value of a station
+ */
+struct sta_opmode_info {
+ u32 changed;
+ enum nl80211_smps_mode smps_mode;
+ enum nl80211_chan_width bw;
+ u8 rx_nss;
+};
+
+#define VENDOR_CMD_RAW_DATA ((const struct nla_policy *)(long)(-ENODATA))
+
+/**
+ * struct wiphy_vendor_command - vendor command definition
+ * @info: vendor command identifying information, as used in nl80211
+ * @flags: flags, see &enum wiphy_vendor_command_flags
+ * @doit: callback for the operation, note that wdev is %NULL if the
+ * flags didn't ask for a wdev and non-%NULL otherwise; the data
+ * pointer may be %NULL if userspace provided no data at all
+ * @dumpit: dump callback, for transferring bigger/multiple items. The
+ * @storage points to cb->args[5], ie. is preserved over the multiple
+ * dumpit calls.
+ * @policy: policy pointer for attributes within %NL80211_ATTR_VENDOR_DATA.
+ * Set this to %VENDOR_CMD_RAW_DATA if no policy can be given and the
+ * attribute is just raw data (e.g. a firmware command).
+ * @maxattr: highest attribute number in policy
+ * It's recommended to not have the same sub command with both @doit and
+ * @dumpit, so that userspace can assume certain ones are get and others
+ * are used with dump requests.
+ */
+struct wiphy_vendor_command {
+ struct nl80211_vendor_cmd_info info;
+ u32 flags;
+ int (*doit)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len);
+ int (*dumpit)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct sk_buff *skb, const void *data, int data_len,
+ unsigned long *storage);
+ const struct nla_policy *policy;
+ unsigned int maxattr;
+};
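+
+/*
+ * Example (illustrative sketch): one vendor command taking raw data that
+ * requires a running wdev; the vendor_id/subcmd values and the
+ * mydrv_testmode() handler are hypothetical.
+ *
+ *	static const struct wiphy_vendor_command mydrv_vendor_cmds[] = {
+ *		{
+ *			.info = { .vendor_id = 0x001234, .subcmd = 1 },
+ *			.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ *				 WIPHY_VENDOR_CMD_NEED_RUNNING,
+ *			.policy = VENDOR_CMD_RAW_DATA,
+ *			.doit = mydrv_testmode,
+ *		},
+ *	};
+ *
+ *	wiphy->vendor_commands = mydrv_vendor_cmds;
+ *	wiphy->n_vendor_commands = ARRAY_SIZE(mydrv_vendor_cmds);
+ */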
+
+/**
+ * struct wiphy_iftype_ext_capab - extended capabilities per interface type
+ * @iftype: interface type
+ * @extended_capabilities: extended capabilities supported by the driver,
+ * additional capabilities might be supported by userspace; these are the
+ * 802.11 extended capabilities ("Extended Capabilities element") and are
+ * in the same format as in the information element. See IEEE Std
+ * 802.11-2012 8.4.2.29 for the defined fields.
+ * @extended_capabilities_mask: mask of the valid values
+ * @extended_capabilities_len: length of the extended capabilities
+ * @eml_capabilities: EML capabilities (for MLO)
+ * @mld_capa_and_ops: MLD capabilities and operations (for MLO)
+ */
+struct wiphy_iftype_ext_capab {
+ enum nl80211_iftype iftype;
+ const u8 *extended_capabilities;
+ const u8 *extended_capabilities_mask;
+ u8 extended_capabilities_len;
+ u16 eml_capabilities;
+ u16 mld_capa_and_ops;
+};
+
+/**
+ * cfg80211_get_iftype_ext_capa - lookup interface type extended capability
+ * @wiphy: the wiphy to look up from
+ * @type: the interface type to look up
+ */
+const struct wiphy_iftype_ext_capab *
+cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type);
+
+/**
+ * struct cfg80211_pmsr_capabilities - cfg80211 peer measurement capabilities
+ * @max_peers: maximum number of peers in a single measurement
+ * @report_ap_tsf: can report assoc AP's TSF for radio resource measurement
+ * @randomize_mac_addr: can randomize MAC address for measurement
+ * @ftm: FTM measurement data
+ * @ftm.supported: FTM measurement is supported
+ * @ftm.asap: ASAP-mode is supported
+ * @ftm.non_asap: non-ASAP-mode is supported
+ * @ftm.request_lci: can request LCI data
+ * @ftm.request_civicloc: can request civic location data
+ * @ftm.preambles: bitmap of preambles supported (&enum nl80211_preamble)
+ * @ftm.bandwidths: bitmap of bandwidths supported (&enum nl80211_chan_width)
+ * @ftm.max_bursts_exponent: maximum burst exponent supported
+ * (set to -1 if not limited; note that setting this will necessarily
+ * forbid using the value 15 to let the responder pick)
+ * @ftm.max_ftms_per_burst: maximum FTMs per burst supported (set to 0 if
+ * not limited)
+ * @ftm.trigger_based: trigger based ranging measurement is supported
+ * @ftm.non_trigger_based: non trigger based ranging measurement is supported
+ */
+struct cfg80211_pmsr_capabilities {
+ unsigned int max_peers;
+ u8 report_ap_tsf:1,
+ randomize_mac_addr:1;
+
+ struct {
+ u32 preambles;
+ u32 bandwidths;
+ s8 max_bursts_exponent;
+ u8 max_ftms_per_burst;
+ u8 supported:1,
+ asap:1,
+ non_asap:1,
+ request_lci:1,
+ request_civicloc:1,
+ trigger_based:1,
+ non_trigger_based:1;
+ } ftm;
+};
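+
+/*
+ * Example (illustrative sketch): advertising FTM-capable peer measurement
+ * support via the wiphy's @pmsr_capa pointer; the values are made up.
+ *
+ *	static const struct cfg80211_pmsr_capabilities mydrv_pmsr_capa = {
+ *		.max_peers = 10,
+ *		.randomize_mac_addr = 1,
+ *		.ftm = {
+ *			.supported = 1,
+ *			.asap = 1,
+ *			.preambles = BIT(NL80211_PREAMBLE_HT),
+ *			.bandwidths = BIT(NL80211_CHAN_WIDTH_20),
+ *			.max_bursts_exponent = -1,
+ *		},
+ *	};
+ *
+ *	wiphy->pmsr_capa = &mydrv_pmsr_capa;
+ */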
+
+/**
+ * struct wiphy_iftype_akm_suites - This structure encapsulates supported akm
+ * suites for interface types defined in @iftypes_mask. Each type in the
+ * @iftypes_mask must be unique across all instances of iftype_akm_suites.
+ *
+ * @iftypes_mask: bitmask of interfaces types
+ * @akm_suites: points to an array of supported akm suites
+ * @n_akm_suites: number of supported AKM suites
+ */
+struct wiphy_iftype_akm_suites {
+ u16 iftypes_mask;
+ const u32 *akm_suites;
+ int n_akm_suites;
+};
+
+/**
+ * struct wiphy - wireless hardware description
+ * @mtx: mutex for the data (structures) of this device
+ * @reg_notifier: the driver's regulatory notification callback,
+ * note that if your driver uses wiphy_apply_custom_regulatory()
+ * the reg_notifier's request can be passed as NULL
+ * @regd: the driver's regulatory domain, if one was requested via
+ * the regulatory_hint() API. This can be used by the driver
+ * on the reg_notifier() if it chooses to ignore future
+ * regulatory domain changes caused by other drivers.
+ * @signal_type: signal type reported in &struct cfg80211_bss.
+ * @cipher_suites: supported cipher suites
+ * @n_cipher_suites: number of supported cipher suites
+ * @akm_suites: supported AKM suites. These are the default AKMs supported if
+ * the supported AKMs are not advertised for a specific interface type in
+ * @iftype_akm_suites.
+ * @n_akm_suites: number of supported AKM suites
+ * @iftype_akm_suites: array of supported akm suites info per interface type.
+ * Note that the bits in @iftypes_mask inside this structure cannot
+ * overlap (i.e. only one occurrence of each type is allowed across all
+ * instances of iftype_akm_suites).
+ * @num_iftype_akm_suites: number of interface types for which supported akm
+ * suites are specified separately.
+ * @retry_short: Retry limit for short frames (dot11ShortRetryLimit)
+ * @retry_long: Retry limit for long frames (dot11LongRetryLimit)
+ * @frag_threshold: Fragmentation threshold (dot11FragmentationThreshold);
+ * -1 = fragmentation disabled, only odd values >= 256 used
+ * @rts_threshold: RTS threshold (dot11RTSThreshold); -1 = RTS/CTS disabled
+ * @_net: the network namespace this wiphy currently lives in
+ * @perm_addr: permanent MAC address of this device
+ * @addr_mask: If the device supports multiple MAC addresses by masking,
+ * set this to a mask with variable bits set to 1, e.g. if the last
+ * four bits are variable then set it to 00-00-00-00-00-0f. The actual
+ * variable bits shall be determined by the interfaces added, with
+ * interfaces not matching the mask being rejected when brought up.
+ * @n_addresses: number of addresses in @addresses.
+ * @addresses: If the device has more than one address, set this pointer
+ * to a list of addresses (6 bytes each). The first one will be used
+ * by default for perm_addr. In this case, the mask should be set to
+ * all-zeroes, and it is assumed that the device can handle the same
+ * number of arbitrary MAC addresses.
+ * @registered: protects ->resume and ->suspend sysfs callbacks against
+ * hardware unregistration
+ * @debugfsdir: debugfs directory used for this wiphy (ieee80211/<wiphyname>).
+ * It will be renamed automatically on wiphy renames
+ * @dev: (virtual) struct device for this wiphy. The item in
+ * /sys/class/ieee80211/ points to this. You must use set_wiphy_dev()
+ * (see below).
+ * @wext: wireless extension handlers
+ * @priv: driver private data (sized according to wiphy_new() parameter)
+ * @interface_modes: bitmask of interface types valid for this wiphy,
+ * must be set by driver
+ * @iface_combinations: Valid interface combinations array, should not
+ * list single interface types.
+ * @n_iface_combinations: number of entries in @iface_combinations array.
+ * @software_iftypes: bitmask of software interface types, these are not
+ * subject to any restrictions since they are purely managed in SW.
+ * @flags: wiphy flags, see &enum wiphy_flags
+ * @regulatory_flags: wiphy regulatory flags, see
+ * &enum ieee80211_regulatory_flags
+ * @features: features advertised to nl80211, see &enum nl80211_feature_flags.
+ * @ext_features: extended features advertised to nl80211, see
+ * &enum nl80211_ext_feature_index.
+ * @bss_priv_size: each BSS struct has private data allocated with it,
+ * this variable determines its size
+ * @max_scan_ssids: maximum number of SSIDs the device can scan for in
+ * any given scan
+ * @max_sched_scan_reqs: maximum number of scheduled scan requests that
+ * the device can run concurrently.
+ * @max_sched_scan_ssids: maximum number of SSIDs the device can scan
+ * for in any given scheduled scan
+ * @max_match_sets: maximum number of match sets the device can handle
+ * when performing a scheduled scan, 0 if filtering is not
+ * supported.
+ * @max_scan_ie_len: maximum length of user-controlled IEs device can
+ * add to probe request frames transmitted during a scan, must not
+ * include fixed IEs like supported rates
+ * @max_sched_scan_ie_len: same as max_scan_ie_len, but for scheduled
+ * scans
+ * @max_sched_scan_plans: maximum number of scan plans (scan interval and number
+ * of iterations) for scheduled scan supported by the device.
+ * @max_sched_scan_plan_interval: maximum interval (in seconds) for a
+ * single scan plan supported by the device.
+ * @max_sched_scan_plan_iterations: maximum number of iterations for a single
+ * scan plan supported by the device.
+ * @coverage_class: current coverage class
+ * @fw_version: firmware version for ethtool reporting
+ * @hw_version: hardware version for ethtool reporting
+ * @max_num_pmkids: maximum number of PMKIDs supported by device
+ * @privid: a pointer that drivers can use to identify if an arbitrary
+ * wiphy is theirs, e.g. in global notifiers
+ * @bands: information about bands/channels supported by this device
+ *
+ * @mgmt_stypes: bitmasks of frame subtypes that can be subscribed to or
+ * transmitted through nl80211, points to an array indexed by interface
+ * type
+ *
+ * @available_antennas_tx: bitmap of antennas which are available to be
+ * configured as TX antennas. Antenna configuration commands will be
+ * rejected unless this or @available_antennas_rx is set.
+ *
+ * @available_antennas_rx: bitmap of antennas which are available to be
+ * configured as RX antennas. Antenna configuration commands will be
+ * rejected unless this or @available_antennas_tx is set.
+ *
+ * @probe_resp_offload:
+ * Bitmap of supported protocols for probe response offloading.
+ * See &enum nl80211_probe_resp_offload_support_attr. Only valid
+ * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set.
+ *
+ * @max_remain_on_channel_duration: Maximum time a remain-on-channel operation
+ * may request, if implemented.
+ *
+ * @wowlan: WoWLAN support information
+ * @wowlan_config: current WoWLAN configuration; this should usually not be
+ * used since access to it is necessarily racy, use the parameter passed
+ * to the suspend() operation instead.
+ *
+ * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features.
+ * @ht_capa_mod_mask: Specify what ht_cap values can be overridden.
+ * If null, then none can be overridden.
+ * @vht_capa_mod_mask: Specify what VHT capabilities can be overridden.
+ * If null, then none can be overridden.
+ *
+ * @wdev_list: the list of associated (virtual) interfaces; this list must
+ * not be modified by the driver, but can be read with RTNL/RCU protection.
+ *
+ * @max_acl_mac_addrs: Maximum number of MAC addresses that the device
+ * supports for ACL.
+ *
+ * @extended_capabilities: extended capabilities supported by the driver,
+ * additional capabilities might be supported by userspace; these are
+ * the 802.11 extended capabilities ("Extended Capabilities element")
+ * and are in the same format as in the information element. See
+ * 802.11-2012 8.4.2.29 for the defined fields. These are the default
+ * extended capabilities to be used if the capabilities are not specified
+ * for a specific interface type in iftype_ext_capab.
+ * @extended_capabilities_mask: mask of the valid values
+ * @extended_capabilities_len: length of the extended capabilities
+ * @iftype_ext_capab: array of extended capabilities per interface type
+ * @num_iftype_ext_capab: number of interface types for which extended
+ * capabilities are specified separately.
+ * @coalesce: packet coalescing support information
+ *
+ * @vendor_commands: array of vendor commands supported by the hardware
+ * @n_vendor_commands: number of vendor commands
+ * @vendor_events: array of vendor events supported by the hardware
+ * @n_vendor_events: number of vendor events
+ *
+ * @max_ap_assoc_sta: maximum number of associated stations supported in AP mode
+ * (including P2P GO) or 0 to indicate no such limit is advertised. The
+ * driver is allowed to advertise a theoretical limit that it can reach in
+ * some cases, but may not always reach.
+ *
+ * @max_num_csa_counters: Number of supported csa_counters in beacons
+ * and probe responses. This value should be set if the driver
+ * wishes to limit the number of csa counters. Default (0) means
+ * infinite.
+ * @bss_select_support: bitmask indicating the BSS selection criteria supported
+ * by the driver in the .connect() callback. The bit position maps to the
+ * attribute indices defined in &enum nl80211_bss_select_attr.
+ *
+ * @nan_supported_bands: bands supported by the device in NAN mode, a
+ * bitmap of &enum nl80211_band values. For instance, for
+ * NL80211_BAND_2GHZ, bit 0 would be set
+ * (i.e. BIT(NL80211_BAND_2GHZ)).
+ *
+ * @txq_limit: configuration of internal TX queue frame limit
+ * @txq_memory_limit: configuration internal TX queue memory limit
+ * @txq_quantum: configuration of internal TX queue scheduler quantum
+ *
+ * @tx_queue_len: allow setting transmit queue len for drivers not using
+ * wake_tx_queue
+ *
+ * @support_mbssid: can HW support association with nontransmitted AP
+ * @support_only_he_mbssid: don't parse MBSSID elements if it is not an
+ * HE AP, in order to avoid compatibility issues.
+ * @support_mbssid must be set for this to have any effect.
+ *
+ * @pmsr_capa: peer measurement capabilities
+ *
+ * @tid_config_support: describes the per-TID config support that the
+ * device has
+ * @tid_config_support.vif: bitmap of attributes (configurations)
+ * supported by the driver for each vif
+ * @tid_config_support.peer: bitmap of attributes (configurations)
+ * supported by the driver for each peer
+ * @tid_config_support.max_retry: maximum supported retry count for
+ * long/short retry configuration
+ *
+ * @max_data_retry_count: maximum supported per TID retry count for
+ * configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and
+ * %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
+ * @sar_capa: SAR control capabilities
+ * @rfkill: a pointer to the rfkill structure
+ *
+ * @mbssid_max_interfaces: maximum number of interfaces supported by the driver
+ * in a multiple BSSID set. This field must be set to a non-zero value
+ * by the driver to advertise MBSSID support.
+ * @ema_max_profile_periodicity: maximum profile periodicity supported by
+ * the driver. Setting this field to a non-zero value indicates that the
+ * driver supports enhanced multi-BSSID advertisements (EMA AP).
+ * @max_num_akm_suites: maximum number of AKM suites allowed for
+ * configuration through %NL80211_CMD_CONNECT, %NL80211_CMD_ASSOCIATE and
+ * %NL80211_CMD_START_AP. Set to NL80211_MAX_NR_AKM_SUITES if not set by
+ * driver. If set by the driver, the minimum allowed value is
+ * NL80211_MAX_NR_AKM_SUITES, in order to avoid compatibility issues
+ * with legacy userspace, and the maximum allowed value is
+ * CFG80211_MAX_NUM_AKM_SUITES.
+ */
+struct wiphy {
+ struct mutex mtx;
+
+ /* assign these fields before you register the wiphy */
+
+ u8 perm_addr[ETH_ALEN];
+ u8 addr_mask[ETH_ALEN];
+
+ struct mac_address *addresses;
+
+ const struct ieee80211_txrx_stypes *mgmt_stypes;
+
+ const struct ieee80211_iface_combination *iface_combinations;
+ int n_iface_combinations;
+ u16 software_iftypes;
+
+ u16 n_addresses;
+
+ /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */
+ u16 interface_modes;
+
+ u16 max_acl_mac_addrs;
+
+ u32 flags, regulatory_flags, features;
+ u8 ext_features[DIV_ROUND_UP(NUM_NL80211_EXT_FEATURES, 8)];
+
+ u32 ap_sme_capa;
+
+ enum cfg80211_signal_type signal_type;
+
+ int bss_priv_size;
+ u8 max_scan_ssids;
+ u8 max_sched_scan_reqs;
+ u8 max_sched_scan_ssids;
+ u8 max_match_sets;
+ u16 max_scan_ie_len;
+ u16 max_sched_scan_ie_len;
+ u32 max_sched_scan_plans;
+ u32 max_sched_scan_plan_interval;
+ u32 max_sched_scan_plan_iterations;
+
+ int n_cipher_suites;
+ const u32 *cipher_suites;
+
+ int n_akm_suites;
+ const u32 *akm_suites;
+
+ const struct wiphy_iftype_akm_suites *iftype_akm_suites;
+ unsigned int num_iftype_akm_suites;
+
+ u8 retry_short;
+ u8 retry_long;
+ u32 frag_threshold;
+ u32 rts_threshold;
+ u8 coverage_class;
+
+ char fw_version[ETHTOOL_FWVERS_LEN];
+ u32 hw_version;
+
+#ifdef CONFIG_PM
+ const struct wiphy_wowlan_support *wowlan;
+ struct cfg80211_wowlan *wowlan_config;
+#endif
+
+ u16 max_remain_on_channel_duration;
+
+ u8 max_num_pmkids;
+
+ u32 available_antennas_tx;
+ u32 available_antennas_rx;
+
+ u32 probe_resp_offload;
+
+ const u8 *extended_capabilities, *extended_capabilities_mask;
+ u8 extended_capabilities_len;
+
+ const struct wiphy_iftype_ext_capab *iftype_ext_capab;
+ unsigned int num_iftype_ext_capab;
+
+ const void *privid;
+
+ struct ieee80211_supported_band *bands[NUM_NL80211_BANDS];
+
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request);
+
+ /* fields below are read-only, assigned by cfg80211 */
+
+ const struct ieee80211_regdomain __rcu *regd;
+
+ struct device dev;
+
+ bool registered;
+
+ struct dentry *debugfsdir;
+
+ const struct ieee80211_ht_cap *ht_capa_mod_mask;
+ const struct ieee80211_vht_cap *vht_capa_mod_mask;
+
+ struct list_head wdev_list;
+
+ possible_net_t _net;
+
+#ifdef CONFIG_CFG80211_WEXT
+ const struct iw_handler_def *wext;
+#endif
+
+ const struct wiphy_coalesce_support *coalesce;
+
+ const struct wiphy_vendor_command *vendor_commands;
+ const struct nl80211_vendor_cmd_info *vendor_events;
+ int n_vendor_commands, n_vendor_events;
+
+ u16 max_ap_assoc_sta;
+
+ u8 max_num_csa_counters;
+
+ u32 bss_select_support;
+
+ u8 nan_supported_bands;
+
+ u32 txq_limit;
+ u32 txq_memory_limit;
+ u32 txq_quantum;
+
+ unsigned long tx_queue_len;
+
+ u8 support_mbssid:1,
+ support_only_he_mbssid:1;
+
+ const struct cfg80211_pmsr_capabilities *pmsr_capa;
+
+ struct {
+ u64 peer, vif;
+ u8 max_retry;
+ } tid_config_support;
+
+ u8 max_data_retry_count;
+
+ const struct cfg80211_sar_capa *sar_capa;
+
+ struct rfkill *rfkill;
+
+ u8 mbssid_max_interfaces;
+ u8 ema_max_profile_periodicity;
+ u16 max_num_akm_suites;
+
+ char priv[] __aligned(NETDEV_ALIGN);
+};
+
+static inline struct net *wiphy_net(struct wiphy *wiphy)
+{
+ return read_pnet(&wiphy->_net);
+}
+
+static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net)
+{
+ write_pnet(&wiphy->_net, net);
+}
+
+/**
+ * wiphy_priv - return priv from wiphy
+ *
+ * @wiphy: the wiphy whose priv pointer to return
+ * Return: The priv of @wiphy.
+ */
+static inline void *wiphy_priv(struct wiphy *wiphy)
+{
+ BUG_ON(!wiphy);
+ return &wiphy->priv;
+}
+
+/**
+ * priv_to_wiphy - return the wiphy containing the priv
+ *
+ * @priv: a pointer previously returned by wiphy_priv
+ * Return: The wiphy of @priv.
+ */
+static inline struct wiphy *priv_to_wiphy(void *priv)
+{
+ BUG_ON(!priv);
+ return container_of(priv, struct wiphy, priv);
+}
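+
+/*
+ * Illustrative sketch (not part of this header): a driver that embeds
+ * its private state in the wiphy allocation can convert between the two
+ * representations. "struct foo_priv" is a hypothetical example.
+ */
+#if 0 /* example only */
+struct foo_priv {
+ int tx_power;
+};
+
+static void foo_priv_roundtrip(struct wiphy *wiphy)
+{
+ /* wiphy -> driver private area (allocated together with the wiphy) */
+ struct foo_priv *priv = wiphy_priv(wiphy);
+
+ /* ... and back to the containing wiphy */
+ WARN_ON(priv_to_wiphy(priv) != wiphy);
+}
+#endif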
+
+/**
+ * set_wiphy_dev - set device pointer for wiphy
+ *
+ * @wiphy: The wiphy whose device to bind
+ * @dev: The device to parent it to
+ */
+static inline void set_wiphy_dev(struct wiphy *wiphy, struct device *dev)
+{
+ wiphy->dev.parent = dev;
+}
+
+/**
+ * wiphy_dev - get wiphy dev pointer
+ *
+ * @wiphy: The wiphy whose device struct to look up
+ * Return: The dev of @wiphy.
+ */
+static inline struct device *wiphy_dev(struct wiphy *wiphy)
+{
+ return wiphy->dev.parent;
+}
+
+/**
+ * wiphy_name - get wiphy name
+ *
+ * @wiphy: The wiphy whose name to return
+ * Return: The name of @wiphy.
+ */
+static inline const char *wiphy_name(const struct wiphy *wiphy)
+{
+ return dev_name(&wiphy->dev);
+}
+
+/**
+ * wiphy_new_nm - create a new wiphy for use with cfg80211
+ *
+ * @ops: The configuration operations for this device
+ * @sizeof_priv: The size of the private area to allocate
+ * @requested_name: Request a particular name.
+ * NULL is a valid value, and means use the default phy%d naming.
+ *
+ * Create a new wiphy and associate the given operations with it.
+ * @sizeof_priv bytes are allocated for private use.
+ *
+ * Return: A pointer to the new wiphy. This pointer must be
+ * assigned to each netdev's ieee80211_ptr for proper operation.
+ */
+struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
+ const char *requested_name);
+
+/**
+ * wiphy_new - create a new wiphy for use with cfg80211
+ *
+ * @ops: The configuration operations for this device
+ * @sizeof_priv: The size of the private area to allocate
+ *
+ * Create a new wiphy and associate the given operations with it.
+ * @sizeof_priv bytes are allocated for private use.
+ *
+ * Return: A pointer to the new wiphy. This pointer must be
+ * assigned to each netdev's ieee80211_ptr for proper operation.
+ */
+static inline struct wiphy *wiphy_new(const struct cfg80211_ops *ops,
+ int sizeof_priv)
+{
+ return wiphy_new_nm(ops, sizeof_priv, NULL);
+}
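+
+/*
+ * Illustrative sketch: allocating a wiphy with room for the driver's
+ * private data and parenting it in sysfs before registration. The ops
+ * table "foo_cfg80211_ops" and "struct foo_priv" are hypothetical.
+ */
+#if 0 /* example only */
+static struct wiphy *foo_wiphy_alloc(struct device *dev)
+{
+ struct wiphy *wiphy;
+
+ wiphy = wiphy_new(&foo_cfg80211_ops, sizeof(struct foo_priv));
+ if (!wiphy)
+  return NULL;
+
+ /* make /sys/class/ieee80211/<phy> point at the parent device */
+ set_wiphy_dev(wiphy, dev);
+ return wiphy;
+}
+#endif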
+
+/**
+ * wiphy_register - register a wiphy with cfg80211
+ *
+ * @wiphy: The wiphy to register.
+ *
+ * Return: A non-negative wiphy index or a negative error code.
+ */
+int wiphy_register(struct wiphy *wiphy);
+
+/* this is a define for better error reporting (file/line) */
+#define lockdep_assert_wiphy(wiphy) lockdep_assert_held(&(wiphy)->mtx)
+
+/**
+ * rcu_dereference_wiphy - rcu_dereference with debug checking
+ * @wiphy: the wiphy to check the locking on
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Do an rcu_dereference(p), but check that the caller either holds
+ * rcu_read_lock() or the wiphy mutex. Note: Please prefer
+ * wiphy_dereference() or rcu_dereference().
+ */
+#define rcu_dereference_wiphy(wiphy, p) \
+ rcu_dereference_check(p, lockdep_is_held(&wiphy->mtx))
+
+/**
+ * wiphy_dereference - fetch RCU pointer when updates are prevented by wiphy mtx
+ * @wiphy: the wiphy to check the locking on
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit the
+ * READ_ONCE(), because caller holds the wiphy mutex used for updates.
+ */
+#define wiphy_dereference(wiphy, p) \
+ rcu_dereference_protected(p, lockdep_is_held(&wiphy->mtx))
+
+/**
+ * get_wiphy_regdom - get custom regdomain for the given wiphy
+ * @wiphy: the wiphy to get the regdomain from
+ */
+const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy);
+
+/**
+ * wiphy_unregister - deregister a wiphy from cfg80211
+ *
+ * @wiphy: The wiphy to unregister.
+ *
+ * After this call, no more requests can be made with this priv
+ * pointer, but the call may sleep to wait for an outstanding
+ * request that is being handled.
+ */
+void wiphy_unregister(struct wiphy *wiphy);
+
+/**
+ * wiphy_free - free wiphy
+ *
+ * @wiphy: The wiphy to free
+ */
+void wiphy_free(struct wiphy *wiphy);
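+
+/*
+ * Illustrative teardown sketch: a wiphy must be unregistered before it
+ * is freed; wiphy_unregister() may sleep to drain outstanding requests.
+ */
+#if 0 /* example only */
+static void foo_wiphy_destroy(struct wiphy *wiphy)
+{
+ wiphy_unregister(wiphy); /* no new requests after this returns */
+ wiphy_free(wiphy);
+}
+#endif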
+
+/* internal structs */
+struct cfg80211_conn;
+struct cfg80211_internal_bss;
+struct cfg80211_cached_keys;
+struct cfg80211_cqm_config;
+
+/**
+ * wiphy_lock - lock the wiphy
+ * @wiphy: the wiphy to lock
+ *
+ * This is needed around registering and unregistering netdevs that
+ * aren't created through cfg80211 calls, since that requires locking
+ * in cfg80211 when the notifier is called, but the notifier cannot
+ * differentiate which way it's called.
+ *
+ * It can also be used by drivers for their own purposes.
+ *
+ * When cfg80211 ops are called, the wiphy is already locked.
+ *
+ * Note that this makes sure that no workers that have been queued
+ * with wiphy_work_queue() are running.
+ */
+static inline void wiphy_lock(struct wiphy *wiphy)
+ __acquires(&wiphy->mtx)
+{
+ mutex_lock(&wiphy->mtx);
+ __acquire(&wiphy->mtx);
+}
+
+/**
+ * wiphy_unlock - unlock the wiphy again
+ * @wiphy: the wiphy to unlock
+ */
+static inline void wiphy_unlock(struct wiphy *wiphy)
+ __releases(&wiphy->mtx)
+{
+ __release(&wiphy->mtx);
+ mutex_unlock(&wiphy->mtx);
+}
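+
+/*
+ * Illustrative sketch: a driver registering a netdev it created itself
+ * (outside of cfg80211 ops, where the wiphy would already be locked)
+ * holds both the RTNL and the wiphy mutex, in that order.
+ */
+#if 0 /* example only */
+static int foo_register_own_netdev(struct wiphy *wiphy,
+        struct net_device *dev)
+{
+ int ret;
+
+ rtnl_lock();
+ wiphy_lock(wiphy);
+ ret = register_netdevice(dev);
+ wiphy_unlock(wiphy);
+ rtnl_unlock();
+ return ret;
+}
+#endif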
+
+struct wiphy_work;
+typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *);
+
+struct wiphy_work {
+ struct list_head entry;
+ wiphy_work_func_t func;
+};
+
+static inline void wiphy_work_init(struct wiphy_work *work,
+ wiphy_work_func_t func)
+{
+ INIT_LIST_HEAD(&work->entry);
+ work->func = func;
+}
+
+/**
+ * wiphy_work_queue - queue work for the wiphy
+ * @wiphy: the wiphy to queue for
+ * @work: the work item
+ *
+ * This is useful for work that must be done asynchronously, and work
+ * queued here has the special property that the wiphy mutex will be
+ * held as if wiphy_lock() was called, and that it cannot be running
+ * after wiphy_lock() was called. Therefore, wiphy_work_cancel() can
+ * use just cancel_work() instead of cancel_work_sync(); it only
+ * requires being called in a section protected by wiphy_lock().
+ */
+void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work);
+
+/**
+ * wiphy_work_cancel - cancel previously queued work
+ * @wiphy: the wiphy, for debug purposes
+ * @work: the work to cancel
+ *
+ * Cancel the work *without* waiting for it; this assumes being
+ * called under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work);
+
+/**
+ * wiphy_work_flush - flush previously queued work
+ * @wiphy: the wiphy, for debug purposes
+ * @work: the work to flush, this can be %NULL to flush all work
+ *
+ * Flush the work (i.e. run it if pending). This must be called
+ * under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work);
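+
+/*
+ * Illustrative sketch: wiphy work runs with the wiphy mutex held, so
+ * the worker body may assert the lock. "foo_work_fn" and "foo_start"
+ * are hypothetical.
+ */
+#if 0 /* example only */
+static void foo_work_fn(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ lockdep_assert_wiphy(wiphy); /* queued work runs locked */
+ /* ... do driver work ... */
+}
+
+static struct wiphy_work foo_work;
+
+static void foo_start(struct wiphy *wiphy)
+{
+ wiphy_work_init(&foo_work, foo_work_fn);
+ wiphy_work_queue(wiphy, &foo_work);
+}
+#endif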
+
+struct wiphy_delayed_work {
+ struct wiphy_work work;
+ struct wiphy *wiphy;
+ struct timer_list timer;
+};
+
+void wiphy_delayed_work_timer(struct timer_list *t);
+
+static inline void wiphy_delayed_work_init(struct wiphy_delayed_work *dwork,
+ wiphy_work_func_t func)
+{
+ timer_setup(&dwork->timer, wiphy_delayed_work_timer, 0);
+ wiphy_work_init(&dwork->work, func);
+}
+
+/**
+ * wiphy_delayed_work_queue - queue delayed work for the wiphy
+ * @wiphy: the wiphy to queue for
+ * @dwork: the delayable worker
+ * @delay: number of jiffies to wait before queueing
+ *
+ * This is useful for work that must be done asynchronously, and work
+ * queued here has the special property that the wiphy mutex will be
+ * held as if wiphy_lock() was called, and that it cannot be running
+ * after wiphy_lock() was called. Therefore, wiphy_delayed_work_cancel()
+ * can use just cancel_work() instead of cancel_work_sync(); it only
+ * requires being called in a section protected by wiphy_lock().
+ */
+void wiphy_delayed_work_queue(struct wiphy *wiphy,
+ struct wiphy_delayed_work *dwork,
+ unsigned long delay);
+
+/**
+ * wiphy_delayed_work_cancel - cancel previously queued delayed work
+ * @wiphy: the wiphy, for debug purposes
+ * @dwork: the delayed work to cancel
+ *
+ * Cancel the work *without* waiting for it; this assumes being
+ * called under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+ struct wiphy_delayed_work *dwork);
+
+/**
+ * wiphy_delayed_work_flush - flush previously queued delayed work
+ * @wiphy: the wiphy, for debug purposes
+ * @dwork: the delayed work to flush
+ *
+ * Flush the work (i.e. run it if pending). This must be called
+ * under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_delayed_work_flush(struct wiphy *wiphy,
+ struct wiphy_delayed_work *dwork);
+
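+/*
+ * Illustrative sketch: arming a delayed work item and cancelling it
+ * from a section that holds the wiphy mutex; reuses the hypothetical
+ * "foo_work_fn" from the example above.
+ */
+#if 0 /* example only */
+static struct wiphy_delayed_work foo_dwork;
+
+static void foo_arm_timeout(struct wiphy *wiphy)
+{
+ wiphy_delayed_work_init(&foo_dwork, foo_work_fn);
+ wiphy_delayed_work_queue(wiphy, &foo_dwork, msecs_to_jiffies(500));
+}
+
+static void foo_disarm_timeout(struct wiphy *wiphy)
+{
+ lockdep_assert_wiphy(wiphy);
+ wiphy_delayed_work_cancel(wiphy, &foo_dwork);
+}
+#endif
+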
+/**
+ * struct wireless_dev - wireless device state
+ *
+ * For netdevs, this structure must be allocated by the driver
+ * that uses the ieee80211_ptr field in struct net_device (this
+ * is intentional so it can be allocated along with the netdev.)
+ * It need not be registered then as netdev registration will
+ * be intercepted by cfg80211 to see the new wireless device;
+ * however, drivers must lock the wiphy before registering or
+ * unregistering netdevs if they pre-create any netdevs (in ops
+ * called from cfg80211, the wiphy is already locked.)
+ *
+ * For non-netdev uses, it must also be allocated by the driver
+ * in response to the cfg80211 callbacks that require it, as
+ * there's no netdev registration in that case; it may not be
+ * allocated outside of callback operations that return it.
+ *
+ * @wiphy: pointer to hardware description
+ * @iftype: interface type
+ * @registered: is this wdev already registered with cfg80211
+ * @registering: indicates we're doing registration under wiphy lock
+ * for the notifier
+ * @list: (private) Used to collect the interfaces
+ * @netdev: (private) Used to reference back to the netdev, may be %NULL
+ * @identifier: (private) Identifier used in nl80211 to identify this
+ * wireless device if it has no netdev
+ * @u: union containing data specific to @iftype
+ * @connected: indicates if connected or not (STA mode)
+ * @bssid: (private) Used by the internal configuration code
+ * @wext: (private) Used by the internal wireless extensions compat code
+ * @wext.ibss: (private) IBSS data part of wext handling
+ * @wext.connect: (private) connection handling data
+ * @wext.keys: (private) (WEP) key data
+ * @wext.ie: (private) extra elements for association
+ * @wext.ie_len: (private) length of extra elements
+ * @wext.bssid: (private) selected network BSSID
+ * @wext.ssid: (private) selected network SSID
+ * @wext.default_key: (private) selected default key index
+ * @wext.default_mgmt_key: (private) selected default management key index
+ * @wext.prev_bssid: (private) previous BSSID for reassociation
+ * @wext.prev_bssid_valid: (private) previous BSSID validity
+ * @use_4addr: indicates 4addr mode is used on this interface, must be
+ * set by driver (if supported) on add_interface BEFORE registering the
+ * netdev and may otherwise be used by the driver read-only; it will be
+ * updated by cfg80211 on change_interface
+ * @mgmt_registrations: list of registrations for management frames
+ * @mgmt_registrations_need_update: mgmt registrations were updated,
+ * need to propagate the update to the driver
+ * @mtx: mutex used to lock data in this struct; it may be used by
+ * drivers, and some API functions require it to be held
+ * @beacon_interval: beacon interval used on this device for transmitting
+ * beacons, 0 when not valid
+ * @address: The address for this device, valid only if @netdev is %NULL
+ * @is_running: true if this is a non-netdev device that has been started, e.g.
+ * the P2P Device.
+ * @cac_started: true if DFS channel availability check has been started
+ * @cac_start_time: timestamp (jiffies) when the DFS state was entered.
+ * @cac_time_ms: CAC time in ms
+ * @ps: powersave mode is enabled
+ * @ps_timeout: dynamic powersave timeout
+ * @ap_unexpected_nlportid: (private) netlink port ID of application
+ * registered for unexpected class 3 frames (AP mode)
+ * @conn: (private) cfg80211 software SME connection state machine data
+ * @connect_keys: (private) keys to set after connection is established
+ * @conn_bss_type: connecting/connected BSS type
+ * @conn_owner_nlportid: (private) connection owner socket port ID
+ * @disconnect_wk: (private) auto-disconnect work
+ * @disconnect_bssid: (private) the BSSID to use for auto-disconnect
+ * @event_list: (private) list for internal event processing
+ * @event_lock: (private) lock for event list
+ * @owner_nlportid: (private) owner socket port ID
+ * @nl_owner_dead: (private) owner socket went away
+ * @cqm_rssi_work: (private) CQM RSSI reporting work
+ * @cqm_config: (private) nl80211 RSSI monitor state
+ * @pmsr_list: (private) peer measurement requests
+ * @pmsr_lock: (private) peer measurements requests/results lock
+ * @pmsr_free_wk: (private) peer measurements cleanup work
+ * @unprot_beacon_reported: (private) timestamp of last
+ * unprotected beacon report
+ * @links: array of %IEEE80211_MLD_MAX_NUM_LINKS elements containing
+ * @addr, @ap and @client for each link
+ * @valid_links: bitmap describing what elements of @links are valid
+ */
+struct wireless_dev {
+ struct wiphy *wiphy;
+ enum nl80211_iftype iftype;
+
+ /* the remainder of this struct should be private to cfg80211 */
+ struct list_head list;
+ struct net_device *netdev;
+
+ u32 identifier;
+
+ struct list_head mgmt_registrations;
+ u8 mgmt_registrations_need_update:1;
+
+ struct mutex mtx;
+
+ bool use_4addr, is_running, registered, registering;
+
+ u8 address[ETH_ALEN] __aligned(sizeof(u16));
+
+ /* currently used for IBSS and SME - might be rearranged later */
+ struct cfg80211_conn *conn;
+ struct cfg80211_cached_keys *connect_keys;
+ enum ieee80211_bss_type conn_bss_type;
+ u32 conn_owner_nlportid;
+
+ struct work_struct disconnect_wk;
+ u8 disconnect_bssid[ETH_ALEN];
+
+ struct list_head event_list;
+ spinlock_t event_lock;
+
+ u8 connected:1;
+
+ bool ps;
+ int ps_timeout;
+
+ u32 ap_unexpected_nlportid;
+
+ u32 owner_nlportid;
+ bool nl_owner_dead;
+
+ /* FIXME: need to rework radar detection for MLO */
+ bool cac_started;
+ unsigned long cac_start_time;
+ unsigned int cac_time_ms;
+
+#ifdef CONFIG_CFG80211_WEXT
+ /* wext data */
+ struct {
+ struct cfg80211_ibss_params ibss;
+ struct cfg80211_connect_params connect;
+ struct cfg80211_cached_keys *keys;
+ const u8 *ie;
+ size_t ie_len;
+ u8 bssid[ETH_ALEN];
+ u8 prev_bssid[ETH_ALEN];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ s8 default_key, default_mgmt_key;
+ bool prev_bssid_valid;
+ } wext;
+#endif
+
+ struct wiphy_work cqm_rssi_work;
+ struct cfg80211_cqm_config __rcu *cqm_config;
+
+ struct list_head pmsr_list;
+ spinlock_t pmsr_lock;
+ struct work_struct pmsr_free_wk;
+
+ unsigned long unprot_beacon_reported;
+
+ union {
+ struct {
+ u8 connected_addr[ETH_ALEN] __aligned(2);
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ } client;
+ struct {
+ int beacon_interval;
+ struct cfg80211_chan_def preset_chandef;
+ struct cfg80211_chan_def chandef;
+ u8 id[IEEE80211_MAX_SSID_LEN];
+ u8 id_len, id_up_len;
+ } mesh;
+ struct {
+ struct cfg80211_chan_def preset_chandef;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ } ap;
+ struct {
+ struct cfg80211_internal_bss *current_bss;
+ struct cfg80211_chan_def chandef;
+ int beacon_interval;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ } ibss;
+ struct {
+ struct cfg80211_chan_def chandef;
+ } ocb;
+ } u;
+
+ struct {
+ u8 addr[ETH_ALEN] __aligned(2);
+ union {
+ struct {
+ unsigned int beacon_interval;
+ struct cfg80211_chan_def chandef;
+ } ap;
+ struct {
+ struct cfg80211_internal_bss *current_bss;
+ } client;
+ };
+ } links[IEEE80211_MLD_MAX_NUM_LINKS];
+ u16 valid_links;
+};
+
+static inline const u8 *wdev_address(struct wireless_dev *wdev)
+{
+ if (wdev->netdev)
+ return wdev->netdev->dev_addr;
+ return wdev->address;
+}
+
+static inline bool wdev_running(struct wireless_dev *wdev)
+{
+ if (wdev->netdev)
+ return netif_running(wdev->netdev);
+ return wdev->is_running;
+}
+
+/**
+ * wdev_priv - return wiphy priv from wireless_dev
+ *
+ * @wdev: The wireless device whose wiphy's priv pointer to return
+ * Return: The wiphy priv of @wdev.
+ */
+static inline void *wdev_priv(struct wireless_dev *wdev)
+{
+ BUG_ON(!wdev);
+ return wiphy_priv(wdev->wiphy);
+}
+
+/**
+ * wdev_chandef - return chandef pointer from wireless_dev
+ * @wdev: the wdev
+ * @link_id: the link ID for MLO
+ *
+ * Return: The chandef depending on the mode, or %NULL.
+ */
+struct cfg80211_chan_def *wdev_chandef(struct wireless_dev *wdev,
+ unsigned int link_id);
+
+static inline void WARN_INVALID_LINK_ID(struct wireless_dev *wdev,
+ unsigned int link_id)
+{
+ WARN_ON(link_id && !wdev->valid_links);
+ WARN_ON(wdev->valid_links &&
+ !(wdev->valid_links & BIT(link_id)));
+}
+
+#define for_each_valid_link(link_info, link_id) \
+ for (link_id = 0; \
+ link_id < ((link_info)->valid_links ? \
+ ARRAY_SIZE((link_info)->links) : 1); \
+ link_id++) \
+ if (!(link_info)->valid_links || \
+ ((link_info)->valid_links & BIT(link_id)))
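+
+/*
+ * Illustrative sketch: iterating the valid links of a wdev. For a
+ * non-MLO wdev (valid_links == 0) the loop body runs exactly once with
+ * link_id == 0.
+ */
+#if 0 /* example only */
+static void foo_dump_links(struct wireless_dev *wdev)
+{
+ unsigned int link_id;
+
+ for_each_valid_link(wdev, link_id)
+  pr_debug("link %u addr %pM\n", link_id,
+    wdev->links[link_id].addr);
+}
+#endif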
+
+/**
+ * DOC: Utility functions
+ *
+ * cfg80211 offers a number of utility functions that can be useful.
+ */
+
+/**
+ * ieee80211_channel_equal - compare two struct ieee80211_channel
+ *
+ * @a: 1st struct ieee80211_channel
+ * @b: 2nd struct ieee80211_channel
+ * Return: true if center frequency of @a == @b
+ */
+static inline bool
+ieee80211_channel_equal(struct ieee80211_channel *a,
+ struct ieee80211_channel *b)
+{
+ return (a->center_freq == b->center_freq &&
+ a->freq_offset == b->freq_offset);
+}
+
+/**
+ * ieee80211_channel_to_khz - convert ieee80211_channel to frequency in KHz
+ * @chan: struct ieee80211_channel to convert
+ * Return: The corresponding frequency (in KHz)
+ */
+static inline u32
+ieee80211_channel_to_khz(const struct ieee80211_channel *chan)
+{
+ return MHZ_TO_KHZ(chan->center_freq) + chan->freq_offset;
+}
+
+/**
+ * ieee80211_s1g_channel_width - get allowed channel width from @chan
+ *
+ * Only allowed for band NL80211_BAND_S1GHZ
+ * @chan: channel
+ * Return: The allowed channel width for this center_freq
+ */
+enum nl80211_chan_width
+ieee80211_s1g_channel_width(const struct ieee80211_channel *chan);
+
+/**
+ * ieee80211_channel_to_freq_khz - convert channel number to frequency
+ * @chan: channel number
+ * @band: band, necessary due to channel number overlap
+ * Return: The corresponding frequency (in KHz), or 0 if the conversion failed.
+ */
+u32 ieee80211_channel_to_freq_khz(int chan, enum nl80211_band band);
+
+/**
+ * ieee80211_channel_to_frequency - convert channel number to frequency
+ * @chan: channel number
+ * @band: band, necessary due to channel number overlap
+ * Return: The corresponding frequency (in MHz), or 0 if the conversion failed.
+ */
+static inline int
+ieee80211_channel_to_frequency(int chan, enum nl80211_band band)
+{
+ return KHZ_TO_MHZ(ieee80211_channel_to_freq_khz(chan, band));
+}
+
+/**
+ * ieee80211_freq_khz_to_channel - convert frequency to channel number
+ * @freq: center frequency in KHz
+ * Return: The corresponding channel, or 0 if the conversion failed.
+ */
+int ieee80211_freq_khz_to_channel(u32 freq);
+
+/**
+ * ieee80211_frequency_to_channel - convert frequency to channel number
+ * @freq: center frequency in MHz
+ * Return: The corresponding channel, or 0 if the conversion failed.
+ */
+static inline int
+ieee80211_frequency_to_channel(int freq)
+{
+ return ieee80211_freq_khz_to_channel(MHZ_TO_KHZ(freq));
+}
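+
+/*
+ * Illustrative sketch: round-tripping channel 36 in the 5 GHz band
+ * through the conversion helpers (channel 36 <-> 5180 MHz); both
+ * helpers return 0 if the conversion fails.
+ */
+#if 0 /* example only */
+static void foo_chan_conversion(void)
+{
+ int freq = ieee80211_channel_to_frequency(36, NL80211_BAND_5GHZ);
+ int chan = ieee80211_frequency_to_channel(freq);
+
+ WARN_ON(freq != 5180 || chan != 36);
+}
+#endif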
+
+/**
+ * ieee80211_get_channel_khz - get channel struct from wiphy for specified
+ * frequency
+ * @wiphy: the struct wiphy to get the channel for
+ * @freq: the center frequency (in KHz) of the channel
+ * Return: The channel struct from @wiphy at @freq.
+ */
+struct ieee80211_channel *
+ieee80211_get_channel_khz(struct wiphy *wiphy, u32 freq);
+
+/**
+ * ieee80211_get_channel - get channel struct from wiphy for specified frequency
+ *
+ * @wiphy: the struct wiphy to get the channel for
+ * @freq: the center frequency (in MHz) of the channel
+ * Return: The channel struct from @wiphy at @freq.
+ */
+static inline struct ieee80211_channel *
+ieee80211_get_channel(struct wiphy *wiphy, int freq)
+{
+ return ieee80211_get_channel_khz(wiphy, MHZ_TO_KHZ(freq));
+}
+
+/**
+ * cfg80211_channel_is_psc - Check if the channel is a 6 GHz PSC
+ * @chan: control channel to check
+ *
+ * The Preferred Scanning Channels (PSC) are defined in
+ * Draft IEEE P802.11ax/D5.0, 26.17.2.3.3
+ */
+static inline bool cfg80211_channel_is_psc(struct ieee80211_channel *chan)
+{
+ if (chan->band != NL80211_BAND_6GHZ)
+ return false;
+
+ return ieee80211_frequency_to_channel(chan->center_freq) % 16 == 5;
+}
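+
+/*
+ * Worked example: 6 GHz channel 37 (center frequency 6135 MHz) has
+ * 37 % 16 == 5, so cfg80211_channel_is_psc() returns true for it,
+ * while channel 33 (6115 MHz) is not a PSC.
+ */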
+
+/**
+ * ieee80211_get_response_rate - get basic rate for a given rate
+ *
+ * @sband: the band to look for rates in
+ * @basic_rates: bitmap of basic rates
+ * @bitrate: the bitrate for which to find the basic rate
+ *
+ * Return: The basic rate corresponding to a given bitrate, that
+ * is the next lower bitrate contained in the basic rate map,
+ * which is, for this function, given as a bitmap of indices of
+ * rates in the band's bitrate table.
+ */
+const struct ieee80211_rate *
+ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
+ u32 basic_rates, int bitrate);
+
+/**
+ * ieee80211_mandatory_rates - get mandatory rates for a given band
+ * @sband: the band to look for rates in
+ * @scan_width: width of the control channel
+ *
+ * This function returns a bitmap of the mandatory rates for the given
+ * band, bits are set according to the rate position in the bitrates array.
+ */
+u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
+ enum nl80211_bss_scan_width scan_width);
+
+/*
+ * Radiotap parsing functions -- for controlled injection support
+ *
+ * Implemented in net/wireless/radiotap.c
+ * Documentation in Documentation/networking/radiotap-headers.rst
+ */
+
+struct radiotap_align_size {
+ uint8_t align:4, size:4;
+};
+
+struct ieee80211_radiotap_namespace {
+ const struct radiotap_align_size *align_size;
+ int n_bits;
+ uint32_t oui;
+ uint8_t subns;
+};
+
+struct ieee80211_radiotap_vendor_namespaces {
+ const struct ieee80211_radiotap_namespace *ns;
+ int n_ns;
+};
+
+/**
+ * struct ieee80211_radiotap_iterator - tracks walk through present radiotap args
+ * @this_arg_index: index of current arg, valid after each successful call
+ * to ieee80211_radiotap_iterator_next()
+ * @this_arg: pointer to current radiotap arg; it is valid after each
+ * call to ieee80211_radiotap_iterator_next() but also after
+ * ieee80211_radiotap_iterator_init() where it will point to
+ * the beginning of the actual data portion
+ * @this_arg_size: length of the current arg, for convenience
+ * @current_namespace: pointer to the current namespace definition
+ * (or internally %NULL if the current namespace is unknown)
+ * @is_radiotap_ns: indicates whether the current namespace is the default
+ * radiotap namespace or not
+ *
+ * @_rtheader: pointer to the radiotap header we are walking through
+ * @_max_length: length of radiotap header in cpu byte ordering
+ * @_arg_index: next argument index
+ * @_arg: next argument pointer
+ * @_next_bitmap: internal pointer to next present u32
+ * @_bitmap_shifter: internal shifter for curr u32 bitmap, b0 set == arg present
+ * @_vns: vendor namespace definitions
+ * @_next_ns_data: beginning of the next namespace's data
+ * @_reset_on_ext: internal; reset the arg index to 0 when going to the
+ * next bitmap word
+ *
+ * Describes the radiotap parser state. Fields prefixed with an underscore
+ * must not be used by users of the parser, only by the parser internally.
+ */
+struct ieee80211_radiotap_iterator {
+ struct ieee80211_radiotap_header *_rtheader;
+ const struct ieee80211_radiotap_vendor_namespaces *_vns;
+ const struct ieee80211_radiotap_namespace *current_namespace;
+
+ unsigned char *_arg, *_next_ns_data;
+ __le32 *_next_bitmap;
+
+ unsigned char *this_arg;
+ int this_arg_index;
+ int this_arg_size;
+
+ int is_radiotap_ns;
+
+ int _max_length;
+ int _arg_index;
+ uint32_t _bitmap_shifter;
+ int _reset_on_ext;
+};
+
+int
+ieee80211_radiotap_iterator_init(struct ieee80211_radiotap_iterator *iterator,
+ struct ieee80211_radiotap_header *radiotap_header,
+ int max_length,
+ const struct ieee80211_radiotap_vendor_namespaces *vns);
+
+int
+ieee80211_radiotap_iterator_next(struct ieee80211_radiotap_iterator *iterator);
+
+
+extern const unsigned char rfc1042_header[6];
+extern const unsigned char bridge_tunnel_header[6];
+
+/**
+ * ieee80211_get_hdrlen_from_skb - get header length from data
+ *
+ * @skb: the frame
+ *
+ * Given an skb with a raw 802.11 header at the data pointer this function
+ * returns the 802.11 header length.
+ *
+ * Return: The 802.11 header length in bytes (not including encryption
+ * headers). Or 0 if the data in the sk_buff is too short to contain a valid
+ * 802.11 header.
+ */
+unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb);
+
+/**
+ * ieee80211_hdrlen - get header length in bytes from frame control
+ * @fc: frame control field in little-endian format
+ * Return: The header length in bytes.
+ */
+unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc);
+
+/**
+ * ieee80211_get_mesh_hdrlen - get mesh extension header length
+ * @meshhdr: the mesh extension header, only the flags field
+ * (first byte) will be accessed
+ * Return: The length of the extension header, which is always at
+ * least 6 bytes and at most 18 if address 5 and 6 are present.
+ */
+unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
+
+/**
+ * DOC: Data path helpers
+ *
+ * In addition to generic utilities, cfg80211 also offers
+ * functions that help implement the data path for devices
+ * that do not do the 802.11/802.3 conversion on the device.
+ */
+
+/**
+ * ieee80211_data_to_8023_exthdr - convert an 802.11 data frame to 802.3
+ * @skb: the 802.11 data frame
+ * @ehdr: pointer to a &struct ethhdr that will get the header, instead
+ * of it being pushed into the SKB
+ * @addr: the device MAC address
+ * @iftype: the virtual interface type
+ * @data_offset: offset of payload after the 802.11 header
+ * @is_amsdu: true if the 802.11 header is A-MSDU
+ * Return: 0 on success. Non-zero on error.
+ */
+int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ const u8 *addr, enum nl80211_iftype iftype,
+ u8 data_offset, bool is_amsdu);
+
+/**
+ * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
+ * @skb: the 802.11 data frame
+ * @addr: the device MAC address
+ * @iftype: the virtual interface type
+ * Return: 0 on success. Non-zero on error.
+ */
+static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+ enum nl80211_iftype iftype)
+{
+ return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0, false);
+}
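+
+/*
+ * Illustrative sketch: an RX path converting a received 802.11 data
+ * frame to 802.3 before handing it to the network stack; the frame is
+ * dropped if the conversion fails.
+ */
+#if 0 /* example only */
+static void foo_rx_data(struct net_device *dev, struct sk_buff *skb)
+{
+ if (ieee80211_data_to_8023(skb, dev->dev_addr,
+       NL80211_IFTYPE_STATION)) {
+  dev_kfree_skb(skb);
+  return;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+}
+#endif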
+
+/**
+ * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
+ *
+ * Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames.
+ * The @list will be empty if the decode fails. The @skb must be fully
+ * header-less before being passed in here; it is freed in this function.
+ *
+ * @skb: The input A-MSDU frame without any headers.
+ * @list: The output list of 802.3 frames. It must be allocated and
+ * initialized by the caller.
+ * @addr: The device MAC address.
+ * @iftype: The device interface type.
+ * @extra_headroom: The hardware extra headroom for SKBs in the @list.
+ * @check_da: DA to check in the inner ethernet header, or NULL
+ * @check_sa: SA to check in the inner ethernet header, or NULL
+ */
+void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
+ const u8 *addr, enum nl80211_iftype iftype,
+ const unsigned int extra_headroom,
+ const u8 *check_da, const u8 *check_sa);
+
+/**
+ * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame
+ * @skb: the data frame
+ * @qos_map: Interworking QoS mapping or %NULL if not in use
+ * Return: The 802.1p/1d tag.
+ */
+unsigned int cfg80211_classify8021d(struct sk_buff *skb,
+ struct cfg80211_qos_map *qos_map);
+
+/**
+ * cfg80211_find_elem_match - match information element and byte array in data
+ *
+ * @eid: element ID
+ * @ies: data consisting of IEs
+ * @len: length of data
+ * @match: byte array to match
+ * @match_len: number of bytes in the match array
+ * @match_offset: offset in the IE data where the byte array should match.
+ * Note the difference to cfg80211_find_ie_match() which considers
+ * the offset to start from the element ID byte, but here we take
+ * the data portion instead.
+ *
+ * Return: %NULL if the element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data) or if the byte array doesn't match; otherwise return the
+ * requested element struct.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data and being large enough for the
+ * byte array to match.
+ */
+const struct element *
+cfg80211_find_elem_match(u8 eid, const u8 *ies, unsigned int len,
+ const u8 *match, unsigned int match_len,
+ unsigned int match_offset);
+
+/**
+ * cfg80211_find_ie_match - match information element and byte array in data
+ *
+ * @eid: element ID
+ * @ies: data consisting of IEs
+ * @len: length of data
+ * @match: byte array to match
+ * @match_len: number of bytes in the match array
+ * @match_offset: offset in the IE where the byte array should match.
+ * If match_len is zero, this must also be set to zero.
+ * Otherwise this must be set to 2 or more, because the first
+ * byte is the element id, which is already compared to eid, and
+ * the second byte is the IE length.
+ *
+ * Return: %NULL if the element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data) or if the byte array doesn't match, or a pointer to the first
+ * byte of the requested element, that is the byte containing the
+ * element ID.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data and being large enough for the
+ * byte array to match.
+ */
+static inline const u8 *
+cfg80211_find_ie_match(u8 eid, const u8 *ies, unsigned int len,
+ const u8 *match, unsigned int match_len,
+ unsigned int match_offset)
+{
+ /* match_offset can't be smaller than 2, unless match_len is
+ * zero, in which case match_offset must be zero as well.
+ */
+ if (WARN_ON((match_len && match_offset < 2) ||
+ (!match_len && match_offset)))
+ return NULL;
+
+ return (const void *)cfg80211_find_elem_match(eid, ies, len,
+ match, match_len,
+ match_offset ?
+ match_offset - 2 : 0);
+}
+
+/**
+ * cfg80211_find_elem - find information element in data
+ *
+ * @eid: element ID
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data) or if the byte array doesn't match; otherwise return the
+ * requested element struct.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data.
+ */
+static inline const struct element *
+cfg80211_find_elem(u8 eid, const u8 *ies, int len)
+{
+ return cfg80211_find_elem_match(eid, ies, len, NULL, 0, 0);
+}
+
+/**
+ * cfg80211_find_ie - find information element in data
+ *
+ * @eid: element ID
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data), or a pointer to the first byte of the requested
+ * element, that is the byte containing the element ID.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data.
+ */
+static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
+{
+ return cfg80211_find_ie_match(eid, ies, len, NULL, 0, 0);
+}
+
+/**
+ * cfg80211_find_ext_elem - find information element with EID Extension in data
+ *
+ * @ext_eid: element ID Extension
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the extended element could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data) or if the byte array doesn't match; otherwise return the
+ * requested element struct.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data.
+ */
+static inline const struct element *
+cfg80211_find_ext_elem(u8 ext_eid, const u8 *ies, int len)
+{
+ return cfg80211_find_elem_match(WLAN_EID_EXTENSION, ies, len,
+ &ext_eid, 1, 0);
+}
+
+/**
+ * cfg80211_find_ext_ie - find information element with EID Extension in data
+ *
+ * @ext_eid: element ID Extension
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the extended element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data), or a pointer to the first byte of the requested
+ * element, that is the byte containing the element ID.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data.
+ */
+static inline const u8 *cfg80211_find_ext_ie(u8 ext_eid, const u8 *ies, int len)
+{
+ return cfg80211_find_ie_match(WLAN_EID_EXTENSION, ies, len,
+ &ext_eid, 1, 2);
+}
+
+/**
+ * cfg80211_find_vendor_elem - find vendor specific information element in data
+ *
+ * @oui: vendor OUI
+ * @oui_type: vendor-specific OUI type (must be < 0xff), negative means any
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the vendor specific element ID could not be found or if the
+ * element is invalid (claims to be longer than the given data); otherwise
+ * return the element structure for the requested element.
+ *
+ * Note: There are no checks on the element length other than having to fit into
+ * the given data.
+ */
+const struct element *cfg80211_find_vendor_elem(unsigned int oui, int oui_type,
+ const u8 *ies,
+ unsigned int len);
+
+/**
+ * cfg80211_find_vendor_ie - find vendor specific information element in data
+ *
+ * @oui: vendor OUI
+ * @oui_type: vendor-specific OUI type (must be < 0xff), negative means any
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the vendor specific element ID could not be found or if the
+ * element is invalid (claims to be longer than the given data), or a pointer to
+ * the first byte of the requested element, that is the byte containing the
+ * element ID.
+ *
+ * Note: There are no checks on the element length other than having to fit into
+ * the given data.
+ */
+static inline const u8 *
+cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
+ const u8 *ies, unsigned int len)
+{
+ return (const void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len);
+}
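+
+/*
+ * Illustrative sketch: looking up the SSID element and the WMM vendor
+ * element (Microsoft OUI 00:50:f2, type 2) in a buffer of IEs.
+ */
+#if 0 /* example only */
+static void foo_parse_ies(const u8 *ies, unsigned int len)
+{
+ const struct element *ssid;
+ const u8 *wmm;
+
+ ssid = cfg80211_find_elem(WLAN_EID_SSID, ies, len);
+ if (ssid)
+  pr_debug("SSID element, %u data bytes\n", ssid->datalen);
+
+ wmm = cfg80211_find_vendor_ie(0x0050f2, 2, ies, len);
+ if (wmm)
+  pr_debug("WMM vendor element present\n");
+}
+#endif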
+
+/**
+ * cfg80211_send_layer2_update - send layer 2 update frame
+ *
+ * @dev: network device
+ * @addr: STA MAC address
+ *
+ * Wireless drivers can use this function to update forwarding tables in bridge
+ * devices upon STA association.
+ */
+void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr);
+
+/**
+ * DOC: Regulatory enforcement infrastructure
+ *
+ * TODO
+ */
+
+/**
+ * regulatory_hint - driver hint to the wireless core a regulatory domain
+ * @wiphy: the wireless device giving the hint (used only for reporting
+ * conflicts)
+ * @alpha2: the ISO/IEC 3166 alpha2 the driver claims its regulatory domain
+ * should be in.
+ *
+ * Wireless drivers can use this function to hint to the wireless core
+ * what they believe the current regulatory domain should be, either by
+ * giving it an ISO/IEC 3166 alpha2 country code the regulatory domain
+ * should be in or by providing a completely built regulatory domain.
+ * If the driver provides an ISO/IEC 3166 alpha2, userspace will be
+ * queried for a regulatory domain structure for the respective country.
+ *
+ * The wiphy must have been registered to cfg80211 prior to this call.
+ * For cfg80211 drivers this means you must first use wiphy_register(),
+ * for mac80211 drivers you must first use ieee80211_register_hw().
+ *
+ * Drivers should check the return value; it's possible to get
+ * -ENOMEM.
+ *
+ * Return: 0 on success. -ENOMEM.
+ */
+int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
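+
+/*
+ * Illustrative sketch: hinting the regulatory core with an alpha2 the
+ * driver read from (hypothetical) device EEPROM, after the wiphy has
+ * been registered.
+ */
+#if 0 /* example only */
+static void foo_apply_eeprom_country(struct wiphy *wiphy,
+         const char *alpha2)
+{
+ int ret = regulatory_hint(wiphy, alpha2);
+
+ if (ret)
+  wiphy_err(wiphy, "regulatory hint failed: %d\n", ret);
+}
+#endif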
+
+/**
+ * regulatory_set_wiphy_regd - set regdom info for self managed drivers
+ * @wiphy: the wireless device we want to process the regulatory domain on
+ * @rd: the regulatory domain information to use for this wiphy
+ *
+ * Set the regulatory domain information for self-managed wiphys, only they
+ * may use this function. See %REGULATORY_WIPHY_SELF_MANAGED for more
+ * information.
+ *
+ * Return: 0 on success. -EINVAL, -EPERM
+ */
+int regulatory_set_wiphy_regd(struct wiphy *wiphy,
+ struct ieee80211_regdomain *rd);
+
+/**
+ * regulatory_set_wiphy_regd_sync - set regdom for self-managed drivers
+ * @wiphy: the wireless device we want to process the regulatory domain on
+ * @rd: the regulatory domain information to use for this wiphy
+ *
+ * This function requires the RTNL and the wiphy mutex to be held and
+ * applies the new regdomain synchronously to this wiphy. For more details
+ * see regulatory_set_wiphy_regd().
+ *
+ * Return: 0 on success. -EINVAL, -EPERM
+ */
+int regulatory_set_wiphy_regd_sync(struct wiphy *wiphy,
+ struct ieee80211_regdomain *rd);
+
+/**
+ * wiphy_apply_custom_regulatory - apply a custom driver regulatory domain
+ * @wiphy: the wireless device we want to process the regulatory domain on
+ * @regd: the custom regulatory domain to use for this wiphy
+ *
+ * Drivers can sometimes have custom regulatory domains which do not apply
+ * to a specific country. Drivers can use this to apply such custom regulatory
+ * domains. This routine must be called prior to wiphy registration. The
+ * custom regulatory domain will be trusted completely and as such previous
+ * default channel settings will be disregarded. If no rule is found for a
+ * channel on the regulatory domain the channel will be disabled.
+ * Drivers using this for a wiphy should also set the wiphy flag
+ * REGULATORY_CUSTOM_REG or cfg80211 will set it for the wiphy
+ * that called this helper.
+ */
+void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
+ const struct ieee80211_regdomain *regd);
+
+/**
+ * freq_reg_info - get regulatory information for the given frequency
+ * @wiphy: the wiphy for which we want to process this rule
+ * @center_freq: Frequency in KHz for which we want regulatory information
+ *
+ * Use this function to get the regulatory rule for a specific frequency on
+ * a given wireless device. If the device has a specific regulatory domain
+ * it wants to follow we respect that unless a country IE has been received
+ * and processed already.
+ *
+ * Return: A valid pointer, or, when an error occurs, for example if no rule
+ * can be found, the return value is encoded using ERR_PTR(). Use IS_ERR() to
+ * check and PTR_ERR() to obtain the numeric return value. The numeric return
+ * value will be -ERANGE if we determine the given center_freq does not even
+ * have a regulatory rule for a frequency range in the center_freq's band.
+ * See freq_in_rule_band() for our current definition of a band -- this is
+ * purely subjective and right now it's 802.11 specific.
+ */
+const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
+ u32 center_freq);
+
+/**
+ * reg_initiator_name - map regulatory request initiator enum to name
+ * @initiator: the regulatory request initiator
+ *
+ * You can use this to map the regulatory request initiator enum to a
+ * proper string representation.
+ */
+const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
+
+/**
+ * regulatory_pre_cac_allowed - check if pre-CAC allowed in the current regdom
+ * @wiphy: wiphy for which pre-CAC capability is checked.
+ *
+ * Pre-CAC is allowed only in some regdomains (notably ETSI).
+ */
+bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
+
+/**
+ * DOC: Internal regulatory db functions
+ *
+ */
+
+/**
+ * reg_query_regdb_wmm - Query internal regulatory db for wmm rule
+ *
+ * @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried.
+ * @freq: the frequency (in MHz) to be queried.
+ * @rule: pointer to store the wmm rule from the regulatory db.
+ *
+ * Self-managed wireless drivers can use this function to query
+ * the internal regulatory database to check whether the given
+ * ISO/IEC 3166 alpha2 country and freq have wmm rule limitations.
+ *
+ * Drivers should check the return value; it's possible to get
+ * -ENODATA.
+ *
+ * Return: 0 on success. -ENODATA.
+ */
+int reg_query_regdb_wmm(char *alpha2, int freq,
+ struct ieee80211_reg_rule *rule);
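+
+/*
+ * Illustrative sketch: a self-managed driver querying the internal db
+ * for the WMM rule that applies to France on 5180 MHz.
+ */
+#if 0 /* example only */
+static void foo_query_wmm_limits(void)
+{
+ struct ieee80211_reg_rule rule;
+
+ if (!reg_query_regdb_wmm("FR", 5180, &rule)) {
+  /* rule now contains the per-AC WMM limitations */
+ }
+}
+#endif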
+
+/*
+ * callbacks for asynchronous cfg80211 methods, notification
+ * functions and BSS handling helpers
+ */
+
+/**
+ * cfg80211_scan_done - notify that scan finished
+ *
+ * @request: the corresponding scan request
+ * @info: information about the completed scan
+ */
+void cfg80211_scan_done(struct cfg80211_scan_request *request,
+ struct cfg80211_scan_info *info);
+
+/**
+ * cfg80211_sched_scan_results - notify that new scan results are available
+ *
+ * @wiphy: the wiphy which got scheduled scan results
+ * @reqid: identifier for the related scheduled scan request
+ */
+void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid);
+
+/**
+ * cfg80211_sched_scan_stopped - notify that the scheduled scan has stopped
+ *
+ * @wiphy: the wiphy on which the scheduled scan stopped
+ * @reqid: identifier for the related scheduled scan request
+ *
+ * The driver can call this function to inform cfg80211 that the
+ * scheduled scan had to be stopped, for whatever reason. The driver
+ * is then called back via the sched_scan_stop operation when done.
+ */
+void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid);
+
+/**
+ * cfg80211_sched_scan_stopped_locked - notify that the scheduled scan has stopped
+ *
+ * @wiphy: the wiphy on which the scheduled scan stopped
+ * @reqid: identifier for the related scheduled scan request
+ *
+ * The driver can call this function to inform cfg80211 that the
+ * scheduled scan had to be stopped, for whatever reason. The driver
+ * is then called back via the sched_scan_stop operation when done.
+ * This function should be called with the wiphy mutex held.
+ */
+void cfg80211_sched_scan_stopped_locked(struct wiphy *wiphy, u64 reqid);
+
+/**
+ * cfg80211_inform_bss_frame_data - inform cfg80211 of a received BSS frame
+ * @wiphy: the wiphy reporting the BSS
+ * @data: the BSS metadata
+ * @mgmt: the management frame (probe response or beacon)
+ * @len: length of the management frame
+ * @gfp: context flags
+ *
+ * This informs cfg80211 that BSS information was found and
+ * the BSS should be updated/added.
+ *
+ * Return: A referenced struct, must be released with cfg80211_put_bss()!
+ * Or %NULL on error.
+ */
+struct cfg80211_bss * __must_check
+cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ gfp_t gfp);
+
+static inline struct cfg80211_bss * __must_check
+cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
+ struct ieee80211_channel *rx_channel,
+ enum nl80211_bss_scan_width scan_width,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ s32 signal, gfp_t gfp)
+{
+ struct cfg80211_inform_bss data = {
+ .chan = rx_channel,
+ .scan_width = scan_width,
+ .signal = signal,
+ };
+
+ return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp);
+}
+
+static inline struct cfg80211_bss * __must_check
+cfg80211_inform_bss_frame(struct wiphy *wiphy,
+ struct ieee80211_channel *rx_channel,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ s32 signal, gfp_t gfp)
+{
+ struct cfg80211_inform_bss data = {
+ .chan = rx_channel,
+ .scan_width = NL80211_BSS_CHAN_WIDTH_20,
+ .signal = signal,
+ };
+
+ return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp);
+}
+
+/**
+ * cfg80211_gen_new_bssid - generate a nontransmitted BSSID for multi-BSSID
+ * @bssid: transmitter BSSID
+ * @max_bssid: max BSSID indicator, taken from Multiple BSSID element
+ * @mbssid_index: BSSID index, taken from Multiple BSSID index element
+ * @new_bssid: calculated nontransmitted BSSID
+ */
+static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid,
+ u8 mbssid_index, u8 *new_bssid)
+{
+ u64 bssid_u64 = ether_addr_to_u64(bssid);
+ u64 mask = GENMASK_ULL(max_bssid - 1, 0);
+ u64 new_bssid_u64;
+
+ new_bssid_u64 = bssid_u64 & ~mask;
+
+ new_bssid_u64 |= ((bssid_u64 & mask) + mbssid_index) & mask;
+
+ u64_to_ether_addr(new_bssid_u64, new_bssid);
+}
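+
+/*
+ * Worked example (illustrative values): with transmitter BSSID
+ * 00:11:22:33:44:40, max_bssid = 4 (the low 4 bits of the address vary)
+ * and mbssid_index = 5, only the low nibble changes:
+ * (0x0 + 5) & 0xf = 0x5, so new_bssid becomes 00:11:22:33:44:45.
+ */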
+
+/**
+ * cfg80211_is_element_inherited - returns whether the element ID should be inherited
+ * @element: element to check
+ * @non_inherit_element: non inheritance element
+ */
+bool cfg80211_is_element_inherited(const struct element *element,
+ const struct element *non_inherit_element);
+
+/**
+ * cfg80211_merge_profile - merges an MBSSID profile if it is split between IEs
+ * @ie: IEs
+ * @ielen: length of IEs
+ * @mbssid_elem: current MBSSID element
+ * @sub_elem: current MBSSID subelement (profile)
+ * @merged_ie: location of the merged profile
+ * @max_copy_len: max merged profile length
+ */
+size_t cfg80211_merge_profile(const u8 *ie, size_t ielen,
+ const struct element *mbssid_elem,
+ const struct element *sub_elem,
+ u8 *merged_ie, size_t max_copy_len);
+
+/**
+ * enum cfg80211_bss_frame_type - frame type that the BSS data came from
+ * @CFG80211_BSS_FTYPE_UNKNOWN: driver doesn't know whether the data is
+ * from a beacon or probe response
+ * @CFG80211_BSS_FTYPE_BEACON: data comes from a beacon
+ * @CFG80211_BSS_FTYPE_PRESP: data comes from a probe response
+ */
+enum cfg80211_bss_frame_type {
+ CFG80211_BSS_FTYPE_UNKNOWN,
+ CFG80211_BSS_FTYPE_BEACON,
+ CFG80211_BSS_FTYPE_PRESP,
+};
+
+/**
+ * cfg80211_get_ies_channel_number - returns the channel number from IEs
+ * @ie: IEs
+ * @ielen: length of IEs
+ * @band: enum nl80211_band of the channel
+ * @ftype: frame type
+ *
+ * Returns the channel number, or -1 if none could be determined.
+ */
+int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+ enum nl80211_band band,
+ enum cfg80211_bss_frame_type ftype);
+
+/**
+ * cfg80211_inform_bss_data - inform cfg80211 of a new BSS
+ *
+ * @wiphy: the wiphy reporting the BSS
+ * @data: the BSS metadata
+ * @ftype: frame type (if known)
+ * @bssid: the BSSID of the BSS
+ * @tsf: the TSF sent by the peer in the beacon/probe response (or 0)
+ * @capability: the capability field sent by the peer
+ * @beacon_interval: the beacon interval announced by the peer
+ * @ie: additional IEs sent by the peer
+ * @ielen: length of the additional IEs
+ * @gfp: context flags
+ *
+ * This informs cfg80211 that BSS information was found and
+ * the BSS should be updated/added.
+ *
+ * Return: A referenced struct, must be released with cfg80211_put_bss()!
+ * Or %NULL on error.
+ */
+struct cfg80211_bss * __must_check
+cfg80211_inform_bss_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ enum cfg80211_bss_frame_type ftype,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ gfp_t gfp);
+
+static inline struct cfg80211_bss * __must_check
+cfg80211_inform_bss_width(struct wiphy *wiphy,
+ struct ieee80211_channel *rx_channel,
+ enum nl80211_bss_scan_width scan_width,
+ enum cfg80211_bss_frame_type ftype,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ s32 signal, gfp_t gfp)
+{
+ struct cfg80211_inform_bss data = {
+ .chan = rx_channel,
+ .scan_width = scan_width,
+ .signal = signal,
+ };
+
+ return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf,
+ capability, beacon_interval, ie, ielen,
+ gfp);
+}
+
+static inline struct cfg80211_bss * __must_check
+cfg80211_inform_bss(struct wiphy *wiphy,
+ struct ieee80211_channel *rx_channel,
+ enum cfg80211_bss_frame_type ftype,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ s32 signal, gfp_t gfp)
+{
+ struct cfg80211_inform_bss data = {
+ .chan = rx_channel,
+ .scan_width = NL80211_BSS_CHAN_WIDTH_20,
+ .signal = signal,
+ };
+
+ return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf,
+ capability, beacon_interval, ie, ielen,
+ gfp);
+}
+
+/**
+ * cfg80211_get_bss - get a BSS reference
+ * @wiphy: the wiphy this BSS struct belongs to
+ * @channel: the channel to search on (or %NULL)
+ * @bssid: the desired BSSID (or %NULL)
+ * @ssid: the desired SSID (or %NULL)
+ * @ssid_len: length of the SSID (or 0)
+ * @bss_type: type of BSS, see &enum ieee80211_bss_type
+ * @privacy: privacy filter, see &enum ieee80211_privacy
+ */
+struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ const u8 *bssid,
+ const u8 *ssid, size_t ssid_len,
+ enum ieee80211_bss_type bss_type,
+ enum ieee80211_privacy privacy);
+static inline struct cfg80211_bss *
+cfg80211_get_ibss(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ const u8 *ssid, size_t ssid_len)
+{
+ return cfg80211_get_bss(wiphy, channel, NULL, ssid, ssid_len,
+ IEEE80211_BSS_TYPE_IBSS,
+ IEEE80211_PRIVACY_ANY);
+}
+
+/**
+ * cfg80211_ref_bss - reference BSS struct
+ * @wiphy: the wiphy this BSS struct belongs to
+ * @bss: the BSS struct to reference
+ *
+ * Increments the refcount of the given BSS struct.
+ */
+void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
+
+/**
+ * cfg80211_put_bss - unref BSS struct
+ * @wiphy: the wiphy this BSS struct belongs to
+ * @bss: the BSS struct
+ *
+ * Decrements the refcount of the given BSS struct.
+ */
+void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
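+
+/*
+ * Illustrative sketch (assumption): every successful lookup returns a
+ * referenced entry, which must be balanced with cfg80211_put_bss():
+ *
+ *	struct cfg80211_bss *bss;
+ *
+ *	bss = cfg80211_get_bss(wiphy, chan, bssid, ssid, ssid_len,
+ *			       IEEE80211_BSS_TYPE_ESS,
+ *			       IEEE80211_PRIVACY_ANY);
+ *	if (bss)
+ *		cfg80211_put_bss(wiphy, bss);
+ */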
+
+/**
+ * cfg80211_unlink_bss - unlink BSS from internal data structures
+ * @wiphy: the wiphy
+ * @bss: the bss to remove
+ *
+ * This function removes the given BSS from the internal data structures
+ * thereby making it no longer show up in scan results etc. Use this
+ * function when you detect a BSS is gone. Normally BSSes will also time
+ * out, so it is not necessary to use this function at all.
+ */
+void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
+
+/**
+ * cfg80211_bss_iter - iterate all BSS entries
+ *
+ * This function iterates over the BSS entries associated with the given wiphy
+ * and calls the callback for the iterated BSS. The iterator function is not
+ * allowed to call functions that might modify the internal state of the BSS DB.
+ *
+ * @wiphy: the wiphy
+ * @chandef: if given, the iterator function will be called only if the channel
+ * of the currently iterated BSS is a subset of the given channel.
+ * @iter: the iterator function to call
+ * @iter_data: an argument to the iterator function
+ */
+void cfg80211_bss_iter(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ void (*iter)(struct wiphy *wiphy,
+ struct cfg80211_bss *bss,
+ void *data),
+ void *iter_data);
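+
+/*
+ * Illustrative sketch (assumption): an iterator callback that only counts
+ * entries; per the rules above it must not modify the BSS database:
+ *
+ *	static void count_bss(struct wiphy *wiphy, struct cfg80211_bss *bss,
+ *			      void *data)
+ *	{
+ *		(*(unsigned int *)data)++;
+ *	}
+ *
+ *	unsigned int n = 0;
+ *
+ *	cfg80211_bss_iter(wiphy, NULL, count_bss, &n);
+ */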
+
+static inline enum nl80211_bss_scan_width
+cfg80211_chandef_to_scan_width(const struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ return NL80211_BSS_CHAN_WIDTH_5;
+ case NL80211_CHAN_WIDTH_10:
+ return NL80211_BSS_CHAN_WIDTH_10;
+ default:
+ return NL80211_BSS_CHAN_WIDTH_20;
+ }
+}
+
+/**
+ * cfg80211_rx_mlme_mgmt - notification of processed MLME management frame
+ * @dev: network device
+ * @buf: authentication frame (header + body)
+ * @len: length of the frame data
+ *
+ * This function is called whenever an authentication, disassociation or
+ * deauthentication frame has been received and processed in station mode.
+ * After being asked to authenticate via cfg80211_ops::auth() the driver must
+ * call either this function or cfg80211_auth_timeout().
+ * After being asked to associate via cfg80211_ops::assoc() the driver must
+ * call either cfg80211_rx_assoc_resp() or cfg80211_assoc_failure().
+ * While connected, the driver must call this for received and processed
+ * disassociation and deauthentication frames. If the frame couldn't be used
+ * because it was unprotected, the driver must call the function
+ * cfg80211_rx_unprot_mlme_mgmt() instead.
+ *
+ * This function may sleep. The caller must hold the corresponding wdev's mutex.
+ */
+void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len);
+
+/**
+ * cfg80211_auth_timeout - notification of timed out authentication
+ * @dev: network device
+ * @addr: The MAC address of the device with which the authentication timed out
+ *
+ * This function may sleep. The caller must hold the corresponding wdev's
+ * mutex.
+ */
+void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
+
+/**
+ * struct cfg80211_rx_assoc_resp - association response data
+ * @links.bss: the BSS that association was requested with, ownership of the
+ * pointer moves to cfg80211 in the call to cfg80211_rx_assoc_resp()
+ * @buf: (Re)Association Response frame (header + body)
+ * @len: length of the frame data
+ * @uapsd_queues: bitmap of queues configured for uapsd. Same format
+ * as the AC bitmap in the QoS info field
+ * @req_ies: information elements from the (Re)Association Request frame
+ * @req_ies_len: length of req_ies data
+ * @ap_mld_addr: AP MLD address (in case of MLO)
+ * @links: per-link information indexed by link ID, use links[0] for
+ * non-MLO connections
+ */
+struct cfg80211_rx_assoc_resp {
+ const u8 *buf;
+ size_t len;
+ const u8 *req_ies;
+ size_t req_ies_len;
+ int uapsd_queues;
+ const u8 *ap_mld_addr;
+ struct {
+ const u8 *addr;
+ struct cfg80211_bss *bss;
+ } links[IEEE80211_MLD_MAX_NUM_LINKS];
+};
+
+/**
+ * cfg80211_rx_assoc_resp - notification of processed association response
+ * @dev: network device
+ * @data: association response data, &struct cfg80211_rx_assoc_resp
+ *
+ * After being asked to associate via cfg80211_ops::assoc() the driver must
+ * call either this function or cfg80211_assoc_failure().
+ *
+ * This function may sleep. The caller must hold the corresponding wdev's mutex.
+ */
+void cfg80211_rx_assoc_resp(struct net_device *dev,
+ struct cfg80211_rx_assoc_resp *data);
+
+/**
+ * struct cfg80211_assoc_failure - association failure data
+ * @ap_mld_addr: AP MLD address, or %NULL
+ * @bss: list of BSSes, must use entry 0 for non-MLO connections
+ * (@ap_mld_addr is %NULL)
+ * @timeout: indicates the association failed due to timeout, otherwise
+ * the association was abandoned for a reason reported through some
+ * other API (e.g. deauth RX)
+ */
+struct cfg80211_assoc_failure {
+ const u8 *ap_mld_addr;
+ struct cfg80211_bss *bss[IEEE80211_MLD_MAX_NUM_LINKS];
+ bool timeout;
+};
+
+/**
+ * cfg80211_assoc_failure - notification of association failure
+ * @dev: network device
+ * @data: data describing the association failure
+ *
+ * This function may sleep. The caller must hold the corresponding wdev's mutex.
+ */
+void cfg80211_assoc_failure(struct net_device *dev,
+ struct cfg80211_assoc_failure *data);
+
+/**
+ * cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame
+ * @dev: network device
+ * @buf: 802.11 frame (header + body)
+ * @len: length of the frame data
+ * @reconnect: immediate reconnect is desired (include the nl80211 attribute)
+ *
+ * This function is called whenever deauthentication has been processed in
+ * station mode. This includes both received deauthentication frames and
+ * locally generated ones. This function may sleep. The caller must hold the
+ * corresponding wdev's mutex.
+ */
+void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len,
+ bool reconnect);
+
+/**
+ * cfg80211_rx_unprot_mlme_mgmt - notification of unprotected mlme mgmt frame
+ * @dev: network device
+ * @buf: received management frame (header + body)
+ * @len: length of the frame data
+ *
+ * This function is called whenever a received deauthentication or disassociation
+ * frame has been dropped in station mode because of MFP being used but the
+ * frame was not protected. This is also used to notify reception of a Beacon
+ * frame that was dropped because it did not include a valid MME MIC while
+ * beacon protection was enabled (BIGTK configured in station mode).
+ *
+ * This function may sleep.
+ */
+void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev,
+ const u8 *buf, size_t len);
+
+/**
+ * cfg80211_michael_mic_failure - notification of Michael MIC failure (TKIP)
+ * @dev: network device
+ * @addr: The source MAC address of the frame
+ * @key_type: The key type that the received frame used
+ * @key_id: Key identifier (0..3). Can be -1 if missing.
+ * @tsc: The TSC value of the frame that generated the MIC failure (6 octets)
+ * @gfp: allocation flags
+ *
+ * This function is called whenever the local MAC detects a MIC failure in a
+ * received frame. This matches with MLME-MICHAELMICFAILURE.indication()
+ * primitive.
+ */
+void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
+ enum nl80211_key_type key_type, int key_id,
+ const u8 *tsc, gfp_t gfp);
+
+/**
+ * cfg80211_ibss_joined - notify cfg80211 that device joined an IBSS
+ *
+ * @dev: network device
+ * @bssid: the BSSID of the IBSS joined
+ * @channel: the channel of the IBSS joined
+ * @gfp: allocation flags
+ *
+ * This function notifies cfg80211 that the device joined an IBSS or
+ * switched to a different BSSID. Before this function can be called,
+ * either a beacon has to have been received from the IBSS, or one of
+ * the cfg80211_inform_bss{,_frame} functions must have been called
+ * with the locally generated beacon -- this guarantees that there is
+ * always a scan result for this IBSS. cfg80211 will handle the rest.
+ */
+void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
+ struct ieee80211_channel *channel, gfp_t gfp);
+
+/**
+ * cfg80211_notify_new_peer_candidate - notify cfg80211 of a new mesh peer
+ * candidate
+ *
+ * @dev: network device
+ * @macaddr: the MAC address of the new candidate
+ * @ie: information elements advertised by the peer candidate
+ * @ie_len: length of the information elements buffer
+ * @sig_dbm: signal level in dBm
+ * @gfp: allocation flags
+ *
+ * This function notifies cfg80211 that the mesh peer candidate has been
+ * detected, most likely via a beacon or, less likely, via a probe response.
+ * cfg80211 then sends a notification to userspace.
+ */
+void cfg80211_notify_new_peer_candidate(struct net_device *dev,
+ const u8 *macaddr, const u8 *ie, u8 ie_len,
+ int sig_dbm, gfp_t gfp);
+
+/**
+ * DOC: RFkill integration
+ *
+ * RFkill integration in cfg80211 is almost invisible to drivers,
+ * as cfg80211 automatically registers an rfkill instance for each
+ * wireless device it knows about. Soft kill is also translated
+ * into disconnecting and turning all interfaces off; drivers are
+ * expected to turn off the device when all interfaces are down.
+ *
+ * However, devices may have a hard RFkill line, in which case they
+ * also need to interact with the rfkill subsystem, via cfg80211.
+ * They can do this with a few helper functions documented here.
+ */
+
+/**
+ * wiphy_rfkill_set_hw_state_reason - notify cfg80211 about hw block state
+ * @wiphy: the wiphy
+ * @blocked: block status
+ * @reason: one of reasons in &enum rfkill_hard_block_reasons
+ */
+void wiphy_rfkill_set_hw_state_reason(struct wiphy *wiphy, bool blocked,
+ enum rfkill_hard_block_reasons reason);
+
+static inline void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked)
+{
+ wiphy_rfkill_set_hw_state_reason(wiphy, blocked,
+ RFKILL_HARD_BLOCK_SIGNAL);
+}
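+
+/*
+ * Illustrative sketch (assumption): a driver that detects a change on its
+ * hard RF-kill line might simply report the new state:
+ *
+ *	wiphy_rfkill_set_hw_state(wiphy, blocked);
+ */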
+
+/**
+ * wiphy_rfkill_start_polling - start polling rfkill
+ * @wiphy: the wiphy
+ */
+void wiphy_rfkill_start_polling(struct wiphy *wiphy);
+
+/**
+ * wiphy_rfkill_stop_polling - stop polling rfkill
+ * @wiphy: the wiphy
+ */
+static inline void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
+{
+ rfkill_pause_polling(wiphy->rfkill);
+}
+
+/**
+ * DOC: Vendor commands
+ *
+ * Occasionally, there are special protocol or firmware features that
+ * can't be implemented very openly. For this and similar cases, the
+ * vendor command functionality allows implementing the features with
+ * (typically closed-source) userspace and firmware, using nl80211 as
+ * the configuration mechanism.
+ *
+ * A driver supporting vendor commands must register them as an array
+ * in struct wiphy, with handlers for each one; each command has an
+ * OUI and sub-command ID to identify it.
+ *
+ * Note that this feature should not be (ab)used to implement protocol
+ * features that could openly be shared across drivers. In particular,
+ * it must never be required to use vendor commands to implement any
+ * "normal" functionality that higher-level userspace like connection
+ * managers etc. need.
+ */
+
+struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
+ enum nl80211_commands cmd,
+ enum nl80211_attrs attr,
+ int approxlen);
+
+struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ enum nl80211_commands cmd,
+ enum nl80211_attrs attr,
+ unsigned int portid,
+ int vendor_event_idx,
+ int approxlen, gfp_t gfp);
+
+void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp);
+
+/**
+ * cfg80211_vendor_cmd_alloc_reply_skb - allocate vendor command reply
+ * @wiphy: the wiphy
+ * @approxlen: an upper bound of the length of the data that will
+ * be put into the skb
+ *
+ * This function allocates and pre-fills an skb for a reply to
+ * a vendor command. Since it is intended for a reply, calling
+ * it outside of a vendor command's doit() operation is invalid.
+ *
+ * The returned skb is pre-filled with some identifying data in
+ * a way that any data that is put into the skb (with skb_put(),
+ * nla_put() or similar) will end up being within the
+ * %NL80211_ATTR_VENDOR_DATA attribute, so all that needs to be done
+ * with the skb is adding data for the corresponding userspace tool
+ * which can then read that data out of the vendor data attribute.
+ * You must not modify the skb in any other way.
+ *
+ * When done, call cfg80211_vendor_cmd_reply() with the skb and return
+ * its error code as the result of the doit() operation.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
+{
+ return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_VENDOR,
+ NL80211_ATTR_VENDOR_DATA, approxlen);
+}
+
+/**
+ * cfg80211_vendor_cmd_reply - send the reply skb
+ * @skb: The skb, must have been allocated with
+ * cfg80211_vendor_cmd_alloc_reply_skb()
+ *
+ * Since calling this function will usually be the last thing
+ * before returning from the vendor command doit() you should
+ * return the error code. Note that this function consumes the
+ * skb regardless of the return value.
+ *
+ * Return: An error code or 0 on success.
+ */
+int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
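+
+/*
+ * Illustrative sketch (assumption): the tail of a vendor command doit()
+ * handler replying with a single u32; the layout inside
+ * %NL80211_ATTR_VENDOR_DATA is driver-defined, and VENDOR_ATTR_STATUS is a
+ * made-up attribute for this example:
+ *
+ *	struct sk_buff *msg;
+ *
+ *	msg = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, sizeof(u32));
+ *	if (!msg)
+ *		return -ENOMEM;
+ *	if (nla_put_u32(msg, VENDOR_ATTR_STATUS, status)) {
+ *		kfree_skb(msg);
+ *		return -ENOBUFS;
+ *	}
+ *	return cfg80211_vendor_cmd_reply(msg);
+ */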
+
+/**
+ * cfg80211_vendor_cmd_get_sender - get the current sender netlink ID
+ * @wiphy: the wiphy
+ *
+ * Return the current netlink port ID in a vendor command handler.
+ * Valid to call only there.
+ */
+unsigned int cfg80211_vendor_cmd_get_sender(struct wiphy *wiphy);
+
+/**
+ * cfg80211_vendor_event_alloc - allocate vendor-specific event skb
+ * @wiphy: the wiphy
+ * @wdev: the wireless device
+ * @event_idx: index of the vendor event in the wiphy's vendor_events
+ * @approxlen: an upper bound of the length of the data that will
+ * be put into the skb
+ * @gfp: allocation flags
+ *
+ * This function allocates and pre-fills an skb for an event on the
+ * vendor-specific multicast group.
+ *
+ * If wdev != NULL, both the ifindex and identifier of the specified
+ * wireless device are added to the event message before the vendor data
+ * attribute.
+ *
+ * When done filling the skb, call cfg80211_vendor_event() with the
+ * skb to send the event.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+cfg80211_vendor_event_alloc(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int approxlen, int event_idx, gfp_t gfp)
+{
+ return __cfg80211_alloc_event_skb(wiphy, wdev, NL80211_CMD_VENDOR,
+ NL80211_ATTR_VENDOR_DATA,
+ 0, event_idx, approxlen, gfp);
+}
+
+/**
+ * cfg80211_vendor_event_alloc_ucast - alloc unicast vendor-specific event skb
+ * @wiphy: the wiphy
+ * @wdev: the wireless device
+ * @event_idx: index of the vendor event in the wiphy's vendor_events
+ * @portid: port ID of the receiver
+ * @approxlen: an upper bound of the length of the data that will
+ * be put into the skb
+ * @gfp: allocation flags
+ *
+ * This function allocates and pre-fills an skb for an event to send to
+ * a specific (userland) socket. This socket would previously have been
+ * obtained by cfg80211_vendor_cmd_get_sender(), and the caller MUST take
+ * care to register a netlink notifier to see when the socket closes.
+ *
+ * If wdev != NULL, both the ifindex and identifier of the specified
+ * wireless device are added to the event message before the vendor data
+ * attribute.
+ *
+ * When done filling the skb, call cfg80211_vendor_event() with the
+ * skb to send the event.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+cfg80211_vendor_event_alloc_ucast(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ unsigned int portid, int approxlen,
+ int event_idx, gfp_t gfp)
+{
+ return __cfg80211_alloc_event_skb(wiphy, wdev, NL80211_CMD_VENDOR,
+ NL80211_ATTR_VENDOR_DATA,
+ portid, event_idx, approxlen, gfp);
+}
+
+/**
+ * cfg80211_vendor_event - send the event
+ * @skb: The skb, must have been allocated with cfg80211_vendor_event_alloc()
+ * @gfp: allocation flags
+ *
+ * This function sends the given @skb, which must have been allocated
+ * by cfg80211_vendor_event_alloc(), as an event. It always consumes it.
+ */
+static inline void cfg80211_vendor_event(struct sk_buff *skb, gfp_t gfp)
+{
+ __cfg80211_send_event_skb(skb, gfp);
+}
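+
+/*
+ * Illustrative sketch (assumption): allocating, filling and sending a
+ * driver-defined event; VENDOR_EVENT_IDX and VENDOR_ATTR_EVENT_DATA are
+ * made-up names for this example:
+ *
+ *	struct sk_buff *msg;
+ *
+ *	msg = cfg80211_vendor_event_alloc(wiphy, wdev, 100, VENDOR_EVENT_IDX,
+ *					  GFP_KERNEL);
+ *	if (!msg)
+ *		return;
+ *	if (nla_put_u32(msg, VENDOR_ATTR_EVENT_DATA, value)) {
+ *		kfree_skb(msg);
+ *		return;
+ *	}
+ *	cfg80211_vendor_event(msg, GFP_KERNEL);
+ */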
+
+#ifdef CONFIG_NL80211_TESTMODE
+/**
+ * DOC: Test mode
+ *
+ * Test mode is a set of utility functions to allow drivers to
+ * interact with driver-specific tools to aid, for instance,
+ * factory programming.
+ *
+ * This chapter describes how drivers interact with it, for more
+ * information see the nl80211 book's chapter on it.
+ */
+
+/**
+ * cfg80211_testmode_alloc_reply_skb - allocate testmode reply
+ * @wiphy: the wiphy
+ * @approxlen: an upper bound of the length of the data that will
+ * be put into the skb
+ *
+ * This function allocates and pre-fills an skb for a reply to
+ * the testmode command. Since it is intended for a reply, calling
+ * it outside of the @testmode_cmd operation is invalid.
+ *
+ * The returned skb is pre-filled with the wiphy index and set up in
+ * a way that any data that is put into the skb (with skb_put(),
+ * nla_put() or similar) will end up being within the
+ * %NL80211_ATTR_TESTDATA attribute, so all that needs to be done
+ * with the skb is adding data for the corresponding userspace tool
+ * which can then read that data out of the testdata attribute. You
+ * must not modify the skb in any other way.
+ *
+ * When done, call cfg80211_testmode_reply() with the skb and return
+ * its error code as the result of the @testmode_cmd operation.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
+{
+ return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_TESTMODE,
+ NL80211_ATTR_TESTDATA, approxlen);
+}
+
+/**
+ * cfg80211_testmode_reply - send the reply skb
+ * @skb: The skb, must have been allocated with
+ * cfg80211_testmode_alloc_reply_skb()
+ *
+ * Since calling this function will usually be the last thing
+ * before returning from the @testmode_cmd you should return
+ * the error code. Note that this function consumes the skb
+ * regardless of the return value.
+ *
+ * Return: An error code or 0 on success.
+ */
+static inline int cfg80211_testmode_reply(struct sk_buff *skb)
+{
+ return cfg80211_vendor_cmd_reply(skb);
+}
+
+/**
+ * cfg80211_testmode_alloc_event_skb - allocate testmode event
+ * @wiphy: the wiphy
+ * @approxlen: an upper bound of the length of the data that will
+ * be put into the skb
+ * @gfp: allocation flags
+ *
+ * This function allocates and pre-fills an skb for an event on the
+ * testmode multicast group.
+ *
+ * The returned skb is set up in the same way as with
+ * cfg80211_testmode_alloc_reply_skb() but prepared for an event. As
+ * there, you should simply add data to it that will then end up in the
+ * %NL80211_ATTR_TESTDATA attribute. Again, you must not modify the skb
+ * in any other way.
+ *
+ * When done filling the skb, call cfg80211_testmode_event() with the
+ * skb to send the event.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, int approxlen, gfp_t gfp)
+{
+ return __cfg80211_alloc_event_skb(wiphy, NULL, NL80211_CMD_TESTMODE,
+ NL80211_ATTR_TESTDATA, 0, -1,
+ approxlen, gfp);
+}
+
+/**
+ * cfg80211_testmode_event - send the event
+ * @skb: The skb, must have been allocated with
+ * cfg80211_testmode_alloc_event_skb()
+ * @gfp: allocation flags
+ *
+ * This function sends the given @skb, which must have been allocated
+ * by cfg80211_testmode_alloc_event_skb(), as an event. It always
+ * consumes it.
+ */
+static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
+{
+ __cfg80211_send_event_skb(skb, gfp);
+}
+
+#define CFG80211_TESTMODE_CMD(cmd) .testmode_cmd = (cmd),
+#define CFG80211_TESTMODE_DUMP(cmd) .testmode_dump = (cmd),
+#else
+#define CFG80211_TESTMODE_CMD(cmd)
+#define CFG80211_TESTMODE_DUMP(cmd)
+#endif
+
+/**
+ * struct cfg80211_fils_resp_params - FILS connection response params
+ * @kek: KEK derived from a successful FILS connection (may be %NULL)
+ * @kek_len: Length of @kek in octets
+ * @update_erp_next_seq_num: Boolean value to specify whether the value in
+ * @erp_next_seq_num is valid.
+ * @erp_next_seq_num: The next sequence number to use in ERP message in
+ * FILS Authentication. This value should be specified irrespective of the
+ * status for a FILS connection.
+ * @pmk: A new PMK if derived from a successful FILS connection (may be %NULL).
+ * @pmk_len: Length of @pmk in octets
+ * @pmkid: A new PMKID if derived from a successful FILS connection or the PMKID
+ * used for this FILS connection (may be %NULL).
+ */
+struct cfg80211_fils_resp_params {
+ const u8 *kek;
+ size_t kek_len;
+ bool update_erp_next_seq_num;
+ u16 erp_next_seq_num;
+ const u8 *pmk;
+ size_t pmk_len;
+ const u8 *pmkid;
+};
+
+/**
+ * struct cfg80211_connect_resp_params - Connection response params
+ * @status: Status code, %WLAN_STATUS_SUCCESS for successful connection, use
+ * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ * the real status code for failures. If this call is used to report a
+ * failure due to a timeout (e.g., not receiving an Authentication frame
+ * from the AP) instead of an explicit rejection by the AP, -1 is used to
+ * indicate that this is a failure, but without a status code.
+ * @timeout_reason is used to report the reason for the timeout in that
+ * case.
+ * @req_ie: Association request IEs (may be %NULL)
+ * @req_ie_len: Association request IEs length
+ * @resp_ie: Association response IEs (may be %NULL)
+ * @resp_ie_len: Association response IEs length
+ * @fils: FILS connection response parameters.
+ * @timeout_reason: Reason for connection timeout. This is used when the
+ * connection fails due to a timeout instead of an explicit rejection from
+ * the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
+ * not known. This value is used only if @status < 0 to indicate that the
+ * failure is due to a timeout and not due to explicit rejection by the AP.
+ * This value is ignored in other cases (@status >= 0).
+ * @valid_links: For MLO connection, bit mask of the valid link IDs. Otherwise
+ * zero.
+ * @ap_mld_addr: For MLO connection, MLD address of the AP. Otherwise %NULL.
+ * @links: For MLO connection, contains link info for the valid links indicated
+ * using @valid_links. For non-MLO connection, links[0] contains the
+ * connected AP info.
+ * @links.addr: For MLO connection, MAC address of the STA link. Otherwise
+ * %NULL.
+ * @links.bssid: For MLO connection, MAC address of the AP link. For non-MLO
+ * connection, links[0].bssid points to the BSSID of the AP (may be %NULL).
+ * @links.bss: For MLO connection, entry of bss to which STA link is connected.
+ * For non-MLO connection, links[0].bss points to entry of bss to which STA
+ * is connected. It can be obtained through cfg80211_get_bss() (may be
+ * %NULL). It is recommended to store the bss from the connect request,
+ * hold a reference to it, and return it through this parameter to avoid a
+ * warning if the bss expires during the connection, especially for drivers
+ * implementing the connect op. Only one of @links.bssid and @links.bss
+ * needs to be specified.
+ */
+struct cfg80211_connect_resp_params {
+ int status;
+ const u8 *req_ie;
+ size_t req_ie_len;
+ const u8 *resp_ie;
+ size_t resp_ie_len;
+ struct cfg80211_fils_resp_params fils;
+ enum nl80211_timeout_reason timeout_reason;
+
+ const u8 *ap_mld_addr;
+ u16 valid_links;
+ struct {
+ const u8 *addr;
+ const u8 *bssid;
+ struct cfg80211_bss *bss;
+ } links[IEEE80211_MLD_MAX_NUM_LINKS];
+};
+
+/**
+ * cfg80211_connect_done - notify cfg80211 of connection result
+ *
+ * @dev: network device
+ * @params: connection response parameters
+ * @gfp: allocation flags
+ *
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_bss(), but takes a structure pointer for connection response
+ * parameters. Only one of the functions among cfg80211_connect_bss(),
+ * cfg80211_connect_result(), cfg80211_connect_timeout(),
+ * and cfg80211_connect_done() should be called.
+ */
+void cfg80211_connect_done(struct net_device *dev,
+ struct cfg80211_connect_resp_params *params,
+ gfp_t gfp);
+
+/**
+ * cfg80211_connect_bss - notify cfg80211 of connection result
+ *
+ * @dev: network device
+ * @bssid: the BSSID of the AP
+ * @bss: Entry of bss to which the STA got connected, can be obtained through
+ * cfg80211_get_bss() (may be %NULL). It is recommended to store the bss
+ * from the connect request, hold a reference to it, and return it through
+ * this param to avoid a warning if the bss expires during the connection,
+ * especially for drivers implementing the connect op.
+ * Only one parameter among @bssid and @bss needs to be specified.
+ * @req_ie: association request IEs (may be %NULL)
+ * @req_ie_len: association request IEs length
+ * @resp_ie: association response IEs (may be %NULL)
+ * @resp_ie_len: assoc response IEs length
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use
+ * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ * the real status code for failures. If this call is used to report a
+ * failure due to a timeout (e.g., not receiving an Authentication frame
+ * from the AP) instead of an explicit rejection by the AP, -1 is used to
+ * indicate that this is a failure, but without a status code.
+ * @timeout_reason is used to report the reason for the timeout in that
+ * case.
+ * @gfp: allocation flags
+ * @timeout_reason: reason for connection timeout. This is used when the
+ * connection fails due to a timeout instead of an explicit rejection from
+ * the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
+ * not known. This value is used only if @status < 0 to indicate that the
+ * failure is due to a timeout and not due to explicit rejection by the AP.
+ * This value is ignored in other cases (@status >= 0).
+ *
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_result(), but with the option of identifying the exact bss
+ * entry for the connection. Only one of the functions among
+ * cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
+ */
+static inline void
+cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
+ struct cfg80211_bss *bss, const u8 *req_ie,
+ size_t req_ie_len, const u8 *resp_ie,
+ size_t resp_ie_len, int status, gfp_t gfp,
+ enum nl80211_timeout_reason timeout_reason)
+{
+ struct cfg80211_connect_resp_params params;
+
+ memset(&params, 0, sizeof(params));
+ params.status = status;
+ params.links[0].bssid = bssid;
+ params.links[0].bss = bss;
+ params.req_ie = req_ie;
+ params.req_ie_len = req_ie_len;
+ params.resp_ie = resp_ie;
+ params.resp_ie_len = resp_ie_len;
+ params.timeout_reason = timeout_reason;
+
+ cfg80211_connect_done(dev, &params, gfp);
+}
+
+/**
+ * cfg80211_connect_result - notify cfg80211 of connection result
+ *
+ * @dev: network device
+ * @bssid: the BSSID of the AP
+ * @req_ie: association request IEs (may be %NULL)
+ * @req_ie_len: association request IEs length
+ * @resp_ie: association response IEs (may be %NULL)
+ * @resp_ie_len: assoc response IEs length
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use
+ * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ * the real status code for failures.
+ * @gfp: allocation flags
+ *
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_bss() which allows the exact bss entry to be specified. Only
+ * one of the functions among cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
+ */
+static inline void
+cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
+ const u8 *req_ie, size_t req_ie_len,
+ const u8 *resp_ie, size_t resp_ie_len,
+ u16 status, gfp_t gfp)
+{
+ cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, resp_ie,
+ resp_ie_len, status, gfp,
+ NL80211_TIMEOUT_UNSPECIFIED);
+}
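+
+/*
+ * Illustrative sketch (assumption): a fullmac driver reporting a successful
+ * connection from its firmware event handler:
+ *
+ *	cfg80211_connect_result(dev, bssid, req_ie, req_ie_len,
+ *				resp_ie, resp_ie_len,
+ *				WLAN_STATUS_SUCCESS, GFP_KERNEL);
+ */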
+
+/**
+ * cfg80211_connect_timeout - notify cfg80211 of connection timeout
+ *
+ * @dev: network device
+ * @bssid: the BSSID of the AP
+ * @req_ie: association request IEs (may be %NULL)
+ * @req_ie_len: association request IEs length
+ * @gfp: allocation flags
+ * @timeout_reason: reason for connection timeout.
+ *
+ * It should be called by the underlying driver whenever connect() has failed
+ * in a sequence where no explicit authentication/association rejection was
+ * received from the AP. This could happen, e.g., due to not being able to send
+ * out the Authentication or Association Request frame or timing out while
+ * waiting for the response. Only one of the functions among
+ * cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
+ */
+static inline void
+cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
+ const u8 *req_ie, size_t req_ie_len, gfp_t gfp,
+ enum nl80211_timeout_reason timeout_reason)
+{
+ cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, NULL, 0, -1,
+ gfp, timeout_reason);
+}
+
+/**
+ * struct cfg80211_roam_info - driver initiated roaming information
+ *
+ * @req_ie: association request IEs (may be %NULL)
+ * @req_ie_len: association request IEs length
+ * @resp_ie: association response IEs (may be %NULL)
+ * @resp_ie_len: assoc response IEs length
+ * @fils: FILS related roaming information.
+ * @valid_links: For MLO roaming, bit mask of the new valid link IDs.
+ * Otherwise zero.
+ * @ap_mld_addr: For MLO roaming, MLD address of the new AP. Otherwise %NULL.
+ * @links: For MLO roaming, contains new link info for the valid links set in
+ * @valid_links. For non-MLO roaming, links[0] contains the new AP info.
+ * @links.addr: For MLO roaming, MAC address of the STA link. Otherwise %NULL.
+ * @links.bssid: For MLO roaming, MAC address of the new AP link. For non-MLO
+ * roaming, links[0].bssid points to the BSSID of the new AP. May be
+ * %NULL if %links.bss is set.
+ * @links.channel: the channel of the new AP.
+ * @links.bss: For MLO roaming, entry of the new bss to which the STA link
+ * roamed. For non-MLO roaming, links[0].bss points to the bss entry to
+ * which the STA roamed (may be %NULL if %links.bssid is set)
+ */
+struct cfg80211_roam_info {
+ const u8 *req_ie;
+ size_t req_ie_len;
+ const u8 *resp_ie;
+ size_t resp_ie_len;
+ struct cfg80211_fils_resp_params fils;
+
+ const u8 *ap_mld_addr;
+ u16 valid_links;
+ struct {
+ const u8 *addr;
+ const u8 *bssid;
+ struct ieee80211_channel *channel;
+ struct cfg80211_bss *bss;
+ } links[IEEE80211_MLD_MAX_NUM_LINKS];
+};
+
+/**
+ * cfg80211_roamed - notify cfg80211 of roaming
+ *
+ * @dev: network device
+ * @info: information about the new BSS, see &struct cfg80211_roam_info
+ * @gfp: allocation flags
+ *
+ * The driver may call this function passing either the BSSID of the new AP
+ * or the bss entry itself, to avoid a race with the bss entry timing out.
+ * It should be called by the underlying driver whenever it roamed from one AP
+ * to another while connected. Drivers which have roaming implemented in
+ * firmware should pass the bss entry to avoid a race in bss entry timeout where
+ * the bss entry of the new AP is seen in the driver, but gets timed out by the
+ * time it is accessed in __cfg80211_roamed() due to delay in scheduling
+ * rdev->event_work. In case of any failures, the reference is released
+ * either in cfg80211_roamed() or in __cfg80211_roamed(). Otherwise, it will be
+ * released while disconnecting from the current bss.
+ */
+void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
+ gfp_t gfp);
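+
+/*
+ * Illustrative sketch (assumption): a non-MLO roam reported with the bss
+ * entry held from the scan results, avoiding the timeout race described
+ * above; the bss reference would typically come from cfg80211_get_bss():
+ *
+ *	struct cfg80211_roam_info info = {};
+ *
+ *	info.links[0].bss = bss;
+ *	info.req_ie = req_ie;
+ *	info.req_ie_len = req_ie_len;
+ *	cfg80211_roamed(dev, &info, GFP_KERNEL);
+ */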
+
+/**
+ * cfg80211_port_authorized - notify cfg80211 of successful security association
+ *
+ * @dev: network device
+ * @bssid: the BSSID of the AP
+ * @gfp: allocation flags
+ *
+ * This function should be called by a driver that supports 4 way handshake
+ * offload after a security association was successfully established (i.e.,
+ * the 4 way handshake was completed successfully). The call to this function
+ * should be preceded with a call to cfg80211_connect_result(),
+ * cfg80211_connect_done(), cfg80211_connect_bss() or cfg80211_roamed() to
+ * indicate the 802.11 association.
+ */
+void cfg80211_port_authorized(struct net_device *dev, const u8 *bssid,
+ gfp_t gfp);
+
+/**
+ * cfg80211_disconnected - notify cfg80211 that connection was dropped
+ *
+ * @dev: network device
+ * @ie: information elements of the deauth/disassoc frame (may be %NULL)
+ * @ie_len: length of IEs
+ * @reason: reason code for the disconnection, set it to 0 if unknown
+ * @locally_generated: disconnection was requested locally
+ * @gfp: allocation flags
+ *
+ * After calling this function, the driver should enter an idle state
+ * and not try to connect to any AP any more.
+ */
+void cfg80211_disconnected(struct net_device *dev, u16 reason,
+ const u8 *ie, size_t ie_len,
+ bool locally_generated, gfp_t gfp);
+
+/**
+ * cfg80211_ready_on_channel - notification of remain_on_channel start
+ * @wdev: wireless device
+ * @cookie: the request cookie
+ * @chan: The current channel (from remain_on_channel request)
+ * @duration: Duration in milliseconds that the driver intends to remain on the
+ * channel
+ * @gfp: allocation flags
+ */
+void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
+ struct ieee80211_channel *chan,
+ unsigned int duration, gfp_t gfp);
+
+/**
+ * cfg80211_remain_on_channel_expired - remain_on_channel duration expired
+ * @wdev: wireless device
+ * @cookie: the request cookie
+ * @chan: The current channel (from remain_on_channel request)
+ * @gfp: allocation flags
+ */
+void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
+ struct ieee80211_channel *chan,
+ gfp_t gfp);
+
+/**
+ * cfg80211_tx_mgmt_expired - tx_mgmt duration expired
+ * @wdev: wireless device
+ * @cookie: the requested cookie
+ * @chan: The current channel (from tx_mgmt request)
+ * @gfp: allocation flags
+ */
+void cfg80211_tx_mgmt_expired(struct wireless_dev *wdev, u64 cookie,
+ struct ieee80211_channel *chan, gfp_t gfp);
+
+/**
+ * cfg80211_sinfo_alloc_tid_stats - allocate per-tid statistics.
+ *
+ * @sinfo: the station information
+ * @gfp: allocation flags
+ */
+int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp);
+
+/**
+ * cfg80211_sinfo_release_content - release contents of station info
+ * @sinfo: the station information
+ *
+ * Releases any potentially allocated sub-information of the station
+ * information, but not the struct itself (since it's typically on
+ * the stack.)
+ */
+static inline void cfg80211_sinfo_release_content(struct station_info *sinfo)
+{
+ kfree(sinfo->pertid);
+}
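+
+/*
+ * Illustrative sketch (assumption): a driver's get_station handler
+ * allocating per-TID statistics before filling them; on an error after
+ * allocation, cfg80211_sinfo_release_content() frees them again:
+ *
+ *	if (cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL))
+ *		return -ENOMEM;
+ */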
+
+/**
+ * cfg80211_new_sta - notify userspace about station
+ *
+ * @dev: the netdev
+ * @mac_addr: the station's address
+ * @sinfo: the station information
+ * @gfp: allocation flags
+ */
+void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
+ struct station_info *sinfo, gfp_t gfp);
+
+/**
+ * cfg80211_del_sta_sinfo - notify userspace about deletion of a station
+ * @dev: the netdev
+ * @mac_addr: the station's address
+ * @sinfo: the station information/statistics
+ * @gfp: allocation flags
+ */
+void cfg80211_del_sta_sinfo(struct net_device *dev, const u8 *mac_addr,
+ struct station_info *sinfo, gfp_t gfp);
+
+/**
+ * cfg80211_del_sta - notify userspace about deletion of a station
+ *
+ * @dev: the netdev
+ * @mac_addr: the station's address
+ * @gfp: allocation flags
+ */
+static inline void cfg80211_del_sta(struct net_device *dev,
+ const u8 *mac_addr, gfp_t gfp)
+{
+ cfg80211_del_sta_sinfo(dev, mac_addr, NULL, gfp);
+}
+
+/**
+ * cfg80211_conn_failed - connection request failed notification
+ *
+ * @dev: the netdev
+ * @mac_addr: the station's address
+ * @reason: the reason for connection failure
+ * @gfp: allocation flags
+ *
+ * This function is called whenever a station tried to connect to an AP
+ * but the AP rejected the connection.
+ *
+ * The reason for the connection failure can be any of the values from
+ * &enum nl80211_connect_failed_reason.
+ */
+void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
+ enum nl80211_connect_failed_reason reason,
+ gfp_t gfp);
+
+/**
+ * struct cfg80211_rx_info - received management frame info
+ *
+ * @freq: Frequency on which the frame was received in kHz
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
+ * @have_link_id: indicates the frame was received on a link of
+ * an MLD, i.e. the @link_id field is valid
+ * @link_id: the ID of the link the frame was received on
+ * @buf: Management frame (header + body)
+ * @len: length of the frame data
+ * @flags: flags, as defined in enum nl80211_rxmgmt_flags
+ * @rx_tstamp: Hardware timestamp of frame RX in nanoseconds
+ * @ack_tstamp: Hardware timestamp of ack TX in nanoseconds
+ */
+struct cfg80211_rx_info {
+ int freq;
+ int sig_dbm;
+ bool have_link_id;
+ u8 link_id;
+ const u8 *buf;
+ size_t len;
+ u32 flags;
+ u64 rx_tstamp;
+ u64 ack_tstamp;
+};
+
+/**
+ * cfg80211_rx_mgmt_ext - management frame notification with extended info
+ * @wdev: wireless device receiving the frame
+ * @info: RX info as defined in struct cfg80211_rx_info
+ *
+ * This function is called whenever an Action frame is received for a station
+ * mode interface, but is not processed in kernel.
+ *
+ * Return: %true if a user space application has registered for this frame.
+ * For action frames, that makes it responsible for rejecting unrecognized
+ * action frames; %false otherwise, in which case for action frames the
+ * driver is responsible for rejecting the frame.
+ */
+bool cfg80211_rx_mgmt_ext(struct wireless_dev *wdev,
+ struct cfg80211_rx_info *info);
+
+/**
+ * cfg80211_rx_mgmt_khz - notification of received, unprocessed management frame
+ * @wdev: wireless device receiving the frame
+ * @freq: Frequency on which the frame was received in kHz
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
+ * @buf: Management frame (header + body)
+ * @len: length of the frame data
+ * @flags: flags, as defined in enum nl80211_rxmgmt_flags
+ *
+ * This function is called whenever an Action frame is received for a station
+ * mode interface, but is not processed in kernel.
+ *
+ * Return: %true if a user space application has registered for this frame.
+ * For action frames, that makes it responsible for rejecting unrecognized
+ * action frames; %false otherwise, in which case for action frames the
+ * driver is responsible for rejecting the frame.
+ */
+static inline bool cfg80211_rx_mgmt_khz(struct wireless_dev *wdev, int freq,
+ int sig_dbm, const u8 *buf, size_t len,
+ u32 flags)
+{
+ struct cfg80211_rx_info info = {
+ .freq = freq,
+ .sig_dbm = sig_dbm,
+ .buf = buf,
+ .len = len,
+ .flags = flags
+ };
+
+ return cfg80211_rx_mgmt_ext(wdev, &info);
+}
+
+/**
+ * cfg80211_rx_mgmt - notification of received, unprocessed management frame
+ * @wdev: wireless device receiving the frame
+ * @freq: Frequency on which the frame was received in MHz
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
+ * @buf: Management frame (header + body)
+ * @len: length of the frame data
+ * @flags: flags, as defined in enum nl80211_rxmgmt_flags
+ *
+ * This function is called whenever an Action frame is received for a station
+ * mode interface, but is not processed in kernel.
+ *
+ * Return: %true if a user space application has registered for this frame.
+ * For action frames, that makes it responsible for rejecting unrecognized
+ * action frames; %false otherwise, in which case for action frames the
+ * driver is responsible for rejecting the frame.
+ */
+static inline bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq,
+ int sig_dbm, const u8 *buf, size_t len,
+ u32 flags)
+{
+ struct cfg80211_rx_info info = {
+ .freq = MHZ_TO_KHZ(freq),
+ .sig_dbm = sig_dbm,
+ .buf = buf,
+ .len = len,
+ .flags = flags
+ };
+
+ return cfg80211_rx_mgmt_ext(wdev, &info);
+}
+
+/**
+ * struct cfg80211_tx_status - TX status for management frame information
+ *
+ * @cookie: Cookie returned by cfg80211_ops::mgmt_tx()
+ * @tx_tstamp: hardware TX timestamp in nanoseconds
+ * @ack_tstamp: hardware ack RX timestamp in nanoseconds
+ * @buf: Management frame (header + body)
+ * @len: length of the frame data
+ * @ack: Whether frame was acknowledged
+ */
+struct cfg80211_tx_status {
+ u64 cookie;
+ u64 tx_tstamp;
+ u64 ack_tstamp;
+ const u8 *buf;
+ size_t len;
+ bool ack;
+};
+
+/**
+ * cfg80211_mgmt_tx_status_ext - TX status notification with extended info
+ * @wdev: wireless device receiving the frame
+ * @status: TX status data
+ * @gfp: context flags
+ *
+ * This function is called whenever a management frame was requested to be
+ * transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the
+ * transmission attempt with extended info.
+ */
+void cfg80211_mgmt_tx_status_ext(struct wireless_dev *wdev,
+ struct cfg80211_tx_status *status, gfp_t gfp);
+
+/**
+ * cfg80211_mgmt_tx_status - notification of TX status for management frame
+ * @wdev: wireless device receiving the frame
+ * @cookie: Cookie returned by cfg80211_ops::mgmt_tx()
+ * @buf: Management frame (header + body)
+ * @len: length of the frame data
+ * @ack: Whether frame was acknowledged
+ * @gfp: context flags
+ *
+ * This function is called whenever a management frame was requested to be
+ * transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the
+ * transmission attempt.
+ */
+static inline void cfg80211_mgmt_tx_status(struct wireless_dev *wdev,
+ u64 cookie, const u8 *buf,
+ size_t len, bool ack, gfp_t gfp)
+{
+ struct cfg80211_tx_status status = {
+ .cookie = cookie,
+ .buf = buf,
+ .len = len,
+ .ack = ack
+ };
+
+ cfg80211_mgmt_tx_status_ext(wdev, &status, gfp);
+}
+
+/**
+ * cfg80211_control_port_tx_status - notification of TX status for control
+ * port frames
+ * @wdev: wireless device receiving the frame
+ * @cookie: Cookie returned by cfg80211_ops::tx_control_port()
+ * @buf: Data frame (header + body)
+ * @len: length of the frame data
+ * @ack: Whether frame was acknowledged
+ * @gfp: context flags
+ *
+ * This function is called whenever a control port frame was requested to be
+ * transmitted with cfg80211_ops::tx_control_port() to report the TX status of
+ * the transmission attempt.
+ */
+void cfg80211_control_port_tx_status(struct wireless_dev *wdev, u64 cookie,
+ const u8 *buf, size_t len, bool ack,
+ gfp_t gfp);
+
+/**
+ * cfg80211_rx_control_port - notification about a received control port frame
+ * @dev: The device the frame matched to
+ * @skb: The skbuf with the control port frame. It is assumed that the skbuf
+ * is 802.3 formatted (with 802.3 header). The skb can be non-linear.
+ * This function does not take ownership of the skb, so the caller is
+ * responsible for any cleanup. The caller must also ensure that
+ * skb->protocol is set appropriately.
+ * @unencrypted: Whether the frame was received unencrypted
+ *
+ * This function is used to inform userspace about a received control port
+ * frame. It should only be used if userspace indicated it wants to receive
+ * control port frames over nl80211.
+ *
+ * The frame is the data portion of the 802.3 or 802.11 data frame with all
+ * network layer headers removed (e.g. the raw EAPoL frame).
+ *
+ * Return: %true if the frame was passed to userspace
+ */
+bool cfg80211_rx_control_port(struct net_device *dev,
+ struct sk_buff *skb, bool unencrypted);
+
+/**
+ * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
+ * @dev: network device
+ * @rssi_event: the triggered RSSI event
+ * @rssi_level: new RSSI level value or 0 if not available
+ * @gfp: context flags
+ *
+ * This function is called when a configured connection quality monitoring
+ * rssi threshold reached event occurs.
+ */
+void cfg80211_cqm_rssi_notify(struct net_device *dev,
+ enum nl80211_cqm_rssi_threshold_event rssi_event,
+ s32 rssi_level, gfp_t gfp);
+
+/**
+ * cfg80211_cqm_pktloss_notify - notify userspace about packetloss to peer
+ * @dev: network device
+ * @peer: peer's MAC address
+ * @num_packets: how many packets were lost; this should be a fixed threshold,
+ * typically no less than 50, or possibly a throughput-dependent
+ * threshold (to account for temporary interference)
+ * @gfp: context flags
+ */
+void cfg80211_cqm_pktloss_notify(struct net_device *dev,
+ const u8 *peer, u32 num_packets, gfp_t gfp);
+
+/**
+ * cfg80211_cqm_txe_notify - TX error rate event
+ * @dev: network device
+ * @peer: peer's MAC address
+ * @num_packets: how many packets were lost
+ * @rate: % of packets which failed transmission
+ * @intvl: interval (in s) over which the TX failure threshold was breached.
+ * @gfp: context flags
+ *
+ * Notify userspace when the configured percentage of failed transmissions
+ * over a number of packets in a given interval is exceeded.
+ */
+void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer,
+ u32 num_packets, u32 rate, u32 intvl, gfp_t gfp);
+
+/**
+ * cfg80211_cqm_beacon_loss_notify - beacon loss event
+ * @dev: network device
+ * @gfp: context flags
+ *
+ * Notify userspace about beacon loss from the connected AP.
+ */
+void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp);
+
+/**
+ * __cfg80211_radar_event - radar detection event
+ * @wiphy: the wiphy
+ * @chandef: chandef for the current channel
+ * @offchan: the radar has been detected on the offchannel chain
+ * @gfp: context flags
+ *
+ * This function is called when a radar is detected on the current channel.
+ */
+void __cfg80211_radar_event(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ bool offchan, gfp_t gfp);
+
+static inline void
+cfg80211_radar_event(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ gfp_t gfp)
+{
+ __cfg80211_radar_event(wiphy, chandef, false, gfp);
+}
+
+static inline void
+cfg80211_background_radar_event(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ gfp_t gfp)
+{
+ __cfg80211_radar_event(wiphy, chandef, true, gfp);
+}
+
+/**
+ * cfg80211_sta_opmode_change_notify - STA's ht/vht operation mode change event
+ * @dev: network device
+ * @mac: MAC address of the station whose opmode was modified
+ * @sta_opmode: station's current opmode value
+ * @gfp: context flags
+ *
+ * The driver should call this function when a station's operating mode is
+ * modified via an action frame.
+ */
+void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
+ struct sta_opmode_info *sta_opmode,
+ gfp_t gfp);
+
+/**
+ * cfg80211_cac_event - Channel availability check (CAC) event
+ * @netdev: network device
+ * @chandef: chandef for the current channel
+ * @event: type of event
+ * @gfp: context flags
+ *
+ * This function is called when a Channel availability check (CAC) is finished
+ * or aborted. This must be called to notify the completion of a CAC process,
+ * also by full-MAC drivers.
+ */
+void cfg80211_cac_event(struct net_device *netdev,
+ const struct cfg80211_chan_def *chandef,
+ enum nl80211_radar_event event, gfp_t gfp);
+
+/**
+ * cfg80211_background_cac_abort - Channel Availability Check offchan abort event
+ * @wiphy: the wiphy
+ *
+ * This function is called by the driver when a Channel Availability Check
+ * (CAC) is aborted by an offchannel dedicated chain.
+ */
+void cfg80211_background_cac_abort(struct wiphy *wiphy);
+
+/**
+ * cfg80211_gtk_rekey_notify - notify userspace about driver rekeying
+ * @dev: network device
+ * @bssid: BSSID of AP (to avoid races)
+ * @replay_ctr: new replay counter
+ * @gfp: allocation flags
+ */
+void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
+ const u8 *replay_ctr, gfp_t gfp);
+
+/**
+ * cfg80211_pmksa_candidate_notify - notify about PMKSA caching candidate
+ * @dev: network device
+ * @index: candidate index (the smaller the index, the higher the priority)
+ * @bssid: BSSID of AP
+ * @preauth: Whether AP advertises support for RSN pre-authentication
+ * @gfp: allocation flags
+ */
+void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
+ const u8 *bssid, bool preauth, gfp_t gfp);
+
+/**
+ * cfg80211_rx_spurious_frame - inform userspace about a spurious frame
+ * @dev: The device the frame matched to
+ * @addr: the transmitter address
+ * @gfp: context flags
+ *
+ * This function is used in AP mode (only!) to inform userspace that
+ * a spurious class 3 frame was received, to be able to deauth the
+ * sender.
+ * Return: %true if the frame was passed to userspace (or this failed
+ * for a reason other than not having a subscription.)
+ */
+bool cfg80211_rx_spurious_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp);
+
+/**
+ * cfg80211_rx_unexpected_4addr_frame - inform about unexpected WDS frame
+ * @dev: The device the frame matched to
+ * @addr: the transmitter address
+ * @gfp: context flags
+ *
+ * This function is used in AP mode (only!) to inform userspace that
+ * an associated station sent a 4addr frame but that wasn't expected.
+ * It is allowed and desirable to send this event only once for each
+ * station to avoid event flooding.
+ * Return: %true if the frame was passed to userspace (or this failed
+ * for a reason other than not having a subscription.)
+ */
+bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp);
+
+/**
+ * cfg80211_probe_status - notify userspace about probe status
+ * @dev: the device the probe was sent on
+ * @addr: the address of the peer
+ * @cookie: the cookie filled in @probe_client previously
+ * @acked: indicates whether probe was acked or not
+ * @ack_signal: signal strength (in dBm) of the ACK frame.
+ * @is_valid_ack_signal: indicates whether @ack_signal is valid.
+ * @gfp: allocation flags
+ */
+void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
+ u64 cookie, bool acked, s32 ack_signal,
+ bool is_valid_ack_signal, gfp_t gfp);
+
+/**
+ * cfg80211_report_obss_beacon_khz - report beacon from other APs
+ * @wiphy: The wiphy that received the beacon
+ * @frame: the frame
+ * @len: length of the frame
+ * @freq: frequency the frame was received on in kHz
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
+ *
+ * Use this function to report to userspace when a beacon was
+ * received. It is not useful to call this when there is no
+ * netdev that is in AP/GO mode.
+ */
+void cfg80211_report_obss_beacon_khz(struct wiphy *wiphy, const u8 *frame,
+ size_t len, int freq, int sig_dbm);
+
+/**
+ * cfg80211_report_obss_beacon - report beacon from other APs
+ * @wiphy: The wiphy that received the beacon
+ * @frame: the frame
+ * @len: length of the frame
+ * @freq: frequency the frame was received on
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
+ *
+ * Use this function to report to userspace when a beacon was
+ * received. It is not useful to call this when there is no
+ * netdev that is in AP/GO mode.
+ */
+static inline void cfg80211_report_obss_beacon(struct wiphy *wiphy,
+ const u8 *frame, size_t len,
+ int freq, int sig_dbm)
+{
+ cfg80211_report_obss_beacon_khz(wiphy, frame, len, MHZ_TO_KHZ(freq),
+ sig_dbm);
+}
+
+/**
+ * cfg80211_reg_can_beacon - check if beaconing is allowed
+ * @wiphy: the wiphy
+ * @chandef: the channel definition
+ * @iftype: interface type
+ *
+ * Return: %true if there is no secondary channel or the secondary channel(s)
+ * can be used for beaconing (i.e. is not a radar channel etc.)
+ */
+bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype);
+
+/**
+ * cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation
+ * @wiphy: the wiphy
+ * @chandef: the channel definition
+ * @iftype: interface type
+ *
+ * Return: %true if there is no secondary channel or the secondary channel(s)
+ * can be used for beaconing (i.e. is not a radar channel etc.). This version
+ * also checks if IR-relaxation conditions apply, to allow beaconing under
+ * more permissive conditions.
+ *
+ * Requires the wiphy mutex to be held.
+ */
+bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype);
+
+/**
+ * cfg80211_ch_switch_notify - update wdev channel and notify userspace
+ * @dev: the device which switched channels
+ * @chandef: the new channel definition
+ * @link_id: the link ID for MLO, must be 0 for non-MLO
+ *
+ * The caller must hold the wdev lock, therefore this must only be called
+ * from sleepable driver context!
+ */
+void cfg80211_ch_switch_notify(struct net_device *dev,
+ struct cfg80211_chan_def *chandef,
+ unsigned int link_id);
+
+/**
+ * cfg80211_ch_switch_started_notify - notify channel switch start
+ * @dev: the device on which the channel switch started
+ * @chandef: the future channel definition
+ * @link_id: the link ID for MLO, must be 0 for non-MLO
+ * @count: the number of TBTTs until the channel switch happens
+ * @quiet: whether or not immediate quiet was requested by the AP
+ *
+ * Inform userspace about the channel switch that has just
+ * started, so that it can take appropriate actions (e.g. starting
+ * channel switch on other vifs), if necessary.
+ */
+void cfg80211_ch_switch_started_notify(struct net_device *dev,
+ struct cfg80211_chan_def *chandef,
+ unsigned int link_id, u8 count,
+ bool quiet);
+
+/**
+ * ieee80211_operating_class_to_band - convert operating class to band
+ *
+ * @operating_class: the operating class to convert
+ * @band: band pointer to fill
+ *
+ * Returns %true if the conversion was successful, %false otherwise.
+ */
+bool ieee80211_operating_class_to_band(u8 operating_class,
+ enum nl80211_band *band);
+
+/**
+ * ieee80211_chandef_to_operating_class - convert chandef to operation class
+ *
+ * @chandef: the chandef to convert
+ * @op_class: a pointer to the resulting operating class
+ *
+ * Returns %true if the conversion was successful, %false otherwise.
+ */
+bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
+ u8 *op_class);
+
+/**
+ * ieee80211_chandef_to_khz - convert chandef to frequency in kHz
+ *
+ * @chandef: the chandef to convert
+ *
+ * Returns the center frequency of chandef (1st segment) in kHz.
+ */
+static inline u32
+ieee80211_chandef_to_khz(const struct cfg80211_chan_def *chandef)
+{
+ return MHZ_TO_KHZ(chandef->center_freq1) + chandef->freq1_offset;
+}
+
+/**
+ * cfg80211_tdls_oper_request - request userspace to perform TDLS operation
+ * @dev: the device on which the operation is requested
+ * @peer: the MAC address of the peer device
+ * @oper: the requested TDLS operation (NL80211_TDLS_SETUP or
+ * NL80211_TDLS_TEARDOWN)
+ * @reason_code: the reason code for teardown request
+ * @gfp: allocation flags
+ *
+ * This function is used to request userspace to perform a TDLS operation that
+ * requires knowledge of keys, i.e., link setup or teardown when the AP
+ * connection uses encryption. This is an optional mechanism for the driver to
+ * use if it can automatically determine when a TDLS link could be useful
+ * (e.g., based on traffic and signal strength for a peer).
+ */
+void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
+ enum nl80211_tdls_operation oper,
+ u16 reason_code, gfp_t gfp);
+
+/**
+ * cfg80211_calculate_bitrate - calculate actual bitrate (in 100kbps units)
+ * @rate: given rate_info to calculate bitrate from
+ *
+ * Return: 0 if MCS index >= 32
+ */
+u32 cfg80211_calculate_bitrate(struct rate_info *rate);
+
+/**
+ * cfg80211_unregister_wdev - remove the given wdev
+ * @wdev: struct wireless_dev to remove
+ *
+ * This function removes the device so it can no longer be used. It is necessary
+ * to call this function even when cfg80211 requests the removal of the device
+ * by calling the del_virtual_intf() callback. The function must also be called
+ * when the driver wishes to unregister the wdev, e.g. when the hardware device
+ * is unbound from the driver.
+ *
+ * Requires the RTNL and wiphy mutex to be held.
+ */
+void cfg80211_unregister_wdev(struct wireless_dev *wdev);
+
+/**
+ * cfg80211_register_netdevice - register the given netdev
+ * @dev: the netdev to register
+ *
+ * Note: In contexts coming from cfg80211 callbacks, you must call this rather
+ * than register_netdevice(); unregister_netdev() is impossible as the RTNL is
+ * held. Otherwise, both register_netdevice() and register_netdev() are usable
+ * instead as well.
+ *
+ * Requires the RTNL and wiphy mutex to be held.
+ */
+int cfg80211_register_netdevice(struct net_device *dev);
+
+/**
+ * cfg80211_unregister_netdevice - unregister the given netdev
+ * @dev: the netdev to unregister
+ *
+ * Note: In contexts coming from cfg80211 callbacks, you must call this rather
+ * than unregister_netdevice(); unregister_netdev() is impossible as the RTNL
+ * is held. Otherwise, both unregister_netdevice() and unregister_netdev() are
+ * usable instead as well.
+ *
+ * Requires the RTNL and wiphy mutex to be held.
+ */
+static inline void cfg80211_unregister_netdevice(struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_CFG80211)
+ cfg80211_unregister_wdev(dev->ieee80211_ptr);
+#endif
+}
+
+/**
+ * struct cfg80211_ft_event_params - FT Information Elements
+ * @ies: FT IEs
+ * @ies_len: length of the FT IEs in bytes
+ * @target_ap: target AP's MAC address
+ * @ric_ies: RIC IE
+ * @ric_ies_len: length of the RIC IE in bytes
+ */
+struct cfg80211_ft_event_params {
+ const u8 *ies;
+ size_t ies_len;
+ const u8 *target_ap;
+ const u8 *ric_ies;
+ size_t ric_ies_len;
+};
+
+/**
+ * cfg80211_ft_event - notify userspace about FT IE and RIC IE
+ * @netdev: network device
+ * @ft_event: IE information
+ */
+void cfg80211_ft_event(struct net_device *netdev,
+ struct cfg80211_ft_event_params *ft_event);
+
+/**
+ * cfg80211_get_p2p_attr - find and copy a P2P attribute from IE buffer
+ * @ies: the input IE buffer
+ * @len: the input length
+ * @attr: the attribute ID to find
+ * @buf: output buffer, can be %NULL if the data isn't needed, e.g.
+ * if the function is only called to get the needed buffer size
+ * @bufsize: size of the output buffer
+ *
+ * The function finds a given P2P attribute in the (vendor) IEs and
+ * copies its contents to the given buffer.
+ *
+ * Return: A negative error code (-%EILSEQ or -%ENOENT) if the data is
+ * malformed or the attribute can't be found (respectively), or the
+ * length of the found attribute (which can be zero).
+ */
+int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len,
+ enum ieee80211_p2p_attr_id attr,
+ u8 *buf, unsigned int bufsize);
+
+/**
+ * ieee80211_ie_split_ric - split an IE buffer according to ordering (with RIC)
+ * @ies: the IE buffer
+ * @ielen: the length of the IE buffer
+ * @ids: an array with element IDs that are allowed before
+ * the split. A WLAN_EID_EXTENSION value means that the next
+ * EID in the list is a sub-element of the EXTENSION IE.
+ * @n_ids: the size of the element ID array
+ * @after_ric: array IE types that come after the RIC element
+ * @n_after_ric: size of the @after_ric array
+ * @offset: offset where to start splitting in the buffer
+ *
+ * This function splits an IE buffer by updating the @offset
+ * variable to point to the location where the buffer should be
+ * split.
+ *
+ * It assumes that the given IE buffer is well-formed; this
+ * has to be guaranteed by the caller!
+ *
+ * It also assumes that the IEs in the buffer are ordered
+ * correctly; if not, the result of using this function will not
+ * be ordered correctly either, i.e. it does no reordering.
+ *
+ * The function returns the offset where the next part of the
+ * buffer starts, which may be @ielen if the entire (remainder)
+ * of the buffer should be used.
+ */
+size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids,
+ const u8 *after_ric, int n_after_ric,
+ size_t offset);
+
+/**
+ * ieee80211_ie_split - split an IE buffer according to ordering
+ * @ies: the IE buffer
+ * @ielen: the length of the IE buffer
+ * @ids: an array with element IDs that are allowed before
+ * the split. A WLAN_EID_EXTENSION value means that the next
+ * EID in the list is a sub-element of the EXTENSION IE.
+ * @n_ids: the size of the element ID array
+ * @offset: offset where to start splitting in the buffer
+ *
+ * This function splits an IE buffer by updating the @offset
+ * variable to point to the location where the buffer should be
+ * split.
+ *
+ * It assumes that the given IE buffer is well-formed; this
+ * has to be guaranteed by the caller!
+ *
+ * It also assumes that the IEs in the buffer are ordered
+ * correctly; if not, the result of using this function will not
+ * be ordered correctly either, i.e. it does no reordering.
+ *
+ * The function returns the offset where the next part of the
+ * buffer starts, which may be @ielen if the entire (remainder)
+ * of the buffer should be used.
+ */
+static inline size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids, size_t offset)
+{
+ return ieee80211_ie_split_ric(ies, ielen, ids, n_ids, NULL, 0, offset);
+}
+
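+/*
+ * Usage sketch (illustrative only, not part of this header): splitting a
+ * caller-built IE buffer so extra elements can be inserted at the right
+ * position. The element ID list, ies and ielen here are hypothetical.
+ *
+ *	static const u8 before_ht[] = {
+ *		WLAN_EID_SSID,
+ *		WLAN_EID_SUPP_RATES,
+ *		WLAN_EID_EXT_SUPP_RATES,
+ *	};
+ *	size_t offset = ieee80211_ie_split(ies, ielen, before_ht,
+ *					   ARRAY_SIZE(before_ht), 0);
+ *
+ * Copy ies[0..offset) first, insert the new element(s), then append the
+ * remaining ielen - offset bytes.
+ */
+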
+/**
+ * cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN
+ * @wdev: the wireless device reporting the wakeup
+ * @wakeup: the wakeup report
+ * @gfp: allocation flags
+ *
+ * This function reports that the given device woke up. If it
+ * caused the wakeup, report the reason(s), otherwise you may
+ * pass %NULL as the @wakeup parameter to advertise that something
+ * else caused the wakeup.
+ */
+void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
+ struct cfg80211_wowlan_wakeup *wakeup,
+ gfp_t gfp);
+
+/**
+ * cfg80211_crit_proto_stopped() - indicate critical protocol stopped by driver.
+ *
+ * @wdev: the wireless device for which critical protocol is stopped.
+ * @gfp: allocation flags
+ *
+ * This function can be called by the driver to indicate it has reverted
+ * operation back to normal. One reason could be that the duration given
+ * by .crit_proto_start() has expired.
+ */
+void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp);
+
+/**
+ * ieee80211_get_num_supported_channels - get number of channels device has
+ * @wiphy: the wiphy
+ *
+ * Return: the number of channels supported by the device.
+ */
+unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy);
+
+/**
+ * cfg80211_check_combinations - check interface combinations
+ *
+ * @wiphy: the wiphy
+ * @params: the interface combinations parameter
+ *
+ * This function can be called by the driver to check whether a
+ * combination of interfaces and their types is allowed according to
+ * the interface combinations.
+ */
+int cfg80211_check_combinations(struct wiphy *wiphy,
+ struct iface_combination_params *params);
+
+/**
+ * cfg80211_iter_combinations - iterate over matching combinations
+ *
+ * @wiphy: the wiphy
+ * @params: the interface combinations parameter
+ * @iter: function to call for each matching combination
+ * @data: pointer to pass to iter function
+ *
+ * This function can be called by the driver to check what possible
+ * combinations it fits in at a given moment, e.g. for channel switching
+ * purposes.
+ */
+int cfg80211_iter_combinations(struct wiphy *wiphy,
+ struct iface_combination_params *params,
+ void (*iter)(const struct ieee80211_iface_combination *c,
+ void *data),
+ void *data);
+
+/**
+ * cfg80211_stop_iface - trigger interface disconnection
+ *
+ * @wiphy: the wiphy
+ * @wdev: wireless device
+ * @gfp: context flags
+ *
+ * Trigger the interface to be stopped as if the AP was stopped, the
+ * IBSS/mesh was left, or the STA disconnected.
+ *
+ * Note: This doesn't need any locks and is asynchronous.
+ */
+void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
+ gfp_t gfp);
+
+/**
+ * cfg80211_shutdown_all_interfaces - shut down all interfaces for a wiphy
+ * @wiphy: the wiphy to shut down
+ *
+ * This function shuts down all interfaces belonging to this wiphy by
+ * calling dev_close() (and treating non-netdev interfaces as needed).
+ * It shouldn't really be used unless there are some fatal device errors
+ * that really can't be recovered in any other way.
+ *
+ * Callers must hold the RTNL and be able to deal with callbacks into
+ * the driver while the function is running.
+ */
+void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy);
+
+/**
+ * wiphy_ext_feature_set - set the extended feature flag
+ *
+ * @wiphy: the wiphy to modify.
+ * @ftidx: extended feature bit index.
+ *
+ * The extended features are flagged in multiple bytes (see
+ * &struct wiphy.@ext_features)
+ */
+static inline void wiphy_ext_feature_set(struct wiphy *wiphy,
+ enum nl80211_ext_feature_index ftidx)
+{
+ u8 *ft_byte;
+
+ ft_byte = &wiphy->ext_features[ftidx / 8];
+ *ft_byte |= BIT(ftidx % 8);
+}
+
+/**
+ * wiphy_ext_feature_isset - check the extended feature flag
+ *
+ * @wiphy: the wiphy to modify.
+ * @ftidx: extended feature bit index.
+ *
+ * The extended features are flagged in multiple bytes (see
+ * &struct wiphy.@ext_features)
+ */
+static inline bool
+wiphy_ext_feature_isset(struct wiphy *wiphy,
+ enum nl80211_ext_feature_index ftidx)
+{
+ u8 ft_byte;
+
+ ft_byte = wiphy->ext_features[ftidx / 8];
+ return (ft_byte & BIT(ftidx % 8)) != 0;
+}
+
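+/*
+ * Example (illustrative): a driver typically advertises extended features
+ * once, before wiphy_register(), e.g.
+ *
+ *	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ *
+ * and the flag can later be tested anywhere with
+ *
+ *	wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_VHT_IBSS)
+ */
+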
+/**
+ * cfg80211_free_nan_func - free NAN function
+ * @f: NAN function that should be freed
+ *
+ * Frees the NAN function and all of its allocated members.
+ */
+void cfg80211_free_nan_func(struct cfg80211_nan_func *f);
+
+/**
+ * struct cfg80211_nan_match_params - NAN match parameters
+ * @type: the type of the function that triggered a match. If it is
+ * %NL80211_NAN_FUNC_SUBSCRIBE it means that we replied to a subscriber.
+ * If it is %NL80211_NAN_FUNC_PUBLISH, it means that we got a discovery
+ * result.
+ * If it is %NL80211_NAN_FUNC_FOLLOW_UP, we received a follow up.
+ * @inst_id: the local instance id
+ * @peer_inst_id: the instance id of the peer's function
+ * @addr: the MAC address of the peer
+ * @info_len: the length of @info
+ * @info: the Service Specific Info from the peer (if any)
+ * @cookie: unique identifier of the corresponding function
+ */
+struct cfg80211_nan_match_params {
+ enum nl80211_nan_function_type type;
+ u8 inst_id;
+ u8 peer_inst_id;
+ const u8 *addr;
+ u8 info_len;
+ const u8 *info;
+ u64 cookie;
+};
+
+/**
+ * cfg80211_nan_match - report a match for a NAN function.
+ * @wdev: the wireless device reporting the match
+ * @match: match notification parameters
+ * @gfp: allocation flags
+ *
+ * This function reports that a NAN function had a match. This can be a
+ * subscribe that had a match or a solicited publish that was sent. It can
+ * also be a follow up that was received.
+ */
+void cfg80211_nan_match(struct wireless_dev *wdev,
+ struct cfg80211_nan_match_params *match, gfp_t gfp);
+
+/**
+ * cfg80211_nan_func_terminated - notify about NAN function termination.
+ *
+ * @wdev: the wireless device reporting the match
+ * @inst_id: the local instance id
+ * @reason: termination reason (one of the NL80211_NAN_FUNC_TERM_REASON_*)
+ * @cookie: unique NAN function identifier
+ * @gfp: allocation flags
+ *
+ * This function reports that a NAN function was terminated.
+ */
+void cfg80211_nan_func_terminated(struct wireless_dev *wdev,
+ u8 inst_id,
+ enum nl80211_nan_func_term_reason reason,
+ u64 cookie, gfp_t gfp);
+
+/* ethtool helper */
+void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
+
+/**
+ * cfg80211_external_auth_request - userspace request for authentication
+ * @netdev: network device
+ * @params: External authentication parameters
+ * @gfp: allocation flags
+ * Returns: 0 on success, < 0 on error
+ */
+int cfg80211_external_auth_request(struct net_device *netdev,
+ struct cfg80211_external_auth_params *params,
+ gfp_t gfp);
+
+/**
+ * cfg80211_pmsr_report - report peer measurement result data
+ * @wdev: the wireless device reporting the measurement
+ * @req: the original measurement request
+ * @result: the result data
+ * @gfp: allocation flags
+ */
+void cfg80211_pmsr_report(struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *req,
+ struct cfg80211_pmsr_result *result,
+ gfp_t gfp);
+
+/**
+ * cfg80211_pmsr_complete - report peer measurement completed
+ * @wdev: the wireless device reporting the measurement
+ * @req: the original measurement request
+ * @gfp: allocation flags
+ *
+ * Report that the entire measurement completed, after this
+ * the request pointer will no longer be valid.
+ */
+void cfg80211_pmsr_complete(struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *req,
+ gfp_t gfp);
+
+/**
+ * cfg80211_iftype_allowed - check whether the interface can be allowed
+ * @wiphy: the wiphy
+ * @iftype: interface type
+ * @is_4addr: use_4addr flag, must be '0' when check_swif is '1'
+ * @check_swif: check iftype against software interfaces
+ *
+ * Check whether the interface is allowed to operate; additionally, this API
+ * can be used to check iftype against the software interfaces when
+ * check_swif is '1'.
+ */
+bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
+ bool is_4addr, u8 check_swif);
+
+/**
+ * cfg80211_assoc_comeback - notification of association that was
+ * temporarily rejected with a comeback
+ * @netdev: network device
+ * @ap_addr: AP (MLD) address that rejected the association
+ * @timeout: timeout interval value in TUs.
+ *
+ * This function may sleep. The caller must hold the corresponding wdev's mutex.
+ */
+void cfg80211_assoc_comeback(struct net_device *netdev,
+ const u8 *ap_addr, u32 timeout);
+
+/* Logging, debugging and troubleshooting/diagnostic helpers. */
+
+/* wiphy_printk helpers, similar to dev_printk */
+
+#define wiphy_printk(level, wiphy, format, args...) \
+ dev_printk(level, &(wiphy)->dev, format, ##args)
+#define wiphy_emerg(wiphy, format, args...) \
+ dev_emerg(&(wiphy)->dev, format, ##args)
+#define wiphy_alert(wiphy, format, args...) \
+ dev_alert(&(wiphy)->dev, format, ##args)
+#define wiphy_crit(wiphy, format, args...) \
+ dev_crit(&(wiphy)->dev, format, ##args)
+#define wiphy_err(wiphy, format, args...) \
+ dev_err(&(wiphy)->dev, format, ##args)
+#define wiphy_warn(wiphy, format, args...) \
+ dev_warn(&(wiphy)->dev, format, ##args)
+#define wiphy_notice(wiphy, format, args...) \
+ dev_notice(&(wiphy)->dev, format, ##args)
+#define wiphy_info(wiphy, format, args...) \
+ dev_info(&(wiphy)->dev, format, ##args)
+#define wiphy_info_once(wiphy, format, args...) \
+ dev_info_once(&(wiphy)->dev, format, ##args)
+
+#define wiphy_err_ratelimited(wiphy, format, args...) \
+ dev_err_ratelimited(&(wiphy)->dev, format, ##args)
+#define wiphy_warn_ratelimited(wiphy, format, args...) \
+ dev_warn_ratelimited(&(wiphy)->dev, format, ##args)
+
+#define wiphy_debug(wiphy, format, args...) \
+ wiphy_printk(KERN_DEBUG, wiphy, format, ##args)
+
+#define wiphy_dbg(wiphy, format, args...) \
+ dev_dbg(&(wiphy)->dev, format, ##args)
+
+#if defined(VERBOSE_DEBUG)
+#define wiphy_vdbg wiphy_dbg
+#else
+#define wiphy_vdbg(wiphy, format, args...) \
+({ \
+ if (0) \
+ wiphy_printk(KERN_DEBUG, wiphy, format, ##args); \
+ 0; \
+})
+#endif
+
+/*
+ * wiphy_WARN() acts like wiphy_printk(), but with the key difference
+ * of using a WARN/WARN_ON to get the message out, including the
+ * file/line information and a backtrace.
+ */
+#define wiphy_WARN(wiphy, format, args...) \
+ WARN(1, "wiphy: %s\n" format, wiphy_name(wiphy), ##args);
+
+/**
+ * cfg80211_update_owe_info_event - Notify the peer's OWE info to user space
+ * @netdev: network device
+ * @owe_info: peer's owe info
+ * @gfp: allocation flags
+ */
+void cfg80211_update_owe_info_event(struct net_device *netdev,
+ struct cfg80211_update_owe_info *owe_info,
+ gfp_t gfp);
+
+/**
+ * cfg80211_bss_flush - resets all the scan entries
+ * @wiphy: the wiphy
+ */
+void cfg80211_bss_flush(struct wiphy *wiphy);
+
+/**
+ * cfg80211_bss_color_notify - notify about bss color event
+ * @dev: network device
+ * @gfp: allocation flags
+ * @cmd: the actual event we want to notify
+ * @count: the number of TBTTs until the color change happens
+ * @color_bitmap: representations of the colors that the local BSS is aware of
+ */
+int cfg80211_bss_color_notify(struct net_device *dev, gfp_t gfp,
+ enum nl80211_commands cmd, u8 count,
+ u64 color_bitmap);
+
+/**
+ * cfg80211_obss_color_collision_notify - notify about bss color collision
+ * @dev: network device
+ * @color_bitmap: representations of the colors that the local BSS is aware of
+ * @gfp: allocation flags
+ */
+static inline int cfg80211_obss_color_collision_notify(struct net_device *dev,
+ u64 color_bitmap, gfp_t gfp)
+{
+ return cfg80211_bss_color_notify(dev, gfp,
+ NL80211_CMD_OBSS_COLOR_COLLISION,
+ 0, color_bitmap);
+}
+
+/**
+ * cfg80211_color_change_started_notify - notify color change start
+ * @dev: the device on which the color is switched
+ * @count: the number of TBTTs until the color change happens
+ *
+ * Inform userspace about the color change that has started.
+ */
+static inline int cfg80211_color_change_started_notify(struct net_device *dev,
+ u8 count)
+{
+ return cfg80211_bss_color_notify(dev, GFP_KERNEL,
+ NL80211_CMD_COLOR_CHANGE_STARTED,
+ count, 0);
+}
+
+/**
+ * cfg80211_color_change_aborted_notify - notify color change abort
+ * @dev: the device on which the color is switched
+ *
+ * Inform userspace that the color change was aborted.
+ */
+static inline int cfg80211_color_change_aborted_notify(struct net_device *dev)
+{
+ return cfg80211_bss_color_notify(dev, GFP_KERNEL,
+ NL80211_CMD_COLOR_CHANGE_ABORTED,
+ 0, 0);
+}
+
+/**
+ * cfg80211_color_change_notify - notify color change completion
+ * @dev: the device on which the color was switched
+ *
+ * Inform userspace that the color change has completed.
+ */
+static inline int cfg80211_color_change_notify(struct net_device *dev)
+{
+ return cfg80211_bss_color_notify(dev, GFP_KERNEL,
+ NL80211_CMD_COLOR_CHANGE_COMPLETED,
+ 0, 0);
+}
+
+#endif /* __NET_CFG80211_H */
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
new file mode 100644
index 000000000..d8d871931
--- /dev/null
+++ b/include/net/cfg802154.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2007, 2008, 2009 Siemens AG
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ */
+
+#ifndef __NET_CFG802154_H
+#define __NET_CFG802154_H
+
+#include <linux/ieee802154.h>
+#include <linux/netdevice.h>
+#include <linux/mutex.h>
+#include <linux/bug.h>
+
+#include <net/nl802154.h>
+
+struct wpan_phy;
+struct wpan_phy_cca;
+
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+struct ieee802154_llsec_device_key;
+struct ieee802154_llsec_seclevel;
+struct ieee802154_llsec_params;
+struct ieee802154_llsec_device;
+struct ieee802154_llsec_table;
+struct ieee802154_llsec_key_id;
+struct ieee802154_llsec_key;
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
+struct cfg802154_ops {
+ struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
+ const char *name,
+ unsigned char name_assign_type,
+ int type);
+ void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
+ struct net_device *dev);
+ int (*suspend)(struct wpan_phy *wpan_phy);
+ int (*resume)(struct wpan_phy *wpan_phy);
+ int (*add_virtual_intf)(struct wpan_phy *wpan_phy,
+ const char *name,
+ unsigned char name_assign_type,
+ enum nl802154_iftype type,
+ __le64 extended_addr);
+ int (*del_virtual_intf)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev);
+ int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel);
+ int (*set_cca_mode)(struct wpan_phy *wpan_phy,
+ const struct wpan_phy_cca *cca);
+ int (*set_cca_ed_level)(struct wpan_phy *wpan_phy, s32 ed_level);
+ int (*set_tx_power)(struct wpan_phy *wpan_phy, s32 power);
+ int (*set_pan_id)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, __le16 pan_id);
+ int (*set_short_addr)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, __le16 short_addr);
+ int (*set_backoff_exponent)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, u8 min_be,
+ u8 max_be);
+ int (*set_max_csma_backoffs)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ u8 max_csma_backoffs);
+ int (*set_max_frame_retries)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ s8 max_frame_retries);
+ int (*set_lbt_mode)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, bool mode);
+ int (*set_ackreq_default)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, bool ackreq);
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+ void (*get_llsec_table)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ struct ieee802154_llsec_table **table);
+ void (*lock_llsec_table)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev);
+ void (*unlock_llsec_table)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev);
+ /* TODO remove locking/get table callbacks, this is part of the
+ * nl802154 interface and should be accessible from ieee802154 layer.
+ */
+ int (*get_llsec_params)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ struct ieee802154_llsec_params *params);
+ int (*set_llsec_params)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ const struct ieee802154_llsec_params *params,
+ int changed);
+ int (*add_llsec_key)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ const struct ieee802154_llsec_key_id *id,
+ const struct ieee802154_llsec_key *key);
+ int (*del_llsec_key)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ const struct ieee802154_llsec_key_id *id);
+ int (*add_seclevel)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ const struct ieee802154_llsec_seclevel *sl);
+ int (*del_seclevel)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ const struct ieee802154_llsec_seclevel *sl);
+ int (*add_device)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ const struct ieee802154_llsec_device *dev);
+ int (*del_device)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, __le64 extended_addr);
+ int (*add_devkey)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ __le64 extended_addr,
+ const struct ieee802154_llsec_device_key *key);
+ int (*del_devkey)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ __le64 extended_addr,
+ const struct ieee802154_llsec_device_key *key);
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+};
+
+static inline bool
+wpan_phy_supported_bool(bool b, enum nl802154_supported_bool_states st)
+{
+ switch (st) {
+ case NL802154_SUPPORTED_BOOL_TRUE:
+ return b;
+ case NL802154_SUPPORTED_BOOL_FALSE:
+ return !b;
+ case NL802154_SUPPORTED_BOOL_BOTH:
+ return true;
+ default:
+ WARN_ON(1);
+ }
+
+ return false;
+}
+
+struct wpan_phy_supported {
+ u32 channels[IEEE802154_MAX_PAGE + 1],
+ cca_modes, cca_opts, iftypes;
+ enum nl802154_supported_bool_states lbt;
+ u8 min_minbe, max_minbe, min_maxbe, max_maxbe,
+ min_csma_backoffs, max_csma_backoffs;
+ s8 min_frame_retries, max_frame_retries;
+ size_t tx_powers_size, cca_ed_levels_size;
+ const s32 *tx_powers, *cca_ed_levels;
+};
+
+struct wpan_phy_cca {
+ enum nl802154_cca_modes mode;
+ enum nl802154_cca_opts opt;
+};
+
+static inline bool
+wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b)
+{
+ if (a->mode != b->mode)
+ return false;
+
+ if (a->mode == NL802154_CCA_ENERGY_CARRIER)
+ return a->opt == b->opt;
+
+ return true;
+}
+
+/**
+ * enum wpan_phy_flags - WPAN PHY capability flags
+ * @WPAN_PHY_FLAG_TXPOWER: Indicates that the transceiver supports
+ * transmit power setting.
+ * @WPAN_PHY_FLAG_CCA_ED_LEVEL: Indicates that the transceiver supports cca ed
+ * level setting.
+ * @WPAN_PHY_FLAG_CCA_MODE: Indicates that the transceiver supports cca mode
+ * setting.
+ */
+enum wpan_phy_flags {
+ WPAN_PHY_FLAG_TXPOWER = BIT(1),
+ WPAN_PHY_FLAG_CCA_ED_LEVEL = BIT(2),
+ WPAN_PHY_FLAG_CCA_MODE = BIT(3),
+};
+
+struct wpan_phy {
+ /* If multiple wpan_phys are registered and you're handed e.g.
+ * a regular netdev with assigned ieee802154_ptr, you won't
+ * know whether it points to a wpan_phy your driver has registered
+ * or not. Assign this to something global to your driver to
+ * help determine whether you own this wpan_phy or not.
+ */
+ const void *privid;
+
+ u32 flags;
+
+ /*
+ * This is a PIB according to 802.15.4-2011.
+ * We do not provide timing-related variables, as they
+ * aren't used outside of the driver.
+ */
+ u8 current_channel;
+ u8 current_page;
+ struct wpan_phy_supported supported;
+ /* current transmit_power in mBm */
+ s32 transmit_power;
+ struct wpan_phy_cca cca;
+
+ __le64 perm_extended_addr;
+
+ /* current cca ed threshold in mBm */
+ s32 cca_ed_level;
+
+ /* PHY-dependent MAC PIB values */
+
+ /* 802.15.4 acronym: Tdsym in nsec */
+ u32 symbol_duration;
+ /* lifs and sifs periods timing */
+ u16 lifs_period;
+ u16 sifs_period;
+
+ struct device dev;
+
+ /* the network namespace this phy lives in currently */
+ possible_net_t _net;
+
+ char priv[] __aligned(NETDEV_ALIGN);
+};
+
+static inline struct net *wpan_phy_net(struct wpan_phy *wpan_phy)
+{
+ return read_pnet(&wpan_phy->_net);
+}
+
+static inline void wpan_phy_net_set(struct wpan_phy *wpan_phy, struct net *net)
+{
+ write_pnet(&wpan_phy->_net, net);
+}
+
+/**
+ * struct ieee802154_addr - IEEE802.15.4 device address
+ * @mode: Address mode from frame header. Can be one of:
+ * - @IEEE802154_ADDR_NONE
+ * - @IEEE802154_ADDR_SHORT
+ * - @IEEE802154_ADDR_LONG
+ * @pan_id: The PAN ID this address belongs to
+ * @short_addr: address if @mode is @IEEE802154_ADDR_SHORT
+ * @extended_addr: address if @mode is @IEEE802154_ADDR_LONG
+ */
+struct ieee802154_addr {
+ u8 mode;
+ __le16 pan_id;
+ union {
+ __le16 short_addr;
+ __le64 extended_addr;
+ };
+};
+
+struct ieee802154_llsec_key_id {
+ u8 mode;
+ u8 id;
+ union {
+ struct ieee802154_addr device_addr;
+ __le32 short_source;
+ __le64 extended_source;
+ };
+};
+
+#define IEEE802154_LLSEC_KEY_SIZE 16
+
+struct ieee802154_llsec_key {
+ u8 frame_types;
+ u32 cmd_frame_ids;
+ /* TODO replace with NL802154_KEY_SIZE */
+ u8 key[IEEE802154_LLSEC_KEY_SIZE];
+};
+
+struct ieee802154_llsec_key_entry {
+ struct list_head list;
+
+ struct ieee802154_llsec_key_id id;
+ struct ieee802154_llsec_key *key;
+};
+
+struct ieee802154_llsec_params {
+ bool enabled;
+
+ __be32 frame_counter;
+ u8 out_level;
+ struct ieee802154_llsec_key_id out_key;
+
+ __le64 default_key_source;
+
+ __le16 pan_id;
+ __le64 hwaddr;
+ __le64 coord_hwaddr;
+ __le16 coord_shortaddr;
+};
+
+struct ieee802154_llsec_table {
+ struct list_head keys;
+ struct list_head devices;
+ struct list_head security_levels;
+};
+
+struct ieee802154_llsec_seclevel {
+ struct list_head list;
+
+ u8 frame_type;
+ u8 cmd_frame_id;
+ bool device_override;
+ u32 sec_levels;
+};
+
+struct ieee802154_llsec_device {
+ struct list_head list;
+
+ __le16 pan_id;
+ __le16 short_addr;
+ __le64 hwaddr;
+ u32 frame_counter;
+ bool seclevel_exempt;
+
+ u8 key_mode;
+ struct list_head keys;
+};
+
+struct ieee802154_llsec_device_key {
+ struct list_head list;
+
+ struct ieee802154_llsec_key_id key_id;
+ u32 frame_counter;
+};
+
+struct wpan_dev_header_ops {
+ /* TODO create callback currently assumes ieee802154_mac_cb inside
+ * skb->cb. This should be changed to give these information as
+ * parameter.
+ */
+ int (*create)(struct sk_buff *skb, struct net_device *dev,
+ const struct ieee802154_addr *daddr,
+ const struct ieee802154_addr *saddr,
+ unsigned int len);
+};
+
+struct wpan_dev {
+ struct wpan_phy *wpan_phy;
+ int iftype;
+
+ /* the remainder of this struct should be private to cfg802154 */
+ struct list_head list;
+ struct net_device *netdev;
+
+ const struct wpan_dev_header_ops *header_ops;
+
+ /* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */
+ struct net_device *lowpan_dev;
+
+ u32 identifier;
+
+ /* MAC PIB */
+ __le16 pan_id;
+ __le16 short_addr;
+ __le64 extended_addr;
+
+ /* MAC BSN field */
+ atomic_t bsn;
+ /* MAC DSN field */
+ atomic_t dsn;
+
+ u8 min_be;
+ u8 max_be;
+ u8 csma_retries;
+ s8 frame_retries;
+
+ bool lbt;
+
+ bool promiscuous_mode;
+
+ /* fallback for acknowledgment bit setting */
+ bool ackreq;
+};
+
+#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
+
+#if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
+static inline int
+wpan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
+ const struct ieee802154_addr *daddr,
+ const struct ieee802154_addr *saddr,
+ unsigned int len)
+{
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+
+ return wpan_dev->header_ops->create(skb, dev, daddr, saddr, len);
+}
+#endif
+
+struct wpan_phy *
+wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size);
+static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev)
+{
+ phy->dev.parent = dev;
+}
+
+int wpan_phy_register(struct wpan_phy *phy);
+void wpan_phy_unregister(struct wpan_phy *phy);
+void wpan_phy_free(struct wpan_phy *phy);
+/* Same semantics as for class_for_each_device */
+int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), void *data);
+
+static inline void *wpan_phy_priv(struct wpan_phy *phy)
+{
+ BUG_ON(!phy);
+ return &phy->priv;
+}
+
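+/*
+ * Usage sketch (illustrative; foo_ops and struct foo_priv are hypothetical
+ * driver names): a driver embeds its private state in the phy allocation:
+ *
+ *	struct wpan_phy *phy = wpan_phy_new(&foo_ops, sizeof(struct foo_priv));
+ *	struct foo_priv *priv = wpan_phy_priv(phy);
+ *
+ *	wpan_phy_set_dev(phy, parent_device);
+ *	(fill in phy->supported, phy->flags, addresses, ...)
+ *	err = wpan_phy_register(phy);
+ *
+ * On teardown, wpan_phy_unregister() is followed by wpan_phy_free().
+ */
+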
+struct wpan_phy *wpan_phy_find(const char *str);
+
+static inline void wpan_phy_put(struct wpan_phy *phy)
+{
+ put_device(&phy->dev);
+}
+
+static inline const char *wpan_phy_name(struct wpan_phy *phy)
+{
+ return dev_name(&phy->dev);
+}
+
+void ieee802154_configure_durations(struct wpan_phy *phy);
+
+#endif /* __NET_CFG802154_H */
diff --git a/include/net/checksum.h b/include/net/checksum.h
new file mode 100644
index 000000000..6bc783b7a
--- /dev/null
+++ b/include/net/checksum.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Checksumming functions for IP, TCP, UDP and so on
+ *
+ * Authors: Jorge Cwik, <jorge@laser.satlink.net>
+ * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ * Borrows very liberally from tcp.c and ip.c, see those
+ * files for more names.
+ */
+
+#ifndef _CHECKSUM_H
+#define _CHECKSUM_H
+
+#include <linux/errno.h>
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <linux/uaccess.h>
+#include <asm/checksum.h>
+
+#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+static __always_inline
+__wsum csum_and_copy_from_user(const void __user *src, void *dst,
+ int len)
+{
+ if (copy_from_user(dst, src, len))
+ return 0;
+ return csum_partial(dst, len, ~0U);
+}
+#endif
+
+#ifndef HAVE_CSUM_COPY_USER
+static __always_inline __wsum
+csum_and_copy_to_user(const void *src, void __user *dst, int len)
+{
+ __wsum sum = csum_partial(src, len, ~0U);
+
+ if (copy_to_user(dst, src, len) == 0)
+ return sum;
+ return 0;
+}
+#endif
+
+#ifndef _HAVE_ARCH_CSUM_AND_COPY
+static __always_inline __wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len)
+{
+ memcpy(dst, src, len);
+ return csum_partial(dst, len, 0);
+}
+#endif
+
+#ifndef HAVE_ARCH_CSUM_ADD
+static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+ u32 res = (__force u32)csum;
+ res += (__force u32)addend;
+ return (__force __wsum)(res + (res < (__force u32)addend));
+}
+#endif
+
+static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
+{
+ return csum_add(csum, ~addend);
+}
+
+static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
+{
+ u16 res = (__force u16)csum;
+
+ res += (__force u16)addend;
+ return (__force __sum16)(res + (res < (__force u16)addend));
+}
+
+static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
+{
+ return csum16_add(csum, ~addend);
+}
+
+#ifndef HAVE_ARCH_CSUM_SHIFT
+static __always_inline __wsum csum_shift(__wsum sum, int offset)
+{
+ /* rotate sum to align it with a 16b boundary */
+ if (offset & 1)
+ return (__force __wsum)ror32((__force u32)sum, 8);
+ return sum;
+}
+#endif
+
+static __always_inline __wsum
+csum_block_add(__wsum csum, __wsum csum2, int offset)
+{
+ return csum_add(csum, csum_shift(csum2, offset));
+}
+
+static __always_inline __wsum
+csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
+{
+ return csum_block_add(csum, csum2, offset);
+}
+
+static __always_inline __wsum
+csum_block_sub(__wsum csum, __wsum csum2, int offset)
+{
+ return csum_block_add(csum, ~csum2, offset);
+}
+
+static __always_inline __wsum csum_unfold(__sum16 n)
+{
+ return (__force __wsum)n;
+}
+
+static __always_inline
+__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
+{
+ return csum_partial(buff, len, sum);
+}
+
+#define CSUM_MANGLED_0 ((__force __sum16)0xffff)
+
+static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+{
+ *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+}
+
+static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
+{
+ __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
+
+ *sum = csum_fold(csum_add(tmp, (__force __wsum)to));
+}
+
+/* Implements RFC 1624 (Incremental Internet Checksum)
+ * 3. Discussion states :
+ * HC' = ~(~HC + ~m + m')
+ * m : old value of a 16bit field
+ * m' : new value of a 16bit field
+ */
+static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
+{
+ *sum = ~csum16_add(csum16_sub(~(*sum), old), new);
+}
+
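+/*
+ * Example (illustrative): when a 16-bit field covered by a checksum is
+ * rewritten, e.g. a UDP destination port during NAT, the checksum can be
+ * patched incrementally instead of being recomputed over the whole packet:
+ *
+ *	csum_replace2(&udph->check, udph->dest, new_dest);
+ *	udph->dest = new_dest;
+ *
+ * (udph and new_dest are hypothetical here. When skb->csum may also need
+ * fixing, the inet_proto_csum_replace*() helpers declared below are
+ * normally used instead.)
+ */
+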
+static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
+{
+ *csum = csum_add(csum_sub(*csum, old), new);
+}
+
+struct sk_buff;
+void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
+ __be32 from, __be32 to, bool pseudohdr);
+void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
+ const __be32 *from, const __be32 *to,
+ bool pseudohdr);
+void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+ __wsum diff, bool pseudohdr);
+
+static __always_inline
+void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
+ __be16 from, __be16 to, bool pseudohdr)
+{
+ inet_proto_csum_replace4(sum, skb, (__force __be32)from,
+ (__force __be32)to, pseudohdr);
+}
+
+static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
+ int start, int offset)
+{
+ __sum16 *psum = (__sum16 *)(ptr + offset);
+ __wsum delta;
+
+ /* Subtract out checksum up to start */
+ csum = csum_sub(csum, csum_partial(ptr, start, 0));
+
+ /* Set derived checksum in packet */
+ delta = csum_sub((__force __wsum)csum_fold(csum),
+ (__force __wsum)*psum);
+ *psum = csum_fold(csum);
+
+ return delta;
+}
+
+static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
+{
+ *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
+}
+
+static __always_inline __wsum wsum_negate(__wsum val)
+{
+ return (__force __wsum)-((__force u32)val);
+}
+#endif
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
new file mode 100644
index 000000000..53dd7d988
--- /dev/null
+++ b/include/net/cipso_ipv4.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * CIPSO - Commercial IP Security Option
+ *
+ * This is an implementation of the CIPSO 2.2 protocol as specified in
+ * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in
+ * FIPS-188, copies of both documents can be found in the Documentation
+ * directory. While CIPSO never became a full IETF RFC standard many vendors
+ * have chosen to adopt the protocol and over the years it has become a
+ * de-facto standard for labeled networking.
+ *
+ * Author: Paul Moore <paul@paul-moore.com>
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
+ */
+
+#ifndef _CIPSO_IPV4_H
+#define _CIPSO_IPV4_H
+
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/list.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <net/netlabel.h>
+#include <net/request_sock.h>
+#include <linux/atomic.h>
+#include <linux/refcount.h>
+#include <asm/unaligned.h>
+
+/* known doi values */
+#define CIPSO_V4_DOI_UNKNOWN 0x00000000
+
+/* standard tag types */
+#define CIPSO_V4_TAG_INVALID 0
+#define CIPSO_V4_TAG_RBITMAP 1
+#define CIPSO_V4_TAG_ENUM 2
+#define CIPSO_V4_TAG_RANGE 5
+#define CIPSO_V4_TAG_PBITMAP 6
+#define CIPSO_V4_TAG_FREEFORM 7
+
+/* non-standard tag types (tags > 127) */
+#define CIPSO_V4_TAG_LOCAL 128
+
+/* doi mapping types */
+#define CIPSO_V4_MAP_UNKNOWN 0
+#define CIPSO_V4_MAP_TRANS 1
+#define CIPSO_V4_MAP_PASS 2
+#define CIPSO_V4_MAP_LOCAL 3
+
+/* limits */
+#define CIPSO_V4_MAX_REM_LVLS 255
+#define CIPSO_V4_INV_LVL 0x80000000
+#define CIPSO_V4_MAX_LOC_LVLS (CIPSO_V4_INV_LVL - 1)
+#define CIPSO_V4_MAX_REM_CATS 65534
+#define CIPSO_V4_INV_CAT 0x80000000
+#define CIPSO_V4_MAX_LOC_CATS (CIPSO_V4_INV_CAT - 1)
+
+/*
+ * CIPSO DOI definitions
+ */
+
+/* DOI definition struct */
+#define CIPSO_V4_TAG_MAXCNT 5
+struct cipso_v4_doi {
+ u32 doi;
+ u32 type;
+ union {
+ struct cipso_v4_std_map_tbl *std;
+ } map;
+ u8 tags[CIPSO_V4_TAG_MAXCNT];
+
+ refcount_t refcount;
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+/* Standard CIPSO mapping table */
+/* NOTE: the highest order bit (i.e. 0x80000000) is an 'invalid' flag, if the
+ * bit is set then consider that value as unspecified, meaning the
+ * mapping for that particular level/category is invalid */
+struct cipso_v4_std_map_tbl {
+ struct {
+ u32 *cipso;
+ u32 *local;
+ u32 cipso_size;
+ u32 local_size;
+ } lvl;
+ struct {
+ u32 *cipso;
+ u32 *local;
+ u32 cipso_size;
+ u32 local_size;
+ } cat;
+};
+
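+/*
+ * Example (illustrative): for a CIPSO_V4_MAP_TRANS DOI the lvl arrays act as
+ * direct lookup tables in both directions:
+ *
+ *	net_lvl  = tbl->lvl.local[host_lvl];	(local -> CIPSO level)
+ *	host_lvl = tbl->lvl.cipso[net_lvl];	(CIPSO -> local level)
+ *
+ * Entries with the invalid bit (CIPSO_V4_INV_LVL) set mark holes in the
+ * mapping, as described in the NOTE above. The cat arrays work the same way.
+ */
+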
+/*
+ * Sysctl Variables
+ */
+
+#ifdef CONFIG_NETLABEL
+extern int cipso_v4_cache_enabled;
+extern int cipso_v4_cache_bucketsize;
+extern int cipso_v4_rbm_optfmt;
+extern int cipso_v4_rbm_strictvalid;
+#endif
+
+/*
+ * DOI List Functions
+ */
+
+#ifdef CONFIG_NETLABEL
+int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
+ struct netlbl_audit *audit_info);
+void cipso_v4_doi_free(struct cipso_v4_doi *doi_def);
+int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info);
+struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi);
+void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def);
+int cipso_v4_doi_walk(u32 *skip_cnt,
+ int (*callback) (struct cipso_v4_doi *doi_def, void *arg),
+ void *cb_arg);
+#else
+static inline int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
+ struct netlbl_audit *audit_info)
+{
+ return -ENOSYS;
+}
+
+static inline void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
+{
+ return;
+}
+
+static inline int cipso_v4_doi_remove(u32 doi,
+ struct netlbl_audit *audit_info)
+{
+ return 0;
+}
+
+static inline struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi)
+{
+ return NULL;
+}
+
+static inline int cipso_v4_doi_walk(u32 *skip_cnt,
+ int (*callback) (struct cipso_v4_doi *doi_def, void *arg),
+ void *cb_arg)
+{
+ return 0;
+}
+#endif /* CONFIG_NETLABEL */
+
+/*
+ * Label Mapping Cache Functions
+ */
+
+#ifdef CONFIG_NETLABEL
+void cipso_v4_cache_invalidate(void);
+int cipso_v4_cache_add(const unsigned char *cipso_ptr,
+ const struct netlbl_lsm_secattr *secattr);
+#else
+static inline void cipso_v4_cache_invalidate(void)
+{
+ return;
+}
+
+static inline int cipso_v4_cache_add(const unsigned char *cipso_ptr,
+ const struct netlbl_lsm_secattr *secattr)
+{
+ return 0;
+}
+#endif /* CONFIG_NETLABEL */
+
+/*
+ * Protocol Handling Functions
+ */
+
+#ifdef CONFIG_NETLABEL
+void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway);
+int cipso_v4_getattr(const unsigned char *cipso,
+ struct netlbl_lsm_secattr *secattr);
+int cipso_v4_sock_setattr(struct sock *sk,
+ const struct cipso_v4_doi *doi_def,
+ const struct netlbl_lsm_secattr *secattr);
+void cipso_v4_sock_delattr(struct sock *sk);
+int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr);
+int cipso_v4_req_setattr(struct request_sock *req,
+ const struct cipso_v4_doi *doi_def,
+ const struct netlbl_lsm_secattr *secattr);
+void cipso_v4_req_delattr(struct request_sock *req);
+int cipso_v4_skbuff_setattr(struct sk_buff *skb,
+ const struct cipso_v4_doi *doi_def,
+ const struct netlbl_lsm_secattr *secattr);
+int cipso_v4_skbuff_delattr(struct sk_buff *skb);
+int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
+ struct netlbl_lsm_secattr *secattr);
+unsigned char *cipso_v4_optptr(const struct sk_buff *skb);
+int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option);
+#else
+static inline void cipso_v4_error(struct sk_buff *skb,
+ int error,
+ u32 gateway)
+{
+ return;
+}
+
+static inline int cipso_v4_getattr(const unsigned char *cipso,
+ struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOSYS;
+}
+
+static inline int cipso_v4_sock_setattr(struct sock *sk,
+ const struct cipso_v4_doi *doi_def,
+ const struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOSYS;
+}
+
+static inline void cipso_v4_sock_delattr(struct sock *sk)
+{
+}
+
+static inline int cipso_v4_sock_getattr(struct sock *sk,
+ struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOSYS;
+}
+
+static inline int cipso_v4_req_setattr(struct request_sock *req,
+ const struct cipso_v4_doi *doi_def,
+ const struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOSYS;
+}
+
+static inline void cipso_v4_req_delattr(struct request_sock *req)
+{
+ return;
+}
+
+static inline int cipso_v4_skbuff_setattr(struct sk_buff *skb,
+ const struct cipso_v4_doi *doi_def,
+ const struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOSYS;
+}
+
+static inline int cipso_v4_skbuff_delattr(struct sk_buff *skb)
+{
+ return -ENOSYS;
+}
+
+static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
+ struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOSYS;
+}
+
+static inline unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
+{
+ return NULL;
+}
+
+static inline int cipso_v4_validate(const struct sk_buff *skb,
+ unsigned char **option)
+{
+ unsigned char *opt = *option;
+ unsigned char err_offset = 0;
+ u8 opt_len = opt[1];
+ u8 opt_iter;
+ u8 tag_len;
+
+ if (opt_len < 8) {
+ err_offset = 1;
+ goto out;
+ }
+
+ if (get_unaligned_be32(&opt[2]) == 0) {
+ err_offset = 2;
+ goto out;
+ }
+
+ for (opt_iter = 6; opt_iter < opt_len;) {
+ if (opt_iter + 1 == opt_len) {
+ err_offset = opt_iter;
+ goto out;
+ }
+ tag_len = opt[opt_iter + 1];
+ if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) {
+ err_offset = opt_iter + 1;
+ goto out;
+ }
+ opt_iter += tag_len;
+ }
+
+out:
+ *option = opt + err_offset;
+ return err_offset;
+
+}
+#endif /* CONFIG_NETLABEL */
+
+#endif /* _CIPSO_IPV4_H */
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
new file mode 100644
index 000000000..7e78e7d6f
--- /dev/null
+++ b/include/net/cls_cgroup.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * cls_cgroup.h Control Group Classifier
+ *
+ * Authors: Thomas Graf <tgraf@suug.ch>
+ */
+
+#ifndef _NET_CLS_CGROUP_H
+#define _NET_CLS_CGROUP_H
+
+#include <linux/cgroup.h>
+#include <linux/hardirq.h>
+#include <linux/rcupdate.h>
+#include <net/sock.h>
+#include <net/inet_sock.h>
+
+#ifdef CONFIG_CGROUP_NET_CLASSID
+struct cgroup_cls_state {
+ struct cgroup_subsys_state css;
+ u32 classid;
+};
+
+struct cgroup_cls_state *task_cls_state(struct task_struct *p);
+
+static inline u32 task_cls_classid(struct task_struct *p)
+{
+ u32 classid;
+
+ if (in_interrupt())
+ return 0;
+
+ rcu_read_lock();
+ classid = container_of(task_css(p, net_cls_cgrp_id),
+ struct cgroup_cls_state, css)->classid;
+ rcu_read_unlock();
+
+ return classid;
+}
+
+static inline void sock_update_classid(struct sock_cgroup_data *skcd)
+{
+ u32 classid;
+
+ classid = task_cls_classid(current);
+ sock_cgroup_set_classid(skcd, classid);
+}
+
+static inline u32 __task_get_classid(struct task_struct *task)
+{
+ return task_cls_state(task)->classid;
+}
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+ u32 classid = __task_get_classid(current);
+
+ /* Due to the nature of the classifier it is required to ignore all
+ * packets originating from softirq context as accessing `current'
+ * would lead to false results.
+ *
+ * This test assumes that all callers of dev_queue_xmit() explicitly
+ * disable bh. Knowing this, it is possible to detect softirq based
+ * calls by looking at the number of nested bh disable calls because
+ * softirqs always disable bh.
+ */
+ if (in_serving_softirq()) {
+ struct sock *sk = skb_to_full_sk(skb);
+
+ /* If there is a sock_cgroup_classid we'll use that. */
+ if (!sk || !sk_fullsock(sk))
+ return 0;
+
+ classid = sock_cgroup_classid(&sk->sk_cgrp_data);
+ }
+
+ return classid;
+}
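+
+/*
+ * Example (illustrative): a classifier calls this from its classify hook;
+ * a classid of zero means no class was configured for the originating task:
+ *
+ *	u32 classid = task_get_classid(skb);
+ *
+ *	if (!classid)
+ *		return -1;	(no match)
+ */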
+#else /* !CONFIG_CGROUP_NET_CLASSID */
+static inline void sock_update_classid(struct sock_cgroup_data *skcd)
+{
+}
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+ return 0;
+}
+#endif /* CONFIG_CGROUP_NET_CLASSID */
+#endif /* _NET_CLS_CGROUP_H */
diff --git a/include/net/codel.h b/include/net/codel.h
new file mode 100644
index 000000000..5fed2f16c
--- /dev/null
+++ b/include/net/codel.h
@@ -0,0 +1,167 @@
+#ifndef __NET_SCHED_CODEL_H
+#define __NET_SCHED_CODEL_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ktime.h>
+#include <linux/skbuff.h>
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on Linux by Dave Taht and Eric Dumazet
+ */
+
+
+/* CoDel uses a 1024 nsec clock, encoded in u32
+ * This gives a range of 2199 seconds, because of signed compares
+ */
+typedef u32 codel_time_t;
+typedef s32 codel_tdiff_t;
+#define CODEL_SHIFT 10
+#define MS2TIME(a) (((a) * NSEC_PER_MSEC) >> CODEL_SHIFT)
+
+static inline codel_time_t codel_get_time(void)
+{
+ u64 ns = ktime_get_ns();
+
+ return ns >> CODEL_SHIFT;
+}
+
+/* Dealing with timer wrapping, according to RFC 1982, as described in Wikipedia:
+ * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
+ * codel_time_after(a,b) returns true if the time a is after time b.
+ */
+#define codel_time_after(a, b) \
+ (typecheck(codel_time_t, a) && \
+ typecheck(codel_time_t, b) && \
+ ((s32)((a) - (b)) > 0))
+#define codel_time_before(a, b) codel_time_after(b, a)
+
+#define codel_time_after_eq(a, b) \
+ (typecheck(codel_time_t, a) && \
+ typecheck(codel_time_t, b) && \
+ ((s32)((a) - (b)) >= 0))
+#define codel_time_before_eq(a, b) codel_time_after_eq(b, a)
+
+static inline u32 codel_time_to_us(codel_time_t val)
+{
+ u64 valns = ((u64)val << CODEL_SHIFT);
+
+ do_div(valns, NSEC_PER_USEC);
+ return (u32)valns;
+}
+
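+/*
+ * Example (illustrative): since codel_time_t wraps (the usable range is
+ * about 2199 seconds, see above), comparisons must go through the helpers
+ * rather than plain integer compares:
+ *
+ *	codel_time_t now = codel_get_time();
+ *
+ *	if (codel_time_after_eq(now, vars->drop_next))
+ *		(time to run the drop logic again)
+ *
+ * A plain 'now >= vars->drop_next' gives the wrong answer near the wrap.
+ */
+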
+/**
+ * struct codel_params - contains codel parameters
+ * @target: target queue size (in time units)
+ * @ce_threshold: threshold for marking packets with ECN CE
+ * @interval: width of moving time window
+ * @mtu: device mtu, or minimal queue backlog in bytes.
+ * @ecn: is Explicit Congestion Notification enabled
+ * @ce_threshold_selector: apply ce_threshold to packets matching this value
+ * in the diffserv/ECN byte of the IP header
+ * @ce_threshold_mask: mask to apply to ce_threshold_selector comparison
+ */
+struct codel_params {
+ codel_time_t target;
+ codel_time_t ce_threshold;
+ codel_time_t interval;
+ u32 mtu;
+ bool ecn;
+ u8 ce_threshold_selector;
+ u8 ce_threshold_mask;
+};
+
+/**
+ * struct codel_vars - contains codel variables
+ * @count: how many drops we've done since the last time we
+ * entered dropping state
+ * @lastcount: count at entry to dropping state
+ * @dropping: set to true if in dropping state
+ * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1
+ * @first_above_time: when we went (or will go) continuously above target
+ * for interval
+ * @drop_next: time to drop next packet, or when we dropped last
+ * @ldelay: sojourn time of last dequeued packet
+ */
+struct codel_vars {
+ u32 count;
+ u32 lastcount;
+ bool dropping;
+ u16 rec_inv_sqrt;
+ codel_time_t first_above_time;
+ codel_time_t drop_next;
+ codel_time_t ldelay;
+};
+
+#define REC_INV_SQRT_BITS (8 * sizeof(u16)) /* or sizeof_in_bits(rec_inv_sqrt) */
+/* needed shift to get a Q0.32 number from rec_inv_sqrt */
+#define REC_INV_SQRT_SHIFT (32 - REC_INV_SQRT_BITS)
+
+/**
+ * struct codel_stats - contains codel shared variables and stats
+ * @maxpacket: largest packet we've seen so far
+ * @drop_count: temp count of dropped packets in dequeue()
+ * @drop_len: bytes of dropped packets in dequeue()
+ * @ecn_mark: number of packets we ECN marked instead of dropping
+ * @ce_mark: number of packets CE marked because sojourn time was above ce_threshold
+ */
+struct codel_stats {
+ u32 maxpacket;
+ u32 drop_count;
+ u32 drop_len;
+ u32 ecn_mark;
+ u32 ce_mark;
+};
+
+#define CODEL_DISABLED_THRESHOLD INT_MAX
+
+typedef u32 (*codel_skb_len_t)(const struct sk_buff *skb);
+typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *skb);
+typedef void (*codel_skb_drop_t)(struct sk_buff *skb, void *ctx);
+typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
+ void *ctx);
+
+#endif
diff --git a/include/net/codel_impl.h b/include/net/codel_impl.h
new file mode 100644
index 000000000..78a27ac73
--- /dev/null
+++ b/include/net/codel_impl.h
@@ -0,0 +1,269 @@
+#ifndef __NET_SCHED_CODEL_IMPL_H
+#define __NET_SCHED_CODEL_IMPL_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on Linux by Dave Taht and Eric Dumazet
+ */
+
+#include <net/inet_ecn.h>
+
+static void codel_params_init(struct codel_params *params)
+{
+ params->interval = MS2TIME(100);
+ params->target = MS2TIME(5);
+ params->ce_threshold = CODEL_DISABLED_THRESHOLD;
+ params->ce_threshold_mask = 0;
+ params->ce_threshold_selector = 0;
+ params->ecn = false;
+}
+
+static void codel_vars_init(struct codel_vars *vars)
+{
+ memset(vars, 0, sizeof(*vars));
+}
+
+static void codel_stats_init(struct codel_stats *stats)
+{
+ stats->maxpacket = 0;
+}
+
+/*
+ * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
+ * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
+ *
+ * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
+ */
+static void codel_Newton_step(struct codel_vars *vars)
+{
+ u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
+ u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
+ u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
+
+ val >>= 2; /* avoid overflow in following multiply */
+ val = (val * invsqrt) >> (32 - 2 + 1);
+
+ vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
+}
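+/* Worked example (illustrative, real-number arithmetic): approximating
+ * 1/sqrt(2) ~ 0.7071 starting from v = 1.0:
+ *   v1 = (1.0   / 2) * (3 - 2 * 1.0^2)   = 0.5
+ *   v2 = (0.5   / 2) * (3 - 2 * 0.5^2)   = 0.625
+ *   v3 = (0.625 / 2) * (3 - 2 * 0.625^2) = 0.6934
+ * Convergence is quadratic, which is why one step per drop is enough.
+ */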
+
+/*
+ * CoDel control_law is t + interval/sqrt(count)
+ * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
+ * both sqrt() and divide operation.
+ */
+static codel_time_t codel_control_law(codel_time_t t,
+ codel_time_t interval,
+ u32 rec_inv_sqrt)
+{
+ return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
+}
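+/* Illustrative values: after the 4th drop, rec_inv_sqrt ~ 1/sqrt(4) = 0.5 in
+ * Q0.32, so reciprocal_scale() returns roughly interval / 2: with a 100 ms
+ * interval the next drop is scheduled ~50 ms out.
+ */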
+
+static bool codel_should_drop(const struct sk_buff *skb,
+ void *ctx,
+ struct codel_vars *vars,
+ struct codel_params *params,
+ struct codel_stats *stats,
+ codel_skb_len_t skb_len_func,
+ codel_skb_time_t skb_time_func,
+ u32 *backlog,
+ codel_time_t now)
+{
+ bool ok_to_drop;
+ u32 skb_len;
+
+ if (!skb) {
+ vars->first_above_time = 0;
+ return false;
+ }
+
+ skb_len = skb_len_func(skb);
+ vars->ldelay = now - skb_time_func(skb);
+
+ if (unlikely(skb_len > stats->maxpacket))
+ stats->maxpacket = skb_len;
+
+ if (codel_time_before(vars->ldelay, params->target) ||
+ *backlog <= params->mtu) {
+ /* went below - stay below for at least interval */
+ vars->first_above_time = 0;
+ return false;
+ }
+ ok_to_drop = false;
+ if (vars->first_above_time == 0) {
+ /* just went above from below. If we stay above
+ * for at least interval we'll say it's ok to drop
+ */
+ vars->first_above_time = now + params->interval;
+ } else if (codel_time_after(now, vars->first_above_time)) {
+ ok_to_drop = true;
+ }
+ return ok_to_drop;
+}
+
+static struct sk_buff *codel_dequeue(void *ctx,
+ u32 *backlog,
+ struct codel_params *params,
+ struct codel_vars *vars,
+ struct codel_stats *stats,
+ codel_skb_len_t skb_len_func,
+ codel_skb_time_t skb_time_func,
+ codel_skb_drop_t drop_func,
+ codel_skb_dequeue_t dequeue_func)
+{
+ struct sk_buff *skb = dequeue_func(vars, ctx);
+ codel_time_t now;
+ bool drop;
+
+ if (!skb) {
+ vars->dropping = false;
+ return skb;
+ }
+ now = codel_get_time();
+ drop = codel_should_drop(skb, ctx, vars, params, stats,
+ skb_len_func, skb_time_func, backlog, now);
+ if (vars->dropping) {
+ if (!drop) {
+ /* sojourn time below target - leave dropping state */
+ vars->dropping = false;
+ } else if (codel_time_after_eq(now, vars->drop_next)) {
+ /* It's time for the next drop. Drop the current
+ * packet and dequeue the next. The dequeue might
+ * take us out of dropping state.
+ * If not, schedule the next drop.
+ * A large backlog might result in drop rates so high
+ * that the next drop should happen now,
+ * hence the while loop.
+ */
+ while (vars->dropping &&
+ codel_time_after_eq(now, vars->drop_next)) {
+				vars->count++; /* don't care about possible wrap
+ * since there is no more divide
+ */
+ codel_Newton_step(vars);
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->rec_inv_sqrt);
+ goto end;
+ }
+ stats->drop_len += skb_len_func(skb);
+ drop_func(skb, ctx);
+ stats->drop_count++;
+ skb = dequeue_func(vars, ctx);
+ if (!codel_should_drop(skb, ctx,
+ vars, params, stats,
+ skb_len_func,
+ skb_time_func,
+ backlog, now)) {
+ /* leave dropping state */
+ vars->dropping = false;
+ } else {
+ /* and schedule the next drop */
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->rec_inv_sqrt);
+ }
+ }
+ }
+ } else if (drop) {
+ u32 delta;
+
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ } else {
+ stats->drop_len += skb_len_func(skb);
+ drop_func(skb, ctx);
+ stats->drop_count++;
+
+ skb = dequeue_func(vars, ctx);
+ drop = codel_should_drop(skb, ctx, vars, params,
+ stats, skb_len_func,
+ skb_time_func, backlog, now);
+ }
+ vars->dropping = true;
+ /* if min went above target close to when we last went below it
+ * assume that the drop rate that controlled the queue on the
+ * last cycle is a good starting point to control it now.
+ */
+ delta = vars->count - vars->lastcount;
+ if (delta > 1 &&
+ codel_time_before(now - vars->drop_next,
+ 16 * params->interval)) {
+ vars->count = delta;
+			/* we don't care if the rec_inv_sqrt approximation
+			 * is not very precise:
+			 * the next Newton steps will correct it quadratically.
+ */
+ codel_Newton_step(vars);
+ } else {
+ vars->count = 1;
+ vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
+ }
+ vars->lastcount = vars->count;
+ vars->drop_next = codel_control_law(now, params->interval,
+ vars->rec_inv_sqrt);
+ }
+end:
+ if (skb && codel_time_after(vars->ldelay, params->ce_threshold)) {
+ bool set_ce = true;
+
+ if (params->ce_threshold_mask) {
+ int dsfield = skb_get_dsfield(skb);
+
+ set_ce = (dsfield >= 0 &&
+ (((u8)dsfield & params->ce_threshold_mask) ==
+ params->ce_threshold_selector));
+ }
+ if (set_ce && INET_ECN_set_ce(skb))
+ stats->ce_mark++;
+ }
+ return skb;
+}
+
+#endif
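
A sketch of how a qdisc might wire up codel_dequeue()'s callbacks. The my_* helpers and the assumption that ctx is the Qdisc itself are illustrative; qdisc_pkt_len(), __qdisc_dequeue_head() and kfree_skb() are existing kernel helpers, and codel_get_enqueue_time() comes from codel_qdisc.h below:

	static u32 my_skb_len(const struct sk_buff *skb)
	{
		return qdisc_pkt_len(skb);		/* length cached in the qdisc cb */
	}

	static codel_time_t my_skb_time(const struct sk_buff *skb)
	{
		return codel_get_enqueue_time(skb);	/* stamped at enqueue */
	}

	static void my_drop(struct sk_buff *skb, void *ctx)
	{
		kfree_skb(skb);		/* a real qdisc also updates drop stats */
	}

	static struct sk_buff *my_dequeue(struct codel_vars *vars, void *ctx)
	{
		struct Qdisc *sch = ctx;

		return __qdisc_dequeue_head(&sch->q);
	}

	/* In the qdisc's ->dequeue(), with q being its hypothetical private data:
	 *
	 *	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params,
	 *			    &q->vars, &q->stats, my_skb_len, my_skb_time,
	 *			    my_drop, my_dequeue);
	 */
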
diff --git a/include/net/codel_qdisc.h b/include/net/codel_qdisc.h
new file mode 100644
index 000000000..7d3d9219f
--- /dev/null
+++ b/include/net/codel_qdisc.h
@@ -0,0 +1,77 @@
+#ifndef __NET_SCHED_CODEL_QDISC_H
+#define __NET_SCHED_CODEL_QDISC_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on Linux by Dave Taht and Eric Dumazet
+ */
+
+#include <net/codel.h>
+#include <net/pkt_sched.h>
+
+/* Qdiscs using the codel plugin must use codel_skb_cb in their own cb[] */
+struct codel_skb_cb {
+ codel_time_t enqueue_time;
+ unsigned int mem_usage;
+};
+
+static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
+ return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
+{
+ return get_codel_cb(skb)->enqueue_time;
+}
+
+static void codel_set_enqueue_time(struct sk_buff *skb)
+{
+ get_codel_cb(skb)->enqueue_time = codel_get_time();
+}
+
+#endif
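
The enqueue side of the contract is a one-liner: stamp the packet on arrival so codel_dequeue() can later compute its sojourn time. A minimal sketch (my_codel_enqueue is hypothetical; queue-limit handling omitted):

	static int my_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
	{
		codel_set_enqueue_time(skb);		/* record arrival time */
		return qdisc_enqueue_tail(skb, sch);
	}
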
diff --git a/include/net/compat.h b/include/net/compat.h
new file mode 100644
index 000000000..84c163f40
--- /dev/null
+++ b/include/net/compat.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef NET_COMPAT_H
+#define NET_COMPAT_H
+
+
+struct sock;
+
+#include <linux/compat.h>
+
+struct compat_msghdr {
+ compat_uptr_t msg_name; /* void * */
+ compat_int_t msg_namelen;
+ compat_uptr_t msg_iov; /* struct compat_iovec * */
+ compat_size_t msg_iovlen;
+ compat_uptr_t msg_control; /* void * */
+ compat_size_t msg_controllen;
+ compat_uint_t msg_flags;
+};
+
+struct compat_mmsghdr {
+ struct compat_msghdr msg_hdr;
+ compat_uint_t msg_len;
+};
+
+struct compat_cmsghdr {
+ compat_size_t cmsg_len;
+ compat_int_t cmsg_level;
+ compat_int_t cmsg_type;
+};
+
+struct compat_rtentry {
+ u32 rt_pad1;
+ struct sockaddr rt_dst; /* target address */
+ struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */
+ struct sockaddr rt_genmask; /* target network mask (IP) */
+ unsigned short rt_flags;
+ short rt_pad2;
+ u32 rt_pad3;
+ unsigned char rt_tos;
+ unsigned char rt_class;
+ short rt_pad4;
+ short rt_metric; /* +1 for binary compatibility! */
+ compat_uptr_t rt_dev; /* forcing the device at add */
+ u32 rt_mtu; /* per route MTU/Window */
+ u32 rt_window; /* Window clamping */
+ unsigned short rt_irtt; /* Initial RTT */
+};
+
+int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr *msg,
+ struct sockaddr __user **save_addr);
+int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
+ struct sockaddr __user **, struct iovec **);
+int put_cmsg_compat(struct msghdr*, int, int, int, void *);
+
+int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *,
+ unsigned char *, int);
+
+struct compat_group_req {
+ __u32 gr_interface;
+ struct __kernel_sockaddr_storage gr_group
+ __aligned(4);
+} __packed;
+
+struct compat_group_source_req {
+ __u32 gsr_interface;
+ struct __kernel_sockaddr_storage gsr_group
+ __aligned(4);
+ struct __kernel_sockaddr_storage gsr_source
+ __aligned(4);
+} __packed;
+
+struct compat_group_filter {
+ union {
+ struct {
+ __u32 gf_interface_aux;
+ struct __kernel_sockaddr_storage gf_group_aux
+ __aligned(4);
+ __u32 gf_fmode_aux;
+ __u32 gf_numsrc_aux;
+ struct __kernel_sockaddr_storage gf_slist[1]
+ __aligned(4);
+ } __packed;
+ struct {
+ __u32 gf_interface;
+ struct __kernel_sockaddr_storage gf_group
+ __aligned(4);
+ __u32 gf_fmode;
+ __u32 gf_numsrc;
+ struct __kernel_sockaddr_storage gf_slist_flex[]
+ __aligned(4);
+ } __packed;
+ };
+} __packed;
+
+#endif /* NET_COMPAT_H */
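
The compat_uptr_t fields above are plain 32-bit values; the kernel turns them back into usable user pointers with compat_ptr() from linux/compat.h. A sketch of the conversion (illustrative; real code such as __get_compat_msghdr() also validates lengths and copies the structure in from userspace first):

	static void example_fill_name(struct msghdr *kmsg,
				      const struct compat_msghdr *cmsg,
				      struct sockaddr __user **save_addr)
	{
		*save_addr = compat_ptr(cmsg->msg_name);	/* u32 -> user ptr */
		kmsg->msg_namelen = cmsg->msg_namelen;
	}
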
diff --git a/include/net/datalink.h b/include/net/datalink.h
new file mode 100644
index 000000000..c837ffc7e
--- /dev/null
+++ b/include/net/datalink.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_INET_DATALINK_H_
+#define _NET_INET_DATALINK_H_
+
+#include <linux/list.h>
+
+struct llc_sap;
+struct net_device;
+struct packet_type;
+struct sk_buff;
+
+struct datalink_proto {
+ unsigned char type[8];
+
+ struct llc_sap *sap;
+
+ unsigned short header_length;
+
+ int (*rcvfunc)(struct sk_buff *, struct net_device *,
+ struct packet_type *, struct net_device *);
+ int (*request)(struct datalink_proto *, struct sk_buff *,
+ const unsigned char *);
+ struct list_head node;
+};
+
+struct datalink_proto *make_EII_client(void);
+void destroy_EII_client(struct datalink_proto *dl);
+#endif
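
A sketch of intended use (skb and dest_mac are hypothetical): a protocol obtains an Ethernet-II client once and transmits through its request() hook, which builds the datalink header before handing the frame down.

	struct datalink_proto *dl = make_EII_client();

	if (dl)
		dl->request(dl, skb, dest_mac);	/* add EII header and send */
	/* ... */
	destroy_EII_client(dl);
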
diff --git a/include/net/dcbevent.h b/include/net/dcbevent.h
new file mode 100644
index 000000000..02700262f
--- /dev/null
+++ b/include/net/dcbevent.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010, Intel Corporation.
+ *
+ * Author: John Fastabend <john.r.fastabend@intel.com>
+ */
+
+#ifndef _DCB_EVENT_H
+#define _DCB_EVENT_H
+
+struct notifier_block;
+
+enum dcbevent_notif_type {
+ DCB_APP_EVENT = 1,
+};
+
+#ifdef CONFIG_DCB
+int register_dcbevent_notifier(struct notifier_block *nb);
+int unregister_dcbevent_notifier(struct notifier_block *nb);
+int call_dcbevent_notifiers(unsigned long val, void *v);
+#else
+static inline int
+register_dcbevent_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int unregister_dcbevent_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int call_dcbevent_notifiers(unsigned long val, void *v)
+{
+ return 0;
+}
+#endif /* CONFIG_DCB */
+
+#endif
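
A minimal sketch of a consumer, assuming the DCB_APP_EVENT payload is a struct dcb_app_type as published by dcbnl (handler names hypothetical):

	static int my_dcb_event(struct notifier_block *nb, unsigned long event,
				void *data)
	{
		if (event == DCB_APP_EVENT) {
			struct dcb_app_type *entry = data;
			/* react to the app-table change for entry->ifindex */
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_dcb_nb = {
		.notifier_call = my_dcb_event,
	};

	/* register_dcbevent_notifier(&my_dcb_nb) on init,
	 * unregister_dcbevent_notifier(&my_dcb_nb) on teardown.
	 */
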
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
new file mode 100644
index 000000000..2b2d86fb3
--- /dev/null
+++ b/include/net/dcbnl.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2008, Intel Corporation.
+ *
+ * Author: Lucy Liu <lucy.liu@intel.com>
+ */
+
+#ifndef __NET_DCBNL_H__
+#define __NET_DCBNL_H__
+
+#include <linux/dcbnl.h>
+
+struct net_device;
+
+struct dcb_app_type {
+ int ifindex;
+ struct dcb_app app;
+ struct list_head list;
+ u8 dcbx;
+};
+
+int dcb_setapp(struct net_device *, struct dcb_app *);
+u8 dcb_getapp(struct net_device *, struct dcb_app *);
+int dcb_ieee_setapp(struct net_device *, struct dcb_app *);
+int dcb_ieee_delapp(struct net_device *, struct dcb_app *);
+u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *);
+
+struct dcb_ieee_app_prio_map {
+ u64 map[IEEE_8021QAZ_MAX_TCS];
+};
+void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
+ struct dcb_ieee_app_prio_map *p_map);
+
+struct dcb_ieee_app_dscp_map {
+ u8 map[64];
+};
+void dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
+ struct dcb_ieee_app_dscp_map *p_map);
+u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev);
+
+int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
+ u32 seq, u32 pid);
+int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
+ u32 seq, u32 pid);
+
+/*
+ * Ops struct for the netlink callbacks. Used by DCB-enabled drivers through
+ * the netdevice struct.
+ */
+struct dcbnl_rtnl_ops {
+ /* IEEE 802.1Qaz std */
+ int (*ieee_getets) (struct net_device *, struct ieee_ets *);
+ int (*ieee_setets) (struct net_device *, struct ieee_ets *);
+ int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *);
+ int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *);
+ int (*ieee_getqcn) (struct net_device *, struct ieee_qcn *);
+ int (*ieee_setqcn) (struct net_device *, struct ieee_qcn *);
+ int (*ieee_getqcnstats) (struct net_device *, struct ieee_qcn_stats *);
+ int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *);
+ int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *);
+ int (*ieee_getapp) (struct net_device *, struct dcb_app *);
+ int (*ieee_setapp) (struct net_device *, struct dcb_app *);
+ int (*ieee_delapp) (struct net_device *, struct dcb_app *);
+ int (*ieee_peer_getets) (struct net_device *, struct ieee_ets *);
+ int (*ieee_peer_getpfc) (struct net_device *, struct ieee_pfc *);
+
+ /* CEE std */
+ u8 (*getstate)(struct net_device *);
+ u8 (*setstate)(struct net_device *, u8);
+ void (*getpermhwaddr)(struct net_device *, u8 *);
+ void (*setpgtccfgtx)(struct net_device *, int, u8, u8, u8, u8);
+ void (*setpgbwgcfgtx)(struct net_device *, int, u8);
+ void (*setpgtccfgrx)(struct net_device *, int, u8, u8, u8, u8);
+ void (*setpgbwgcfgrx)(struct net_device *, int, u8);
+ void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);
+ void (*getpgbwgcfgtx)(struct net_device *, int, u8 *);
+ void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);
+ void (*getpgbwgcfgrx)(struct net_device *, int, u8 *);
+ void (*setpfccfg)(struct net_device *, int, u8);
+ void (*getpfccfg)(struct net_device *, int, u8 *);
+ u8 (*setall)(struct net_device *);
+ u8 (*getcap)(struct net_device *, int, u8 *);
+ int (*getnumtcs)(struct net_device *, int, u8 *);
+ int (*setnumtcs)(struct net_device *, int, u8);
+ u8 (*getpfcstate)(struct net_device *);
+ void (*setpfcstate)(struct net_device *, u8);
+ void (*getbcncfg)(struct net_device *, int, u32 *);
+ void (*setbcncfg)(struct net_device *, int, u32);
+ void (*getbcnrp)(struct net_device *, int, u8 *);
+ void (*setbcnrp)(struct net_device *, int, u8);
+ int (*setapp)(struct net_device *, u8, u16, u8);
+ int (*getapp)(struct net_device *, u8, u16);
+ u8 (*getfeatcfg)(struct net_device *, int, u8 *);
+ u8 (*setfeatcfg)(struct net_device *, int, u8);
+
+ /* DCBX configuration */
+ u8 (*getdcbx)(struct net_device *);
+ u8 (*setdcbx)(struct net_device *, u8);
+
+ /* peer apps */
+ int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *,
+ u16 *);
+ int (*peer_getapptable)(struct net_device *, struct dcb_app *);
+
+ /* CEE peer */
+ int (*cee_peer_getpg) (struct net_device *, struct cee_pg *);
+ int (*cee_peer_getpfc) (struct net_device *, struct cee_pfc *);
+
+ /* buffer settings */
+ int (*dcbnl_getbuffer)(struct net_device *, struct dcbnl_buffer *);
+ int (*dcbnl_setbuffer)(struct net_device *, struct dcbnl_buffer *);
+};
+
+#endif /* __NET_DCBNL_H__ */
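
For a feel of the app-table helpers: dcb_setapp() installs a CEE selector/protocol-to-priority mapping. An illustrative entry steering FCoE (ethertype 0x8906) to priority 3, error handling omitted:

	struct dcb_app app = {
		.selector = DCB_APP_IDTYPE_ETHTYPE,
		.protocol = 0x8906,	/* FCoE */
		.priority = 3,
	};

	/* dcb_setapp(netdev, &app); */
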
diff --git a/include/net/devlink.h b/include/net/devlink.h
new file mode 100644
index 000000000..ba6b8b094
--- /dev/null
+++ b/include/net/devlink.h
@@ -0,0 +1,1906 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/net/devlink.h - Network physical device Netlink interface
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ */
+#ifndef _NET_DEVLINK_H_
+#define _NET_DEVLINK_H_
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/refcount.h>
+#include <net/net_namespace.h>
+#include <net/flow_offload.h>
+#include <uapi/linux/devlink.h>
+#include <linux/xarray.h>
+#include <linux/firmware.h>
+
+struct devlink;
+struct devlink_linecard;
+
+struct devlink_port_phys_attrs {
+ u32 port_number; /* Same value as "split group".
+ * A physical port which is visible to the user
+ * for a given port flavour.
+ */
+	u32 split_subport_number; /* If the port is split, this is the number of the subport. */
+};
+
+/**
+ * struct devlink_port_pci_pf_attrs - devlink port's PCI PF attributes
+ * @controller: Associated controller number
+ * @pf: Associated PCI PF number for this port.
+ * @external: when set, indicates if a port is for an external controller
+ */
+struct devlink_port_pci_pf_attrs {
+ u32 controller;
+ u16 pf;
+ u8 external:1;
+};
+
+/**
+ * struct devlink_port_pci_vf_attrs - devlink port's PCI VF attributes
+ * @controller: Associated controller number
+ * @pf: Associated PCI PF number for this port.
+ * @vf: Associated PCI VF of the PCI PF for this port.
+ * @external: when set, indicates if a port is for an external controller
+ */
+struct devlink_port_pci_vf_attrs {
+ u32 controller;
+ u16 pf;
+ u16 vf;
+ u8 external:1;
+};
+
+/**
+ * struct devlink_port_pci_sf_attrs - devlink port's PCI SF attributes
+ * @controller: Associated controller number
+ * @sf: Associated PCI SF of the PCI PF for this port.
+ * @pf: Associated PCI PF number for this port.
+ * @external: when set, indicates if a port is for an external controller
+ */
+struct devlink_port_pci_sf_attrs {
+ u32 controller;
+ u32 sf;
+ u16 pf;
+ u8 external:1;
+};
+
+/**
+ * struct devlink_port_attrs - devlink port object
+ * @flavour: flavour of the port
+ * @split: indicates if this is split port
+ * @splittable: indicates if the port can be split.
+ * @lanes: maximum number of lanes the port supports. A value of 0 is not passed to netlink.
+ * @switch_id: if the port is part of a switch, this is the buffer with its ID; otherwise it is NULL
+ * @phys: physical port attributes
+ * @pci_pf: PCI PF port attributes
+ * @pci_vf: PCI VF port attributes
+ * @pci_sf: PCI SF port attributes
+ */
+struct devlink_port_attrs {
+ u8 split:1,
+ splittable:1;
+ u32 lanes;
+ enum devlink_port_flavour flavour;
+ struct netdev_phys_item_id switch_id;
+ union {
+ struct devlink_port_phys_attrs phys;
+ struct devlink_port_pci_pf_attrs pci_pf;
+ struct devlink_port_pci_vf_attrs pci_vf;
+ struct devlink_port_pci_sf_attrs pci_sf;
+ };
+};
+
+struct devlink_rate {
+ struct list_head list;
+ enum devlink_rate_type type;
+ struct devlink *devlink;
+ void *priv;
+ u64 tx_share;
+ u64 tx_max;
+
+ struct devlink_rate *parent;
+ union {
+ struct devlink_port *devlink_port;
+ struct {
+ char *name;
+ refcount_t refcnt;
+ };
+ };
+};
+
+struct devlink_port {
+ struct list_head list;
+ struct list_head region_list;
+ struct devlink *devlink;
+ unsigned int index;
+ spinlock_t type_lock; /* Protects type and type_dev
+ * pointer consistency.
+ */
+ enum devlink_port_type type;
+ enum devlink_port_type desired_type;
+ void *type_dev;
+ struct devlink_port_attrs attrs;
+ u8 attrs_set:1,
+ switch_port:1,
+ registered:1,
+ initialized:1;
+ struct delayed_work type_warn_dw;
+ struct list_head reporter_list;
+ struct mutex reporters_lock; /* Protects reporter_list */
+
+ struct devlink_rate *devlink_rate;
+ struct devlink_linecard *linecard;
+};
+
+struct devlink_port_new_attrs {
+ enum devlink_port_flavour flavour;
+ unsigned int port_index;
+ u32 controller;
+ u32 sfnum;
+ u16 pfnum;
+ u8 port_index_valid:1,
+ controller_valid:1,
+ sfnum_valid:1;
+};
+
+/**
+ * struct devlink_linecard_ops - Linecard operations
+ * @provision: callback to provision the linecard slot with certain
+ * type of linecard. As a result of this operation,
+ * driver is expected to eventually (could be after
+ * the function call returns) call one of:
+ * devlink_linecard_provision_set()
+ * devlink_linecard_provision_fail()
+ * @unprovision: callback to unprovision the linecard slot. As a result
+ * of this operation, driver is expected to eventually
+ * (could be after the function call returns) call
+ * devlink_linecard_provision_clear()
+ * devlink_linecard_provision_fail()
+ * @same_provision: callback to ask the driver if the linecard is already
+ *		     provisioned in the way the user is asking it to be
+ *		     provisioned.
+ * @types_count: callback to get number of supported types
+ * @types_get: callback to get next type in list
+ */
+struct devlink_linecard_ops {
+ int (*provision)(struct devlink_linecard *linecard, void *priv,
+ const char *type, const void *type_priv,
+ struct netlink_ext_ack *extack);
+ int (*unprovision)(struct devlink_linecard *linecard, void *priv,
+ struct netlink_ext_ack *extack);
+ bool (*same_provision)(struct devlink_linecard *linecard, void *priv,
+ const char *type, const void *type_priv);
+ unsigned int (*types_count)(struct devlink_linecard *linecard,
+ void *priv);
+ void (*types_get)(struct devlink_linecard *linecard,
+ void *priv, unsigned int index, const char **type,
+ const void **type_priv);
+};
+
+struct devlink_sb_pool_info {
+ enum devlink_sb_pool_type pool_type;
+ u32 size;
+ enum devlink_sb_threshold_type threshold_type;
+ u32 cell_size;
+};
+
+/**
+ * struct devlink_dpipe_field - dpipe field object
+ * @name: field name
+ * @id: index inside the headers field array
+ * @bitwidth: bitwidth
+ * @mapping_type: mapping type
+ */
+struct devlink_dpipe_field {
+ const char *name;
+ unsigned int id;
+ unsigned int bitwidth;
+ enum devlink_dpipe_field_mapping_type mapping_type;
+};
+
+/**
+ * struct devlink_dpipe_header - dpipe header object
+ * @name: header name
+ * @id: index; global/local is determined by the global bit
+ * @fields: fields
+ * @fields_count: number of fields
+ * @global: indicates if the header is shared, like most protocol headers,
+ *	    or driver specific
+ */
+struct devlink_dpipe_header {
+ const char *name;
+ unsigned int id;
+ struct devlink_dpipe_field *fields;
+ unsigned int fields_count;
+ bool global;
+};
+
+/**
+ * struct devlink_dpipe_match - represents match operation
+ * @type: type of match
+ * @header_index: header index (packets can have several headers of the same
+ *		  type, as in the case of tunnels)
+ * @header: header
+ * @field_id: field index
+ */
+struct devlink_dpipe_match {
+ enum devlink_dpipe_match_type type;
+ unsigned int header_index;
+ struct devlink_dpipe_header *header;
+ unsigned int field_id;
+};
+
+/**
+ * struct devlink_dpipe_action - represents action operation
+ * @type: type of action
+ * @header_index: header index (packets can have several headers of the same
+ *		  type, as in the case of tunnels)
+ * @header: header
+ * @field_id: field index
+ */
+struct devlink_dpipe_action {
+ enum devlink_dpipe_action_type type;
+ unsigned int header_index;
+ struct devlink_dpipe_header *header;
+ unsigned int field_id;
+};
+
+/**
+ * struct devlink_dpipe_value - represents value of match/action
+ * @action: action
+ * @match: match
+ * @mapping_value: in case the field has some mapping, this value
+ *                 specifies the mapping value
+ * @mapping_valid: specifies if the mapping value is valid
+ * @value_size: value size
+ * @value: value
+ * @mask: bit mask
+ */
+struct devlink_dpipe_value {
+ union {
+ struct devlink_dpipe_action *action;
+ struct devlink_dpipe_match *match;
+ };
+ unsigned int mapping_value;
+ bool mapping_valid;
+ unsigned int value_size;
+ void *value;
+ void *mask;
+};
+
+/**
+ * struct devlink_dpipe_entry - table entry object
+ * @index: index of the entry in the table
+ * @match_values: match values
+ * @match_values_count: count of match tuples
+ * @action_values: actions values
+ * @action_values_count: count of actions values
+ * @counter: value of counter
+ * @counter_valid: specifies if the counter value reported by hardware is valid
+ */
+struct devlink_dpipe_entry {
+ u64 index;
+ struct devlink_dpipe_value *match_values;
+ unsigned int match_values_count;
+ struct devlink_dpipe_value *action_values;
+ unsigned int action_values_count;
+ u64 counter;
+ bool counter_valid;
+};
+
+/**
+ * struct devlink_dpipe_dump_ctx - context provided to driver in order
+ * to dump
+ * @info: info
+ * @cmd: devlink command
+ * @skb: skb
+ * @nest: top attribute
+ * @hdr: hdr
+ */
+struct devlink_dpipe_dump_ctx {
+ struct genl_info *info;
+ enum devlink_command cmd;
+ struct sk_buff *skb;
+ struct nlattr *nest;
+ void *hdr;
+};
+
+struct devlink_dpipe_table_ops;
+
+/**
+ * struct devlink_dpipe_table - table object
+ * @priv: private
+ * @name: table name
+ * @counters_enabled: indicates if counters are active
+ * @counter_control_extern: indicates if counter control is in dpipe or
+ * external tool
+ * @resource_valid: indicates that the resource id is valid
+ * @resource_id: relative resource this table is related to
+ * @resource_units: number of resource's unit consumed per table's entry
+ * @table_ops: table operations
+ * @rcu: rcu
+ */
+struct devlink_dpipe_table {
+ void *priv;
+ struct list_head list;
+ const char *name;
+ bool counters_enabled;
+ bool counter_control_extern;
+ bool resource_valid;
+ u64 resource_id;
+ u64 resource_units;
+ struct devlink_dpipe_table_ops *table_ops;
+ struct rcu_head rcu;
+};
+
+/**
+ * struct devlink_dpipe_table_ops - dpipe_table ops
+ * @actions_dump: dumps all tables actions
+ * @matches_dump: dumps all tables matches
+ * @entries_dump: dumps all active entries in the table
+ * @counters_set_update: when changing the counter status, a hardware sync
+ *			 may be needed to allocate/free counter-related
+ *			 resources
+ * @size_get: get size
+ */
+struct devlink_dpipe_table_ops {
+ int (*actions_dump)(void *priv, struct sk_buff *skb);
+ int (*matches_dump)(void *priv, struct sk_buff *skb);
+ int (*entries_dump)(void *priv, bool counters_enabled,
+ struct devlink_dpipe_dump_ctx *dump_ctx);
+ int (*counters_set_update)(void *priv, bool enable);
+ u64 (*size_get)(void *priv);
+};
+
+/**
+ * struct devlink_dpipe_headers - dpipe headers
+ * @headers: header array; can be shared (global bit) or driver specific
+ * @headers_count: count of headers
+ */
+struct devlink_dpipe_headers {
+ struct devlink_dpipe_header **headers;
+ unsigned int headers_count;
+};
+
+/**
+ * struct devlink_resource_size_params - resource's size parameters
+ * @size_min: minimum size which can be set
+ * @size_max: maximum size which can be set
+ * @size_granularity: size granularity
+ * @unit: resource's basic unit
+ */
+struct devlink_resource_size_params {
+ u64 size_min;
+ u64 size_max;
+ u64 size_granularity;
+ enum devlink_resource_unit unit;
+};
+
+static inline void
+devlink_resource_size_params_init(struct devlink_resource_size_params *size_params,
+ u64 size_min, u64 size_max,
+ u64 size_granularity,
+ enum devlink_resource_unit unit)
+{
+ size_params->size_min = size_min;
+ size_params->size_max = size_max;
+ size_params->size_granularity = size_granularity;
+ size_params->unit = unit;
+}
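+/* Illustrative use: a driver exposing a table resizable between 32K and 512K
+ * entries in steps of 1K would initialize:
+ *
+ *	devlink_resource_size_params_init(&params, 32 * 1024, 512 * 1024,
+ *					  1024, DEVLINK_RESOURCE_UNIT_ENTRY);
+ */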
+
+typedef u64 devlink_resource_occ_get_t(void *priv);
+
+#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
+
+#define DEVLINK_RESOURCE_GENERIC_NAME_PORTS "physical_ports"
+
+#define __DEVLINK_PARAM_MAX_STRING_VALUE 32
+enum devlink_param_type {
+ DEVLINK_PARAM_TYPE_U8,
+ DEVLINK_PARAM_TYPE_U16,
+ DEVLINK_PARAM_TYPE_U32,
+ DEVLINK_PARAM_TYPE_STRING,
+ DEVLINK_PARAM_TYPE_BOOL,
+};
+
+union devlink_param_value {
+ u8 vu8;
+ u16 vu16;
+ u32 vu32;
+ char vstr[__DEVLINK_PARAM_MAX_STRING_VALUE];
+ bool vbool;
+};
+
+struct devlink_param_gset_ctx {
+ union devlink_param_value val;
+ enum devlink_param_cmode cmode;
+};
+
+/**
+ * struct devlink_flash_notify - devlink dev flash notify data
+ * @status_msg: current status string
+ * @component: firmware component being updated
+ * @done: amount of work completed of total amount
+ * @total: amount of work expected to be done
+ * @timeout: expected max timeout in seconds
+ *
+ * These values are given to userland to be displayed, in order to show
+ * the current activity of a firmware update process.
+ */
+struct devlink_flash_notify {
+ const char *status_msg;
+ const char *component;
+ unsigned long done;
+ unsigned long total;
+ unsigned long timeout;
+};
+
+/**
+ * struct devlink_param - devlink configuration parameter data
+ * @name: name of the parameter
+ * @generic: indicates if the parameter is generic or driver specific
+ * @type: parameter type
+ * @supported_cmodes: bitmap of supported configuration modes
+ * @get: get parameter value, used for runtime and permanent
+ * configuration modes
+ * @set: set parameter value, used for runtime and permanent
+ * configuration modes
+ * @validate: validate that an input value is applicable (within value range, etc.)
+ *
+ * This struct should be used by the driver to fill the data for
+ * a parameter it registers.
+ */
+struct devlink_param {
+ u32 id;
+ const char *name;
+ bool generic;
+ enum devlink_param_type type;
+ unsigned long supported_cmodes;
+ int (*get)(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+ int (*set)(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+ int (*validate)(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack);
+};
+
+struct devlink_param_item {
+ struct list_head list;
+ const struct devlink_param *param;
+ union devlink_param_value driverinit_value;
+ bool driverinit_value_valid;
+};
+
+enum devlink_param_generic_id {
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
+ DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+
+	/* add new param generic ids above here */
+ __DEVLINK_PARAM_GENERIC_ID_MAX,
+ DEVLINK_PARAM_GENERIC_ID_MAX = __DEVLINK_PARAM_GENERIC_ID_MAX - 1,
+};
+
+#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME "internal_error_reset"
+#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_MAX_MACS_NAME "max_macs"
+#define DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME "enable_sriov"
+#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME "region_snapshot_enable"
+#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_IGNORE_ARI_NAME "ignore_ari"
+#define DEVLINK_PARAM_GENERIC_IGNORE_ARI_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_NAME "msix_vec_per_pf_max"
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME "msix_vec_per_pf_min"
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME "fw_load_policy"
+#define DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE DEVLINK_PARAM_TYPE_U8
+
+#define DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME \
+ "reset_dev_on_drv_probe"
+#define DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE DEVLINK_PARAM_TYPE_U8
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME "enable_roce"
+#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_NAME "enable_remote_dev_reset"
+#define DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_ETH_NAME "enable_eth"
+#define DEVLINK_PARAM_GENERIC_ENABLE_ETH_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_RDMA_NAME "enable_rdma"
+#define DEVLINK_PARAM_GENERIC_ENABLE_RDMA_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_VNET_NAME "enable_vnet"
+#define DEVLINK_PARAM_GENERIC_ENABLE_VNET_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME "enable_iwarp"
+#define DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_NAME "io_eq_size"
+#define DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME "event_eq_size"
+#define DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
+{ \
+ .id = DEVLINK_PARAM_GENERIC_ID_##_id, \
+ .name = DEVLINK_PARAM_GENERIC_##_id##_NAME, \
+ .type = DEVLINK_PARAM_GENERIC_##_id##_TYPE, \
+ .generic = true, \
+ .supported_cmodes = _cmodes, \
+ .get = _get, \
+ .set = _set, \
+ .validate = _validate, \
+}
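+/* Illustrative use with hypothetical driver callbacks:
+ *
+ *	static const struct devlink_param my_params[] = {
+ *		DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
+ *				      BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ *				      my_sriov_get, my_sriov_set, NULL),
+ *	};
+ */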
+
+#define DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes, _get, _set, _validate) \
+{ \
+ .id = _id, \
+ .name = _name, \
+ .type = _type, \
+ .supported_cmodes = _cmodes, \
+ .get = _get, \
+ .set = _set, \
+ .validate = _validate, \
+}
+
+/* Part number, identifier of board design */
+#define DEVLINK_INFO_VERSION_GENERIC_BOARD_ID "board.id"
+/* Revision of board design */
+#define DEVLINK_INFO_VERSION_GENERIC_BOARD_REV "board.rev"
+/* Maker of the board */
+#define DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE "board.manufacture"
+
+/* Part number, identifier of asic design */
+#define DEVLINK_INFO_VERSION_GENERIC_ASIC_ID "asic.id"
+/* Revision of asic design */
+#define DEVLINK_INFO_VERSION_GENERIC_ASIC_REV "asic.rev"
+
+/* Overall FW version */
+#define DEVLINK_INFO_VERSION_GENERIC_FW "fw"
+/* Control processor FW version */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT "fw.mgmt"
+/* FW interface specification version */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API "fw.mgmt.api"
+/* Data path microcode controlling high-speed packet processing */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_APP "fw.app"
+/* UNDI software version */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_UNDI "fw.undi"
+/* NCSI support/handler version */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_NCSI "fw.ncsi"
+/* FW parameter set id */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_PSID "fw.psid"
+/* RoCE FW version */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_ROCE "fw.roce"
+/* Firmware bundle identifier */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id"
+
+/**
+ * struct devlink_flash_update_params - Flash Update parameters
+ * @fw: pointer to the firmware data to update from
+ * @component: the flash component to update
+ *
+ * With the exception of fw, drivers must opt in to parameters by
+ * setting the appropriate bit in the supported_flash_update_params field in
+ * their devlink_ops structure.
+ */
+struct devlink_flash_update_params {
+ const struct firmware *fw;
+ const char *component;
+ u32 overwrite_mask;
+};
+
+#define DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK BIT(0)
+
+struct devlink_region;
+struct devlink_info_req;
+
+/**
+ * struct devlink_region_ops - Region operations
+ * @name: region name
+ * @destructor: callback used to free snapshot memory when deleting
+ * @snapshot: callback to request an immediate snapshot. On success,
+ * the data variable must be updated to point to the snapshot data.
+ * The function will be called while the devlink instance lock is
+ * held.
+ * @priv: Pointer to driver private data for the region operation
+ */
+struct devlink_region_ops {
+ const char *name;
+ void (*destructor)(const void *data);
+ int (*snapshot)(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data);
+ void *priv;
+};
+
+/**
+ * struct devlink_port_region_ops - Region operations for a port
+ * @name: region name
+ * @destructor: callback used to free snapshot memory when deleting
+ * @snapshot: callback to request an immediate snapshot. On success,
+ * the data variable must be updated to point to the snapshot data.
+ * The function will be called while the devlink instance lock is
+ * held.
+ * @priv: Pointer to driver private data for the region operation
+ */
+struct devlink_port_region_ops {
+ const char *name;
+ void (*destructor)(const void *data);
+ int (*snapshot)(struct devlink_port *port,
+ const struct devlink_port_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data);
+ void *priv;
+};
+
+struct devlink_fmsg;
+struct devlink_health_reporter;
+
+enum devlink_health_reporter_state {
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY,
+ DEVLINK_HEALTH_REPORTER_STATE_ERROR,
+};
+
+/**
+ * struct devlink_health_reporter_ops - Reporter operations
+ * @name: reporter name
+ * @recover: callback to recover from reported error
+ *		if priv_ctx is NULL, run a full recovery
+ * @dump: callback to dump an object
+ * if priv_ctx is NULL, run a full dump
+ * @diagnose: callback to diagnose the current status
+ * @test: callback to trigger a test event
+ */
+
+struct devlink_health_reporter_ops {
+ char *name;
+ int (*recover)(struct devlink_health_reporter *reporter,
+ void *priv_ctx, struct netlink_ext_ack *extack);
+ int (*dump)(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack);
+ int (*diagnose)(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack);
+ int (*test)(struct devlink_health_reporter *reporter,
+ struct netlink_ext_ack *extack);
+};
+
+/**
+ * struct devlink_trap_metadata - Packet trap metadata.
+ * @trap_name: Trap name.
+ * @trap_group_name: Trap group name.
+ * @input_dev: Input netdevice.
+ * @dev_tracker: refcount tracker for @input_dev.
+ * @fa_cookie: Flow action user cookie.
+ * @trap_type: Trap type.
+ */
+struct devlink_trap_metadata {
+ const char *trap_name;
+ const char *trap_group_name;
+
+ struct net_device *input_dev;
+ netdevice_tracker dev_tracker;
+
+ const struct flow_action_cookie *fa_cookie;
+ enum devlink_trap_type trap_type;
+};
+
+/**
+ * struct devlink_trap_policer - Immutable packet trap policer attributes.
+ * @id: Policer identifier.
+ * @init_rate: Initial rate in packets / sec.
+ * @init_burst: Initial burst size in packets.
+ * @max_rate: Maximum rate.
+ * @min_rate: Minimum rate.
+ * @max_burst: Maximum burst size.
+ * @min_burst: Minimum burst size.
+ *
+ * Describes immutable attributes of packet trap policers that drivers register
+ * with devlink.
+ */
+struct devlink_trap_policer {
+ u32 id;
+ u64 init_rate;
+ u64 init_burst;
+ u64 max_rate;
+ u64 min_rate;
+ u64 max_burst;
+ u64 min_burst;
+};
+
+/**
+ * struct devlink_trap_group - Immutable packet trap group attributes.
+ * @name: Trap group name.
+ * @id: Trap group identifier.
+ * @generic: Whether the trap group is generic or not.
+ * @init_policer_id: Initial policer identifier.
+ *
+ * Describes immutable attributes of packet trap groups that drivers register
+ * with devlink.
+ */
+struct devlink_trap_group {
+ const char *name;
+ u16 id;
+ bool generic;
+ u32 init_policer_id;
+};
+
+#define DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT BIT(0)
+#define DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE BIT(1)
+
+/**
+ * struct devlink_trap - Immutable packet trap attributes.
+ * @type: Trap type.
+ * @init_action: Initial trap action.
+ * @generic: Whether the trap is generic or not.
+ * @id: Trap identifier.
+ * @name: Trap name.
+ * @init_group_id: Initial group identifier.
+ * @metadata_cap: Metadata types that can be provided by the trap.
+ *
+ * Describes immutable attributes of packet traps that drivers register with
+ * devlink.
+ */
+struct devlink_trap {
+ enum devlink_trap_type type;
+ enum devlink_trap_action init_action;
+ bool generic;
+ u16 id;
+ const char *name;
+ u16 init_group_id;
+ u32 metadata_cap;
+};
+
+/* All traps must be documented in
+ * Documentation/networking/devlink/devlink-trap.rst
+ */
+enum devlink_trap_generic_id {
+ DEVLINK_TRAP_GENERIC_ID_SMAC_MC,
+ DEVLINK_TRAP_GENERIC_ID_VLAN_TAG_MISMATCH,
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_STP_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
+ DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_TTL_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_TAIL_DROP,
+ DEVLINK_TRAP_GENERIC_ID_NON_IP_PACKET,
+ DEVLINK_TRAP_GENERIC_ID_UC_DIP_MC_DMAC,
+ DEVLINK_TRAP_GENERIC_ID_DIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_SIP_MC,
+ DEVLINK_TRAP_GENERIC_ID_SIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_CORRUPTED_IP_HDR,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_SIP_BC,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_RESERVED_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_MTU_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
+ DEVLINK_TRAP_GENERIC_ID_RPF,
+ DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS,
+ DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE,
+ DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC,
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_FLOW_ACTION_DROP,
+ DEVLINK_TRAP_GENERIC_ID_EGRESS_FLOW_ACTION_DROP,
+ DEVLINK_TRAP_GENERIC_ID_STP,
+ DEVLINK_TRAP_GENERIC_ID_LACP,
+ DEVLINK_TRAP_GENERIC_ID_LLDP,
+ DEVLINK_TRAP_GENERIC_ID_IGMP_QUERY,
+ DEVLINK_TRAP_GENERIC_ID_IGMP_V1_REPORT,
+ DEVLINK_TRAP_GENERIC_ID_IGMP_V2_REPORT,
+ DEVLINK_TRAP_GENERIC_ID_IGMP_V3_REPORT,
+ DEVLINK_TRAP_GENERIC_ID_IGMP_V2_LEAVE,
+ DEVLINK_TRAP_GENERIC_ID_MLD_QUERY,
+ DEVLINK_TRAP_GENERIC_ID_MLD_V1_REPORT,
+ DEVLINK_TRAP_GENERIC_ID_MLD_V2_REPORT,
+ DEVLINK_TRAP_GENERIC_ID_MLD_V1_DONE,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_DHCP,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_DHCP,
+ DEVLINK_TRAP_GENERIC_ID_ARP_REQUEST,
+ DEVLINK_TRAP_GENERIC_ID_ARP_RESPONSE,
+ DEVLINK_TRAP_GENERIC_ID_ARP_OVERLAY,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_NEIGH_SOLICIT,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_NEIGH_ADVERT,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_BFD,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_BFD,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_OSPF,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_OSPF,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_BGP,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_BGP,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_VRRP,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_VRRP,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_PIM,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_PIM,
+ DEVLINK_TRAP_GENERIC_ID_UC_LB,
+ DEVLINK_TRAP_GENERIC_ID_LOCAL_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_EXTERNAL_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_UC_DIP_LINK_LOCAL_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_DIP_ALL_NODES,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_DIP_ALL_ROUTERS,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_SOLICIT,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_ADVERT,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_REDIRECT,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_ROUTER_ALERT,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_ALERT,
+ DEVLINK_TRAP_GENERIC_ID_PTP_EVENT,
+ DEVLINK_TRAP_GENERIC_ID_PTP_GENERAL,
+ DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_SAMPLE,
+ DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_TRAP,
+ DEVLINK_TRAP_GENERIC_ID_EARLY_DROP,
+ DEVLINK_TRAP_GENERIC_ID_VXLAN_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_LLC_SNAP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_VLAN_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_PPPOE_PPP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_MPLS_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_ARP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_IP_1_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_IP_N_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_GRE_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_UDP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_TCP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_IPSEC_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_SCTP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_DCCP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_GTP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_ESP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_NEXTHOP,
+ DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER,
+
+ /* Add new generic trap IDs above */
+ __DEVLINK_TRAP_GENERIC_ID_MAX,
+ DEVLINK_TRAP_GENERIC_ID_MAX = __DEVLINK_TRAP_GENERIC_ID_MAX - 1,
+};
+
+/* All trap groups must be documented in
+ * Documentation/networking/devlink/devlink-trap.rst
+ */
+enum devlink_trap_group_generic_id {
+ DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_L3_EXCEPTIONS,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_STP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_LACP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_LLDP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_MC_SNOOPING,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_DHCP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_NEIGH_DISCOVERY,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_BFD,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_OSPF,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_BGP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_VRRP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_PIM,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_UC_LB,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_LOCAL_DELIVERY,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_EXTERNAL_DELIVERY,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_IPV6,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_EVENT,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_SAMPLE,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_TRAP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_PARSER_ERROR_DROPS,
+
+ /* Add new generic trap group IDs above */
+ __DEVLINK_TRAP_GROUP_GENERIC_ID_MAX,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_MAX =
+ __DEVLINK_TRAP_GROUP_GENERIC_ID_MAX - 1,
+};
+
+#define DEVLINK_TRAP_GENERIC_NAME_SMAC_MC \
+ "source_mac_is_multicast"
+#define DEVLINK_TRAP_GENERIC_NAME_VLAN_TAG_MISMATCH \
+ "vlan_tag_mismatch"
+#define DEVLINK_TRAP_GENERIC_NAME_INGRESS_VLAN_FILTER \
+ "ingress_vlan_filter"
+#define DEVLINK_TRAP_GENERIC_NAME_INGRESS_STP_FILTER \
+ "ingress_spanning_tree_filter"
+#define DEVLINK_TRAP_GENERIC_NAME_EMPTY_TX_LIST \
+ "port_list_is_empty"
+#define DEVLINK_TRAP_GENERIC_NAME_PORT_LOOPBACK_FILTER \
+ "port_loopback_filter"
+#define DEVLINK_TRAP_GENERIC_NAME_BLACKHOLE_ROUTE \
+ "blackhole_route"
+#define DEVLINK_TRAP_GENERIC_NAME_TTL_ERROR \
+ "ttl_value_is_too_small"
+#define DEVLINK_TRAP_GENERIC_NAME_TAIL_DROP \
+ "tail_drop"
+#define DEVLINK_TRAP_GENERIC_NAME_NON_IP_PACKET \
+ "non_ip"
+#define DEVLINK_TRAP_GENERIC_NAME_UC_DIP_MC_DMAC \
+ "uc_dip_over_mc_dmac"
+#define DEVLINK_TRAP_GENERIC_NAME_DIP_LB \
+ "dip_is_loopback_address"
+#define DEVLINK_TRAP_GENERIC_NAME_SIP_MC \
+ "sip_is_mc"
+#define DEVLINK_TRAP_GENERIC_NAME_SIP_LB \
+ "sip_is_loopback_address"
+#define DEVLINK_TRAP_GENERIC_NAME_CORRUPTED_IP_HDR \
+ "ip_header_corrupted"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_SIP_BC \
+ "ipv4_sip_is_limited_bc"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_MC_DIP_RESERVED_SCOPE \
+ "ipv6_mc_dip_reserved_scope"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE \
+ "ipv6_mc_dip_interface_local_scope"
+#define DEVLINK_TRAP_GENERIC_NAME_MTU_ERROR \
+ "mtu_value_is_too_small"
+#define DEVLINK_TRAP_GENERIC_NAME_UNRESOLVED_NEIGH \
+ "unresolved_neigh"
+#define DEVLINK_TRAP_GENERIC_NAME_RPF \
+ "mc_reverse_path_forwarding"
+#define DEVLINK_TRAP_GENERIC_NAME_REJECT_ROUTE \
+ "reject_route"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_LPM_UNICAST_MISS \
+ "ipv4_lpm_miss"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_LPM_UNICAST_MISS \
+ "ipv6_lpm_miss"
+#define DEVLINK_TRAP_GENERIC_NAME_NON_ROUTABLE \
+ "non_routable_packet"
+#define DEVLINK_TRAP_GENERIC_NAME_DECAP_ERROR \
+ "decap_error"
+#define DEVLINK_TRAP_GENERIC_NAME_OVERLAY_SMAC_MC \
+ "overlay_smac_is_mc"
+#define DEVLINK_TRAP_GENERIC_NAME_INGRESS_FLOW_ACTION_DROP \
+ "ingress_flow_action_drop"
+#define DEVLINK_TRAP_GENERIC_NAME_EGRESS_FLOW_ACTION_DROP \
+ "egress_flow_action_drop"
+#define DEVLINK_TRAP_GENERIC_NAME_STP \
+ "stp"
+#define DEVLINK_TRAP_GENERIC_NAME_LACP \
+ "lacp"
+#define DEVLINK_TRAP_GENERIC_NAME_LLDP \
+ "lldp"
+#define DEVLINK_TRAP_GENERIC_NAME_IGMP_QUERY \
+ "igmp_query"
+#define DEVLINK_TRAP_GENERIC_NAME_IGMP_V1_REPORT \
+ "igmp_v1_report"
+#define DEVLINK_TRAP_GENERIC_NAME_IGMP_V2_REPORT \
+ "igmp_v2_report"
+#define DEVLINK_TRAP_GENERIC_NAME_IGMP_V3_REPORT \
+ "igmp_v3_report"
+#define DEVLINK_TRAP_GENERIC_NAME_IGMP_V2_LEAVE \
+ "igmp_v2_leave"
+#define DEVLINK_TRAP_GENERIC_NAME_MLD_QUERY \
+ "mld_query"
+#define DEVLINK_TRAP_GENERIC_NAME_MLD_V1_REPORT \
+ "mld_v1_report"
+#define DEVLINK_TRAP_GENERIC_NAME_MLD_V2_REPORT \
+ "mld_v2_report"
+#define DEVLINK_TRAP_GENERIC_NAME_MLD_V1_DONE \
+ "mld_v1_done"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_DHCP \
+ "ipv4_dhcp"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_DHCP \
+ "ipv6_dhcp"
+#define DEVLINK_TRAP_GENERIC_NAME_ARP_REQUEST \
+ "arp_request"
+#define DEVLINK_TRAP_GENERIC_NAME_ARP_RESPONSE \
+ "arp_response"
+#define DEVLINK_TRAP_GENERIC_NAME_ARP_OVERLAY \
+ "arp_overlay"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_NEIGH_SOLICIT \
+ "ipv6_neigh_solicit"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_NEIGH_ADVERT \
+ "ipv6_neigh_advert"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_BFD \
+ "ipv4_bfd"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_BFD \
+ "ipv6_bfd"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_OSPF \
+ "ipv4_ospf"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_OSPF \
+ "ipv6_ospf"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_BGP \
+ "ipv4_bgp"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_BGP \
+ "ipv6_bgp"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_VRRP \
+ "ipv4_vrrp"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_VRRP \
+ "ipv6_vrrp"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_PIM \
+ "ipv4_pim"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_PIM \
+ "ipv6_pim"
+#define DEVLINK_TRAP_GENERIC_NAME_UC_LB \
+ "uc_loopback"
+#define DEVLINK_TRAP_GENERIC_NAME_LOCAL_ROUTE \
+ "local_route"
+#define DEVLINK_TRAP_GENERIC_NAME_EXTERNAL_ROUTE \
+ "external_route"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_UC_DIP_LINK_LOCAL_SCOPE \
+ "ipv6_uc_dip_link_local_scope"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_DIP_ALL_NODES \
+ "ipv6_dip_all_nodes"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_DIP_ALL_ROUTERS \
+ "ipv6_dip_all_routers"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_ROUTER_SOLICIT \
+ "ipv6_router_solicit"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_ROUTER_ADVERT \
+ "ipv6_router_advert"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_REDIRECT \
+ "ipv6_redirect"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_ROUTER_ALERT \
+ "ipv4_router_alert"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_ROUTER_ALERT \
+ "ipv6_router_alert"
+#define DEVLINK_TRAP_GENERIC_NAME_PTP_EVENT \
+ "ptp_event"
+#define DEVLINK_TRAP_GENERIC_NAME_PTP_GENERAL \
+ "ptp_general"
+#define DEVLINK_TRAP_GENERIC_NAME_FLOW_ACTION_SAMPLE \
+ "flow_action_sample"
+#define DEVLINK_TRAP_GENERIC_NAME_FLOW_ACTION_TRAP \
+ "flow_action_trap"
+#define DEVLINK_TRAP_GENERIC_NAME_EARLY_DROP \
+ "early_drop"
+#define DEVLINK_TRAP_GENERIC_NAME_VXLAN_PARSING \
+ "vxlan_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_LLC_SNAP_PARSING \
+ "llc_snap_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_VLAN_PARSING \
+ "vlan_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_PPPOE_PPP_PARSING \
+ "pppoe_ppp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_MPLS_PARSING \
+ "mpls_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_ARP_PARSING \
+ "arp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_IP_1_PARSING \
+ "ip_1_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_IP_N_PARSING \
+ "ip_n_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_GRE_PARSING \
+ "gre_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_UDP_PARSING \
+ "udp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_TCP_PARSING \
+ "tcp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_IPSEC_PARSING \
+ "ipsec_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_SCTP_PARSING \
+ "sctp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_DCCP_PARSING \
+ "dccp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_GTP_PARSING \
+ "gtp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_ESP_PARSING \
+ "esp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_BLACKHOLE_NEXTHOP \
+ "blackhole_nexthop"
+#define DEVLINK_TRAP_GENERIC_NAME_DMAC_FILTER \
+ "dmac_filter"
+
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \
+ "l2_drops"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L3_DROPS \
+ "l3_drops"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L3_EXCEPTIONS \
+ "l3_exceptions"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_BUFFER_DROPS \
+ "buffer_drops"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_TUNNEL_DROPS \
+ "tunnel_drops"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_ACL_DROPS \
+ "acl_drops"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_STP \
+ "stp"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_LACP \
+ "lacp"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_LLDP \
+ "lldp"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_MC_SNOOPING \
+ "mc_snooping"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_DHCP \
+ "dhcp"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_NEIGH_DISCOVERY \
+ "neigh_discovery"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_BFD \
+ "bfd"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_OSPF \
+ "ospf"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_BGP \
+ "bgp"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_VRRP \
+ "vrrp"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_PIM \
+ "pim"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_UC_LB \
+ "uc_loopback"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_LOCAL_DELIVERY \
+ "local_delivery"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_EXTERNAL_DELIVERY \
+ "external_delivery"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_IPV6 \
+ "ipv6"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_PTP_EVENT \
+ "ptp_event"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_PTP_GENERAL \
+ "ptp_general"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_ACL_SAMPLE \
+ "acl_sample"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_ACL_TRAP \
+ "acl_trap"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_PARSER_ERROR_DROPS \
+ "parser_error_drops"
+
+#define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group_id, \
+ _metadata_cap) \
+ { \
+ .type = DEVLINK_TRAP_TYPE_##_type, \
+ .init_action = DEVLINK_TRAP_ACTION_##_init_action, \
+ .generic = true, \
+ .id = DEVLINK_TRAP_GENERIC_ID_##_id, \
+ .name = DEVLINK_TRAP_GENERIC_NAME_##_id, \
+ .init_group_id = _group_id, \
+ .metadata_cap = _metadata_cap, \
+ }
+
+#define DEVLINK_TRAP_DRIVER(_type, _init_action, _id, _name, _group_id, \
+ _metadata_cap) \
+ { \
+ .type = DEVLINK_TRAP_TYPE_##_type, \
+ .init_action = DEVLINK_TRAP_ACTION_##_init_action, \
+ .generic = false, \
+ .id = _id, \
+ .name = _name, \
+ .init_group_id = _group_id, \
+ .metadata_cap = _metadata_cap, \
+ }
+
+#define DEVLINK_TRAP_GROUP_GENERIC(_id, _policer_id) \
+ { \
+ .name = DEVLINK_TRAP_GROUP_GENERIC_NAME_##_id, \
+ .id = DEVLINK_TRAP_GROUP_GENERIC_ID_##_id, \
+ .generic = true, \
+ .init_policer_id = _policer_id, \
+ }
+
+#define DEVLINK_TRAP_POLICER(_id, _rate, _burst, _max_rate, _min_rate, \
+ _max_burst, _min_burst) \
+ { \
+ .id = _id, \
+ .init_rate = _rate, \
+ .init_burst = _burst, \
+ .max_rate = _max_rate, \
+ .min_rate = _min_rate, \
+ .max_burst = _max_burst, \
+ .min_burst = _min_burst, \
+ }
+
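+/* These helpers expand to designated struct initializers. A driver would
+ * typically instantiate them in static arrays and register those at probe
+ * time, e.g. (illustrative sketch; the "foo_" names and the policer numbers
+ * are made up, and the SMAC_MC generic trap ID is assumed to be among the
+ * generic IDs defined earlier in this file):
+ *
+ *	static const struct devlink_trap_policer foo_trap_policers_arr[] = {
+ *		DEVLINK_TRAP_POLICER(1, 1000, 128, 8000, 1, 1024, 1),
+ *	};
+ *
+ *	static const struct devlink_trap_group foo_trap_groups_arr[] = {
+ *		DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 1),
+ *	};
+ *
+ *	static const struct devlink_trap foo_traps_arr[] = {
+ *		DEVLINK_TRAP_GENERIC(DROP, DROP, SMAC_MC,
+ *				     DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS,
+ *				     DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT),
+ *	};
+ *
+ * The arrays are then passed to devl_trap_policers_register(),
+ * devl_trap_groups_register() and devl_traps_register(), declared below.
+ */
+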
+enum {
+ /* device supports reload operations */
+ DEVLINK_F_RELOAD = 1UL << 0,
+};
+
+struct devlink_ops {
+ /**
+ * @supported_flash_update_params:
+ * mask of parameters supported by the driver's .flash_update
+	 * implementation.
+ */
+ u32 supported_flash_update_params;
+ unsigned long reload_actions;
+ unsigned long reload_limits;
+ int (*reload_down)(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack);
+ int (*reload_up)(struct devlink *devlink, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
+ struct netlink_ext_ack *extack);
+ int (*port_type_set)(struct devlink_port *devlink_port,
+ enum devlink_port_type port_type);
+ int (*port_split)(struct devlink *devlink, struct devlink_port *port,
+ unsigned int count, struct netlink_ext_ack *extack);
+ int (*port_unsplit)(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack);
+ int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index,
+ u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+ int (*sb_pool_set)(struct devlink *devlink, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack);
+ int (*sb_port_pool_get)(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+ int (*sb_port_pool_set)(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold, struct netlink_ext_ack *extack);
+ int (*sb_tc_pool_bind_get)(struct devlink_port *devlink_port,
+ unsigned int sb_index,
+ u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+ int (*sb_tc_pool_bind_set)(struct devlink_port *devlink_port,
+ unsigned int sb_index,
+ u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack);
+ int (*sb_occ_snapshot)(struct devlink *devlink,
+ unsigned int sb_index);
+ int (*sb_occ_max_clear)(struct devlink *devlink,
+ unsigned int sb_index);
+ int (*sb_occ_port_pool_get)(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+ int (*sb_occ_tc_port_bind_get)(struct devlink_port *devlink_port,
+ unsigned int sb_index,
+ u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
+
+ int (*eswitch_mode_get)(struct devlink *devlink, u16 *p_mode);
+ int (*eswitch_mode_set)(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack);
+ int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode);
+ int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode,
+ struct netlink_ext_ack *extack);
+ int (*eswitch_encap_mode_get)(struct devlink *devlink,
+ enum devlink_eswitch_encap_mode *p_encap_mode);
+ int (*eswitch_encap_mode_set)(struct devlink *devlink,
+ enum devlink_eswitch_encap_mode encap_mode,
+ struct netlink_ext_ack *extack);
+ int (*info_get)(struct devlink *devlink, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+ /**
+ * @flash_update: Device flash update function
+ *
+ * Used to perform a flash update for the device. The set of
+ * parameters supported by the driver should be set in
+ * supported_flash_update_params.
+ */
+ int (*flash_update)(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack);
+ /**
+ * @trap_init: Trap initialization function.
+ *
+ * Should be used by device drivers to initialize the trap in the
+ * underlying device. Drivers should also store the provided trap
+ * context, so that they could efficiently pass it to
+ * devlink_trap_report() when the trap is triggered.
+ */
+ int (*trap_init)(struct devlink *devlink,
+ const struct devlink_trap *trap, void *trap_ctx);
+ /**
+ * @trap_fini: Trap de-initialization function.
+ *
+ * Should be used by device drivers to de-initialize the trap in the
+ * underlying device.
+ */
+ void (*trap_fini)(struct devlink *devlink,
+ const struct devlink_trap *trap, void *trap_ctx);
+ /**
+ * @trap_action_set: Trap action set function.
+ */
+ int (*trap_action_set)(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
+ /**
+ * @trap_group_init: Trap group initialization function.
+ *
+ * Should be used by device drivers to initialize the trap group in the
+ * underlying device.
+ */
+ int (*trap_group_init)(struct devlink *devlink,
+ const struct devlink_trap_group *group);
+ /**
+ * @trap_group_set: Trap group parameters set function.
+ *
+ * Note: @policer can be NULL when a policer is being unbound from
+ * @group.
+ */
+ int (*trap_group_set)(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack);
+ /**
+ * @trap_group_action_set: Trap group action set function.
+ *
+ * If this callback is populated, it will take precedence over looping
+ * over all traps in a group and calling .trap_action_set().
+ */
+ int (*trap_group_action_set)(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
+ /**
+ * @trap_drop_counter_get: Trap drop counter get function.
+ *
+	 * Should be used by device drivers to report the number of packets
+	 * that have been dropped and could not be passed to the devlink
+	 * subsystem by the underlying device.
+ */
+ int (*trap_drop_counter_get)(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ u64 *p_drops);
+ /**
+ * @trap_policer_init: Trap policer initialization function.
+ *
+ * Should be used by device drivers to initialize the trap policer in
+ * the underlying device.
+ */
+ int (*trap_policer_init)(struct devlink *devlink,
+ const struct devlink_trap_policer *policer);
+ /**
+ * @trap_policer_fini: Trap policer de-initialization function.
+ *
+ * Should be used by device drivers to de-initialize the trap policer
+ * in the underlying device.
+ */
+ void (*trap_policer_fini)(struct devlink *devlink,
+ const struct devlink_trap_policer *policer);
+ /**
+ * @trap_policer_set: Trap policer parameters set function.
+ */
+ int (*trap_policer_set)(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst,
+ struct netlink_ext_ack *extack);
+ /**
+ * @trap_policer_counter_get: Trap policer counter get function.
+ *
+	 * Should be used by device drivers to report the number of packets
+	 * dropped by the policer.
+ */
+ int (*trap_policer_counter_get)(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops);
+ /**
+ * @port_function_hw_addr_get: Port function's hardware address get function.
+ *
+ * Should be used by device drivers to report the hardware address of a function managed
+ * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port
+ * function handling for a particular port.
+ *
+ * Note: @extack can be NULL when port notifier queries the port function.
+ */
+ int (*port_function_hw_addr_get)(struct devlink_port *port, u8 *hw_addr,
+ int *hw_addr_len,
+ struct netlink_ext_ack *extack);
+ /**
+ * @port_function_hw_addr_set: Port function's hardware address set function.
+ *
+ * Should be used by device drivers to set the hardware address of a function managed
+ * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port
+ * function handling for a particular port.
+ */
+ int (*port_function_hw_addr_set)(struct devlink_port *port,
+ const u8 *hw_addr, int hw_addr_len,
+ struct netlink_ext_ack *extack);
+ /**
+ * port_new() - Add a new port function of a specified flavor
+ * @devlink: Devlink instance
+ * @attrs: attributes of the new port
+ * @extack: extack for reporting error messages
+ * @new_port_index: index of the new port
+ *
+ * Devlink core will call this device driver function upon user request
+ * to create a new port function of a specified flavor and optional
+ * attributes
+ *
+ * Notes:
+	 * - Called without the devlink instance lock being held. Drivers must
+	 *   implement their own means of synchronization
+ * - On success, drivers must register a port with devlink core
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+ int (*port_new)(struct devlink *devlink,
+ const struct devlink_port_new_attrs *attrs,
+ struct netlink_ext_ack *extack,
+ unsigned int *new_port_index);
+ /**
+ * port_del() - Delete a port function
+ * @devlink: Devlink instance
+ * @port_index: port function index to delete
+ * @extack: extack for reporting error messages
+ *
+ * Devlink core will call this device driver function upon user request
+ * to delete a previously created port function
+ *
+ * Notes:
+	 * - Called without the devlink instance lock being held. Drivers must
+	 *   implement their own means of synchronization
+ * - On success, drivers must unregister the corresponding devlink
+ * port
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+ int (*port_del)(struct devlink *devlink, unsigned int port_index,
+ struct netlink_ext_ack *extack);
+ /**
+ * port_fn_state_get() - Get the state of a port function
+ * @devlink: Devlink instance
+ * @port: The devlink port
+ * @state: Admin configured state
+ * @opstate: Current operational state
+ * @extack: extack for reporting error messages
+ *
+ * Reports the admin and operational state of a devlink port function
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+ int (*port_fn_state_get)(struct devlink_port *port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack);
+ /**
+ * port_fn_state_set() - Set the admin state of a port function
+ * @devlink: Devlink instance
+ * @port: The devlink port
+ * @state: Admin state
+ * @extack: extack for reporting error messages
+ *
+ * Set the admin state of a devlink port function
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+ int (*port_fn_state_set)(struct devlink_port *port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack);
+
+ /**
+ * Rate control callbacks.
+ */
+ int (*rate_leaf_tx_share_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack);
+ int (*rate_leaf_tx_max_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack);
+ int (*rate_node_tx_share_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack);
+ int (*rate_node_tx_max_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack);
+ int (*rate_node_new)(struct devlink_rate *rate_node, void **priv,
+ struct netlink_ext_ack *extack);
+ int (*rate_node_del)(struct devlink_rate *rate_node, void *priv,
+ struct netlink_ext_ack *extack);
+ int (*rate_leaf_parent_set)(struct devlink_rate *child,
+ struct devlink_rate *parent,
+ void *priv_child, void *priv_parent,
+ struct netlink_ext_ack *extack);
+ int (*rate_node_parent_set)(struct devlink_rate *child,
+ struct devlink_rate *parent,
+ void *priv_child, void *priv_parent,
+ struct netlink_ext_ack *extack);
+ /**
+	 * selftest_check() - Queries if a selftest is supported
+ * @devlink: devlink instance
+ * @id: test index
+ * @extack: extack for reporting error messages
+ *
+ * Return: true if test is supported by the driver
+ */
+ bool (*selftest_check)(struct devlink *devlink, unsigned int id,
+ struct netlink_ext_ack *extack);
+ /**
+ * selftest_run() - Runs a selftest
+ * @devlink: devlink instance
+ * @id: test index
+ * @extack: extack for reporting error messages
+ *
+ * Return: status of the test
+ */
+ enum devlink_selftest_status
+ (*selftest_run)(struct devlink *devlink, unsigned int id,
+ struct netlink_ext_ack *extack);
+};
+
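+/* Minimal ops sketch (illustrative; the "foo" driver is hypothetical). Only
+ * drivers advertising DEVLINK_F_RELOAD need the reload callbacks:
+ *
+ *	static const struct devlink_ops foo_devlink_ops = {
+ *		.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ *		.reload_down = foo_devlink_reload_down,
+ *		.reload_up = foo_devlink_reload_up,
+ *	};
+ *
+ * On success, the reload_up() implementation is expected to report what was
+ * done, e.g. *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT).
+ */
+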
+void *devlink_priv(struct devlink *devlink);
+struct devlink *priv_to_devlink(void *priv);
+struct device *devlink_to_dev(const struct devlink *devlink);
+
+/* Devlink instance explicit locking */
+void devl_lock(struct devlink *devlink);
+int devl_trylock(struct devlink *devlink);
+void devl_unlock(struct devlink *devlink);
+void devl_assert_locked(struct devlink *devlink);
+bool devl_lock_is_held(struct devlink *devlink);
+
+struct ib_device;
+
+struct net *devlink_net(const struct devlink *devlink);
+/* This call is intended for software devices that can create
+ * devlink instances in other namespaces than init_net.
+ *
+ * Drivers that operate on real HW must use devlink_alloc() instead.
+ */
+struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
+ size_t priv_size, struct net *net,
+ struct device *dev);
+static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
+ size_t priv_size,
+ struct device *dev)
+{
+ return devlink_alloc_ns(ops, priv_size, &init_net, dev);
+}
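+
+/* Typical probe/remove flow (illustrative sketch; "foo" names are
+ * hypothetical). devlink_register() should come last, once the instance is
+ * fully initialized, and devlink_unregister() first on teardown:
+ *
+ *	devlink = devlink_alloc(&foo_devlink_ops, sizeof(*foo), dev);
+ *	if (!devlink)
+ *		return -ENOMEM;
+ *	foo = devlink_priv(devlink);
+ *	// ... register ports, traps, params, regions ...
+ *	devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ *	devlink_register(devlink);
+ *
+ * and on removal, in reverse order:
+ *
+ *	devlink_unregister(devlink);
+ *	// ... unregister sub-objects ...
+ *	devlink_free(devlink);
+ */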
+void devlink_set_features(struct devlink *devlink, u64 features);
+void devlink_register(struct devlink *devlink);
+void devlink_unregister(struct devlink *devlink);
+void devlink_free(struct devlink *devlink);
+void devlink_port_init(struct devlink *devlink,
+ struct devlink_port *devlink_port);
+void devlink_port_fini(struct devlink_port *devlink_port);
+int devl_port_register(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index);
+int devlink_port_register(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index);
+void devl_port_unregister(struct devlink_port *devlink_port);
+void devlink_port_unregister(struct devlink_port *devlink_port);
+void devlink_port_type_eth_set(struct devlink_port *devlink_port,
+ struct net_device *netdev);
+void devlink_port_type_ib_set(struct devlink_port *devlink_port,
+ struct ib_device *ibdev);
+void devlink_port_type_clear(struct devlink_port *devlink_port);
+void devlink_port_attrs_set(struct devlink_port *devlink_port,
+ struct devlink_port_attrs *devlink_port_attrs);
+void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 controller,
+ u16 pf, bool external);
+void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
+ u16 pf, u16 vf, bool external);
+void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port,
+ u32 controller, u16 pf, u32 sf,
+ bool external);
+int devl_rate_leaf_create(struct devlink_port *port, void *priv);
+void devl_rate_leaf_destroy(struct devlink_port *devlink_port);
+void devl_rate_nodes_destroy(struct devlink *devlink);
+void devlink_port_linecard_set(struct devlink_port *devlink_port,
+ struct devlink_linecard *linecard);
+struct devlink_linecard *
+devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index,
+ const struct devlink_linecard_ops *ops, void *priv);
+void devlink_linecard_destroy(struct devlink_linecard *linecard);
+void devlink_linecard_provision_set(struct devlink_linecard *linecard,
+ const char *type);
+void devlink_linecard_provision_clear(struct devlink_linecard *linecard);
+void devlink_linecard_provision_fail(struct devlink_linecard *linecard);
+void devlink_linecard_activate(struct devlink_linecard *linecard);
+void devlink_linecard_deactivate(struct devlink_linecard *linecard);
+void devlink_linecard_nested_dl_set(struct devlink_linecard *linecard,
+ struct devlink *nested_devlink);
+int devl_sb_register(struct devlink *devlink, unsigned int sb_index,
+ u32 size, u16 ingress_pools_count,
+ u16 egress_pools_count, u16 ingress_tc_count,
+ u16 egress_tc_count);
+int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
+ u32 size, u16 ingress_pools_count,
+ u16 egress_pools_count, u16 ingress_tc_count,
+ u16 egress_tc_count);
+void devl_sb_unregister(struct devlink *devlink, unsigned int sb_index);
+void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index);
+int devl_dpipe_table_register(struct devlink *devlink,
+ const char *table_name,
+ struct devlink_dpipe_table_ops *table_ops,
+ void *priv, bool counter_control_extern);
+void devl_dpipe_table_unregister(struct devlink *devlink,
+ const char *table_name);
+void devl_dpipe_headers_register(struct devlink *devlink,
+ struct devlink_dpipe_headers *dpipe_headers);
+void devl_dpipe_headers_unregister(struct devlink *devlink);
+bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
+ const char *table_name);
+int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx);
+int devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx,
+ struct devlink_dpipe_entry *entry);
+int devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx);
+void devlink_dpipe_entry_clear(struct devlink_dpipe_entry *entry);
+int devlink_dpipe_action_put(struct sk_buff *skb,
+ struct devlink_dpipe_action *action);
+int devlink_dpipe_match_put(struct sk_buff *skb,
+ struct devlink_dpipe_match *match);
+extern struct devlink_dpipe_header devlink_dpipe_header_ethernet;
+extern struct devlink_dpipe_header devlink_dpipe_header_ipv4;
+extern struct devlink_dpipe_header devlink_dpipe_header_ipv6;
+
+int devl_resource_register(struct devlink *devlink,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params);
+int devlink_resource_register(struct devlink *devlink,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params);
+void devl_resources_unregister(struct devlink *devlink);
+void devlink_resources_unregister(struct devlink *devlink);
+int devl_resource_size_get(struct devlink *devlink,
+ u64 resource_id,
+ u64 *p_resource_size);
+int devl_dpipe_table_resource_set(struct devlink *devlink,
+ const char *table_name, u64 resource_id,
+ u64 resource_units);
+int devlink_dpipe_table_resource_set(struct devlink *devlink,
+ const char *table_name, u64 resource_id,
+ u64 resource_units);
+void devl_resource_occ_get_register(struct devlink *devlink,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv);
+void devlink_resource_occ_get_register(struct devlink *devlink,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv);
+void devl_resource_occ_get_unregister(struct devlink *devlink,
+ u64 resource_id);
+
+void devlink_resource_occ_get_unregister(struct devlink *devlink,
+ u64 resource_id);
+int devlink_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count);
+void devlink_params_unregister(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count);
+int devlink_param_register(struct devlink *devlink,
+ const struct devlink_param *param);
+void devlink_param_unregister(struct devlink *devlink,
+ const struct devlink_param *param);
+int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *init_val);
+int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val);
+void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
+struct devlink_region *devl_region_create(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots,
+ u64 region_size);
+struct devlink_region *
+devlink_region_create(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+struct devlink_region *
+devlink_port_region_create(struct devlink_port *port,
+ const struct devlink_port_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+void devl_region_destroy(struct devlink_region *region);
+void devlink_region_destroy(struct devlink_region *region);
+void devlink_port_region_destroy(struct devlink_region *region);
+
+int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id);
+void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id);
+int devlink_region_snapshot_create(struct devlink_region *region,
+ u8 *data, u32 snapshot_id);
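+
+/* Region usage sketch (illustrative; "foo" names are hypothetical): a driver
+ * creates a region once and then captures snapshots of it, e.g. on error:
+ *
+ *	region = devl_region_create(devlink, &foo_region_ops, 8, FOO_REG_SIZE);
+ *
+ *	err = devlink_region_snapshot_id_get(devlink, &id);
+ *	if (!err) {
+ *		err = devlink_region_snapshot_create(region, data, id);
+ *		devlink_region_snapshot_id_put(devlink, id);
+ *	}
+ *
+ * On success the snapshot takes ownership of @data, which is later released
+ * through the region ops' destructor; on failure the caller must free it.
+ */
+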
+int devlink_info_serial_number_put(struct devlink_info_req *req,
+ const char *sn);
+int devlink_info_driver_name_put(struct devlink_info_req *req,
+ const char *name);
+int devlink_info_board_serial_number_put(struct devlink_info_req *req,
+ const char *bsn);
+
+enum devlink_info_version_type {
+ DEVLINK_INFO_VERSION_TYPE_NONE,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT, /* May be used as flash update
+ * component by name.
+ */
+};
+
+int devlink_info_version_fixed_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value);
+int devlink_info_version_stored_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value);
+int devlink_info_version_stored_put_ext(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value,
+ enum devlink_info_version_type version_type);
+int devlink_info_version_running_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value);
+int devlink_info_version_running_put_ext(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value,
+ enum devlink_info_version_type version_type);
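+
+/* Sketch of a driver's .info_get() implementation (illustrative; the "foo"
+ * names are hypothetical and the version names below are common conventions,
+ * not requirements), where foo = devlink_priv(devlink):
+ *
+ *	err = devlink_info_serial_number_put(req, foo->serial);
+ *	if (err)
+ *		return err;
+ *	err = devlink_info_version_fixed_put(req, "board.rev", foo->rev);
+ *	if (err)
+ *		return err;
+ *	return devlink_info_version_running_put(req, "fw", foo->fw_ver);
+ */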
+
+int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg);
+int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg);
+
+int devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name);
+int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg);
+
+int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
+ const char *name);
+int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg);
+int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg,
+ const char *name);
+int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg);
+
+int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value);
+int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value);
+int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
+ u16 value_len);
+
+int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ bool value);
+int devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u8 value);
+int devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u32 value);
+int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u64 value);
+int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const char *value);
+int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const void *value, u32 value_len);
+
+struct devlink_health_reporter *
+devlink_health_reporter_create(struct devlink *devlink,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv);
+
+struct devlink_health_reporter *
+devlink_port_health_reporter_create(struct devlink_port *port,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv);
+
+void
+devlink_health_reporter_destroy(struct devlink_health_reporter *reporter);
+
+void
+devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter);
+
+void *
+devlink_health_reporter_priv(struct devlink_health_reporter *reporter);
+int devlink_health_report(struct devlink_health_reporter *reporter,
+ const char *msg, void *priv_ctx);
+void
+devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
+ enum devlink_health_reporter_state state);
+void
+devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter);
+
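+/* Health reporter sketch (illustrative; "foo" names are hypothetical). The
+ * fmsg helpers above are typically called from a reporter's dump/diagnose
+ * callbacks, and devlink_health_report() is invoked when an error is hit:
+ *
+ *	static int foo_reporter_dump(struct devlink_health_reporter *reporter,
+ *				     struct devlink_fmsg *fmsg, void *priv_ctx,
+ *				     struct netlink_ext_ack *extack)
+ *	{
+ *		struct foo *foo = devlink_health_reporter_priv(reporter);
+ *
+ *		return devlink_fmsg_u32_pair_put(fmsg, "err_count",
+ *						 foo->err_count);
+ *	}
+ *
+ * and in the error path:
+ *
+ *	devlink_health_report(foo->reporter, "TX timeout", &err_ctx);
+ */
+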
+bool devlink_is_reload_failed(const struct devlink *devlink);
+void devlink_remote_reload_actions_performed(struct devlink *devlink,
+ enum devlink_reload_limit limit,
+ u32 actions_performed);
+
+void devlink_flash_update_status_notify(struct devlink *devlink,
+ const char *status_msg,
+ const char *component,
+ unsigned long done,
+ unsigned long total);
+void devlink_flash_update_timeout_notify(struct devlink *devlink,
+ const char *status_msg,
+ const char *component,
+ unsigned long timeout);
+
+int devl_traps_register(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count, void *priv);
+int devlink_traps_register(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count, void *priv);
+void devl_traps_unregister(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count);
+void devlink_traps_unregister(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count);
+void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb,
+ void *trap_ctx, struct devlink_port *in_devlink_port,
+ const struct flow_action_cookie *fa_cookie);
+void *devlink_trap_ctx_priv(void *trap_ctx);
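+
+/* RX-path sketch (illustrative; "foo" names are hypothetical): the context
+ * passed to .trap_init() is handed back to devlink_trap_report() verbatim,
+ * so no lookup is needed in the hot path. @fa_cookie is only meaningful for
+ * ACL traps and may be NULL otherwise:
+ *
+ *	devlink_trap_report(foo->devlink, skb, trap_ctx,
+ *			    &foo_port->devlink_port, NULL);
+ */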
+int devl_trap_groups_register(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count);
+int devlink_trap_groups_register(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count);
+void devl_trap_groups_unregister(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count);
+void devlink_trap_groups_unregister(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count);
+int
+devl_trap_policers_register(struct devlink *devlink,
+ const struct devlink_trap_policer *policers,
+ size_t policers_count);
+void
+devl_trap_policers_unregister(struct devlink *devlink,
+ const struct devlink_trap_policer *policers,
+ size_t policers_count);
+
+#if IS_ENABLED(CONFIG_NET_DEVLINK)
+
+struct devlink *__must_check devlink_try_get(struct devlink *devlink);
+void devlink_put(struct devlink *devlink);
+
+void devlink_compat_running_version(struct devlink *devlink,
+ char *buf, size_t len);
+int devlink_compat_flash_update(struct devlink *devlink, const char *file_name);
+int devlink_compat_phys_port_name_get(struct net_device *dev,
+ char *name, size_t len);
+int devlink_compat_switch_id_get(struct net_device *dev,
+ struct netdev_phys_item_id *ppid);
+
+#else
+
+static inline struct devlink *devlink_try_get(struct devlink *devlink)
+{
+ return NULL;
+}
+
+static inline void devlink_put(struct devlink *devlink)
+{
+}
+
+static inline void
+devlink_compat_running_version(struct devlink *devlink, char *buf, size_t len)
+{
+}
+
+static inline int
+devlink_compat_flash_update(struct devlink *devlink, const char *file_name)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+devlink_compat_phys_port_name_get(struct net_device *dev,
+ char *name, size_t len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+devlink_compat_switch_id_get(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
+
+#endif /* _NET_DEVLINK_H_ */
diff --git a/include/net/dropreason.h b/include/net/dropreason.h
new file mode 100644
index 000000000..c1cbcdbaf
--- /dev/null
+++ b/include/net/dropreason.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _LINUX_DROPREASON_H
+#define _LINUX_DROPREASON_H
+
+#define DEFINE_DROP_REASON(FN, FNe) \
+ FN(NOT_SPECIFIED) \
+ FN(NO_SOCKET) \
+ FN(PKT_TOO_SMALL) \
+ FN(TCP_CSUM) \
+ FN(SOCKET_FILTER) \
+ FN(UDP_CSUM) \
+ FN(NETFILTER_DROP) \
+ FN(OTHERHOST) \
+ FN(IP_CSUM) \
+ FN(IP_INHDR) \
+ FN(IP_RPFILTER) \
+ FN(UNICAST_IN_L2_MULTICAST) \
+ FN(XFRM_POLICY) \
+ FN(IP_NOPROTO) \
+ FN(SOCKET_RCVBUFF) \
+ FN(PROTO_MEM) \
+ FN(TCP_MD5NOTFOUND) \
+ FN(TCP_MD5UNEXPECTED) \
+ FN(TCP_MD5FAILURE) \
+ FN(SOCKET_BACKLOG) \
+ FN(TCP_FLAGS) \
+ FN(TCP_ZEROWINDOW) \
+ FN(TCP_OLD_DATA) \
+ FN(TCP_OVERWINDOW) \
+ FN(TCP_OFOMERGE) \
+ FN(TCP_RFC7323_PAWS) \
+ FN(TCP_INVALID_SEQUENCE) \
+ FN(TCP_RESET) \
+ FN(TCP_INVALID_SYN) \
+ FN(TCP_CLOSE) \
+ FN(TCP_FASTOPEN) \
+ FN(TCP_OLD_ACK) \
+ FN(TCP_TOO_OLD_ACK) \
+ FN(TCP_ACK_UNSENT_DATA) \
+ FN(TCP_OFO_QUEUE_PRUNE) \
+ FN(TCP_OFO_DROP) \
+ FN(IP_OUTNOROUTES) \
+ FN(BPF_CGROUP_EGRESS) \
+ FN(IPV6DISABLED) \
+ FN(NEIGH_CREATEFAIL) \
+ FN(NEIGH_FAILED) \
+ FN(NEIGH_QUEUEFULL) \
+ FN(NEIGH_DEAD) \
+ FN(TC_EGRESS) \
+ FN(QDISC_DROP) \
+ FN(CPU_BACKLOG) \
+ FN(XDP) \
+ FN(TC_INGRESS) \
+ FN(UNHANDLED_PROTO) \
+ FN(SKB_CSUM) \
+ FN(SKB_GSO_SEG) \
+ FN(SKB_UCOPY_FAULT) \
+ FN(DEV_HDR) \
+ FN(DEV_READY) \
+ FN(FULL_RING) \
+ FN(NOMEM) \
+ FN(HDR_TRUNC) \
+ FN(TAP_FILTER) \
+ FN(TAP_TXFILTER) \
+ FN(ICMP_CSUM) \
+ FN(INVALID_PROTO) \
+ FN(IP_INADDRERRORS) \
+ FN(IP_INNOROUTES) \
+ FN(PKT_TOO_BIG) \
+ FNe(MAX)
+
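+/* DEFINE_DROP_REASON() is an X-macro: callers supply FN to expand each
+ * reason and FNe for the final MAX entry, which lets the last entry be
+ * expanded differently (e.g. without a trailing comma). A plausible use
+ * (sketch, not necessarily the exact in-tree consumer) is generating the
+ * drop_reasons[] string table declared at the bottom of this file:
+ *
+ *	#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
+ *	const char * const drop_reasons[] = {
+ *		DEFINE_DROP_REASON(FN, FN)
+ *	};
+ *	#undef FN
+ */
+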
+/**
+ * enum skb_drop_reason - the reasons for skb drops
+ *
+ * The reason an skb was dropped, as passed to kfree_skb_reason().
+ */
+enum skb_drop_reason {
+ /**
+ * @SKB_NOT_DROPPED_YET: skb is not dropped yet (used for no-drop case)
+ */
+ SKB_NOT_DROPPED_YET = 0,
+ /** @SKB_DROP_REASON_NOT_SPECIFIED: drop reason is not specified */
+ SKB_DROP_REASON_NOT_SPECIFIED,
+ /** @SKB_DROP_REASON_NO_SOCKET: socket not found */
+ SKB_DROP_REASON_NO_SOCKET,
+ /** @SKB_DROP_REASON_PKT_TOO_SMALL: packet size is too small */
+ SKB_DROP_REASON_PKT_TOO_SMALL,
+ /** @SKB_DROP_REASON_TCP_CSUM: TCP checksum error */
+ SKB_DROP_REASON_TCP_CSUM,
+ /** @SKB_DROP_REASON_SOCKET_FILTER: dropped by socket filter */
+ SKB_DROP_REASON_SOCKET_FILTER,
+ /** @SKB_DROP_REASON_UDP_CSUM: UDP checksum error */
+ SKB_DROP_REASON_UDP_CSUM,
+ /** @SKB_DROP_REASON_NETFILTER_DROP: dropped by netfilter */
+ SKB_DROP_REASON_NETFILTER_DROP,
+ /**
+	 * @SKB_DROP_REASON_OTHERHOST: packet doesn't belong to the current host
+	 * (interface is in promiscuous mode)
+ */
+ SKB_DROP_REASON_OTHERHOST,
+ /** @SKB_DROP_REASON_IP_CSUM: IP checksum error */
+ SKB_DROP_REASON_IP_CSUM,
+ /**
+	 * @SKB_DROP_REASON_IP_INHDR: there is something wrong with the IP
+	 * header (see IPSTATS_MIB_INHDRERRORS)
+ */
+ SKB_DROP_REASON_IP_INHDR,
+ /**
+	 * @SKB_DROP_REASON_IP_RPFILTER: IP rpfilter validation failed; see the
+	 * documentation for rp_filter in ip-sysctl.rst for more information
+ */
+ SKB_DROP_REASON_IP_RPFILTER,
+ /**
+	 * @SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST: the L2 destination address
+	 * is multicast, but the L3 destination is unicast.
+ */
+ SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST,
+ /** @SKB_DROP_REASON_XFRM_POLICY: xfrm policy check failed */
+ SKB_DROP_REASON_XFRM_POLICY,
+ /** @SKB_DROP_REASON_IP_NOPROTO: no support for IP protocol */
+ SKB_DROP_REASON_IP_NOPROTO,
+	/** @SKB_DROP_REASON_SOCKET_RCVBUFF: socket receive buffer is full */
+ SKB_DROP_REASON_SOCKET_RCVBUFF,
+ /**
+	 * @SKB_DROP_REASON_PROTO_MEM: protocol memory limitation, such as a UDP
+	 * packet dropped because udp_memory_allocated was exceeded.
+ */
+ SKB_DROP_REASON_PROTO_MEM,
+ /**
+	 * @SKB_DROP_REASON_TCP_MD5NOTFOUND: no MD5 hash where one was expected,
+	 * corresponding to LINUX_MIB_TCPMD5NOTFOUND
+ */
+ SKB_DROP_REASON_TCP_MD5NOTFOUND,
+ /**
+	 * @SKB_DROP_REASON_TCP_MD5UNEXPECTED: an MD5 hash is present when none
+	 * was expected, corresponding to LINUX_MIB_TCPMD5UNEXPECTED
+ */
+ SKB_DROP_REASON_TCP_MD5UNEXPECTED,
+ /**
+	 * @SKB_DROP_REASON_TCP_MD5FAILURE: the MD5 hash is present but wrong,
+	 * corresponding to LINUX_MIB_TCPMD5FAILURE
+ */
+ SKB_DROP_REASON_TCP_MD5FAILURE,
+ /**
+	 * @SKB_DROP_REASON_SOCKET_BACKLOG: failed to add the skb to the socket
+	 * backlog (see LINUX_MIB_TCPBACKLOGDROP)
+ */
+ SKB_DROP_REASON_SOCKET_BACKLOG,
+ /** @SKB_DROP_REASON_TCP_FLAGS: TCP flags invalid */
+ SKB_DROP_REASON_TCP_FLAGS,
+ /**
+ * @SKB_DROP_REASON_TCP_ZEROWINDOW: TCP receive window size is zero,
+ * see LINUX_MIB_TCPZEROWINDOWDROP
+ */
+ SKB_DROP_REASON_TCP_ZEROWINDOW,
+ /**
+	 * @SKB_DROP_REASON_TCP_OLD_DATA: the TCP data received was already
+	 * received before (a spurious retransmission may have happened), see
+ * LINUX_MIB_DELAYEDACKLOST
+ */
+ SKB_DROP_REASON_TCP_OLD_DATA,
+ /**
+	 * @SKB_DROP_REASON_TCP_OVERWINDOW: the TCP data is out of window, i.e.
+	 * the sequence number of the first byte exceeds the right edge of the
+	 * receive window
+ */
+ SKB_DROP_REASON_TCP_OVERWINDOW,
+ /**
+	 * @SKB_DROP_REASON_TCP_OFOMERGE: the skb's data is already in the
+	 * out-of-order queue, corresponding to LINUX_MIB_TCPOFOMERGE
+ */
+ SKB_DROP_REASON_TCP_OFOMERGE,
+ /**
+	 * @SKB_DROP_REASON_TCP_RFC7323_PAWS: PAWS check failed, corresponding
+	 * to LINUX_MIB_PAWSESTABREJECTED
+ */
+ SKB_DROP_REASON_TCP_RFC7323_PAWS,
+ /** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */
+ SKB_DROP_REASON_TCP_INVALID_SEQUENCE,
+ /** @SKB_DROP_REASON_TCP_RESET: Invalid RST packet */
+ SKB_DROP_REASON_TCP_RESET,
+ /**
+ * @SKB_DROP_REASON_TCP_INVALID_SYN: Incoming packet has unexpected
+ * SYN flag
+ */
+ SKB_DROP_REASON_TCP_INVALID_SYN,
+ /** @SKB_DROP_REASON_TCP_CLOSE: TCP socket in CLOSE state */
+ SKB_DROP_REASON_TCP_CLOSE,
+ /** @SKB_DROP_REASON_TCP_FASTOPEN: dropped by FASTOPEN request socket */
+ SKB_DROP_REASON_TCP_FASTOPEN,
+ /** @SKB_DROP_REASON_TCP_OLD_ACK: TCP ACK is old, but in window */
+ SKB_DROP_REASON_TCP_OLD_ACK,
+ /** @SKB_DROP_REASON_TCP_TOO_OLD_ACK: TCP ACK is too old */
+ SKB_DROP_REASON_TCP_TOO_OLD_ACK,
+ /**
+ * @SKB_DROP_REASON_TCP_ACK_UNSENT_DATA: TCP ACK for data we haven't
+ * sent yet
+ */
+ SKB_DROP_REASON_TCP_ACK_UNSENT_DATA,
+ /** @SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE: pruned from TCP OFO queue */
+ SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE,
+ /** @SKB_DROP_REASON_TCP_OFO_DROP: data already in receive queue */
+ SKB_DROP_REASON_TCP_OFO_DROP,
+ /** @SKB_DROP_REASON_IP_OUTNOROUTES: route lookup failed */
+ SKB_DROP_REASON_IP_OUTNOROUTES,
+ /**
+ * @SKB_DROP_REASON_BPF_CGROUP_EGRESS: dropped by BPF_PROG_TYPE_CGROUP_SKB
+ * eBPF program
+ */
+ SKB_DROP_REASON_BPF_CGROUP_EGRESS,
+ /** @SKB_DROP_REASON_IPV6DISABLED: IPv6 is disabled on the device */
+ SKB_DROP_REASON_IPV6DISABLED,
+ /** @SKB_DROP_REASON_NEIGH_CREATEFAIL: failed to create neigh entry */
+ SKB_DROP_REASON_NEIGH_CREATEFAIL,
+ /** @SKB_DROP_REASON_NEIGH_FAILED: neigh entry in failed state */
+ SKB_DROP_REASON_NEIGH_FAILED,
+ /** @SKB_DROP_REASON_NEIGH_QUEUEFULL: arp_queue for neigh entry is full */
+ SKB_DROP_REASON_NEIGH_QUEUEFULL,
+ /** @SKB_DROP_REASON_NEIGH_DEAD: neigh entry is dead */
+ SKB_DROP_REASON_NEIGH_DEAD,
+ /** @SKB_DROP_REASON_TC_EGRESS: dropped in TC egress HOOK */
+ SKB_DROP_REASON_TC_EGRESS,
+ /**
+	 * @SKB_DROP_REASON_QDISC_DROP: dropped by the qdisc on packet output
+	 * (failed to enqueue to the current qdisc)
+ */
+ SKB_DROP_REASON_QDISC_DROP,
+ /**
+	 * @SKB_DROP_REASON_CPU_BACKLOG: failed to enqueue the skb to the
+	 * per-CPU backlog queue. This can be caused by the backlog queue being
+	 * full (see netdev_max_backlog in net.rst) or by the RPS flow limit
+ */
+ SKB_DROP_REASON_CPU_BACKLOG,
+ /** @SKB_DROP_REASON_XDP: dropped by XDP in input path */
+ SKB_DROP_REASON_XDP,
+ /** @SKB_DROP_REASON_TC_INGRESS: dropped in TC ingress HOOK */
+ SKB_DROP_REASON_TC_INGRESS,
+ /** @SKB_DROP_REASON_UNHANDLED_PROTO: protocol not implemented or not supported */
+ SKB_DROP_REASON_UNHANDLED_PROTO,
+ /** @SKB_DROP_REASON_SKB_CSUM: sk_buff checksum computation error */
+ SKB_DROP_REASON_SKB_CSUM,
+ /** @SKB_DROP_REASON_SKB_GSO_SEG: gso segmentation error */
+ SKB_DROP_REASON_SKB_GSO_SEG,
+ /**
+ * @SKB_DROP_REASON_SKB_UCOPY_FAULT: failed to copy data from user space,
+ * e.g., via zerocopy_sg_from_iter() or skb_orphan_frags_rx()
+ */
+ SKB_DROP_REASON_SKB_UCOPY_FAULT,
+ /** @SKB_DROP_REASON_DEV_HDR: device driver specific header/metadata is invalid */
+ SKB_DROP_REASON_DEV_HDR,
+ /**
+	 * @SKB_DROP_REASON_DEV_READY: the device is not ready to xmit/recv due
+	 * to one of its data structures not being up/ready/initialized,
+	 * e.g., IFF_UP is not set, or the driver-specific tun->tfiles[txq]
+	 * is not initialized
+ */
+ SKB_DROP_REASON_DEV_READY,
+ /** @SKB_DROP_REASON_FULL_RING: ring buffer is full */
+ SKB_DROP_REASON_FULL_RING,
+ /** @SKB_DROP_REASON_NOMEM: error due to OOM */
+ SKB_DROP_REASON_NOMEM,
+ /**
+	 * @SKB_DROP_REASON_HDR_TRUNC: failed to truncate/extract the header
+	 * from the networking data, e.g., failed to pull the protocol header
+	 * from frags via pskb_may_pull()
+ */
+ SKB_DROP_REASON_HDR_TRUNC,
+ /**
+ * @SKB_DROP_REASON_TAP_FILTER: dropped by (ebpf) filter directly attached
+ * to tun/tap, e.g., via TUNSETFILTEREBPF
+ */
+ SKB_DROP_REASON_TAP_FILTER,
+ /**
+ * @SKB_DROP_REASON_TAP_TXFILTER: dropped by tx filter implemented at
+ * tun/tap, e.g., check_filter()
+ */
+ SKB_DROP_REASON_TAP_TXFILTER,
+ /** @SKB_DROP_REASON_ICMP_CSUM: ICMP checksum error */
+ SKB_DROP_REASON_ICMP_CSUM,
+ /**
+	 * @SKB_DROP_REASON_INVALID_PROTO: the packet doesn't follow RFC 2211,
+	 * such as a broadcast ICMP_TIMESTAMP
+ */
+ SKB_DROP_REASON_INVALID_PROTO,
+ /**
+ * @SKB_DROP_REASON_IP_INADDRERRORS: host unreachable, corresponding to
+ * IPSTATS_MIB_INADDRERRORS
+ */
+ SKB_DROP_REASON_IP_INADDRERRORS,
+ /**
+	 * @SKB_DROP_REASON_IP_INNOROUTES: network unreachable, corresponding to
+	 * IPSTATS_MIB_INNOROUTES
+ */
+ SKB_DROP_REASON_IP_INNOROUTES,
+ /**
+	 * @SKB_DROP_REASON_PKT_TOO_BIG: packet size is too big (e.g. it exceeds
+	 * the MTU)
+ */
+ SKB_DROP_REASON_PKT_TOO_BIG,
+ /**
+	 * @SKB_DROP_REASON_MAX: the maximum drop reason value, which shouldn't
+	 * be used as a real 'reason'
+ */
+ SKB_DROP_REASON_MAX,
+};
+
+#define SKB_DR_INIT(name, reason) \
+ enum skb_drop_reason name = SKB_DROP_REASON_##reason
+#define SKB_DR(name) \
+ SKB_DR_INIT(name, NOT_SPECIFIED)
+#define SKB_DR_SET(name, reason) \
+ (name = SKB_DROP_REASON_##reason)
+#define SKB_DR_OR(name, reason) \
+ do { \
+ if (name == SKB_DROP_REASON_NOT_SPECIFIED || \
+ name == SKB_NOT_DROPPED_YET) \
+ SKB_DR_SET(name, reason); \
+ } while (0)
+
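+/* Usage sketch (illustrative; "foohdr" is a made-up protocol header):
+ * SKB_DR() declares a reason preset to NOT_SPECIFIED, SKB_DR_SET()
+ * overwrites it, and SKB_DR_OR() only fills it in when nothing more
+ * specific has been recorded yet:
+ *
+ *	SKB_DR(reason);
+ *
+ *	if (!pskb_may_pull(skb, sizeof(struct foohdr))) {
+ *		SKB_DR_SET(reason, PKT_TOO_SMALL);
+ *		goto drop;
+ *	}
+ *	...
+ * drop:
+ *	kfree_skb_reason(skb, reason);
+ */
+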
+extern const char * const drop_reasons[];
+
+#endif
diff --git a/include/net/dsa.h b/include/net/dsa.h
new file mode 100644
index 000000000..ee369670e
--- /dev/null
+++ b/include/net/dsa.h
@@ -0,0 +1,1472 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/net/dsa.h - Driver for Distributed Switch Architecture switch chips
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ */
+
+#ifndef __LINUX_NET_DSA_H
+#define __LINUX_NET_DSA_H
+
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/of.h>
+#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/platform_data/dsa.h>
+#include <linux/phylink.h>
+#include <net/devlink.h>
+#include <net/switchdev.h>
+
+struct tc_action;
+struct phy_device;
+struct fixed_phy_status;
+struct phylink_link_state;
+
+#define DSA_TAG_PROTO_NONE_VALUE 0
+#define DSA_TAG_PROTO_BRCM_VALUE 1
+#define DSA_TAG_PROTO_BRCM_PREPEND_VALUE 2
+#define DSA_TAG_PROTO_DSA_VALUE 3
+#define DSA_TAG_PROTO_EDSA_VALUE 4
+#define DSA_TAG_PROTO_GSWIP_VALUE 5
+#define DSA_TAG_PROTO_KSZ9477_VALUE 6
+#define DSA_TAG_PROTO_KSZ9893_VALUE 7
+#define DSA_TAG_PROTO_LAN9303_VALUE 8
+#define DSA_TAG_PROTO_MTK_VALUE 9
+#define DSA_TAG_PROTO_QCA_VALUE 10
+#define DSA_TAG_PROTO_TRAILER_VALUE 11
+#define DSA_TAG_PROTO_8021Q_VALUE 12
+#define DSA_TAG_PROTO_SJA1105_VALUE 13
+#define DSA_TAG_PROTO_KSZ8795_VALUE 14
+#define DSA_TAG_PROTO_OCELOT_VALUE 15
+#define DSA_TAG_PROTO_AR9331_VALUE 16
+#define DSA_TAG_PROTO_RTL4_A_VALUE 17
+#define DSA_TAG_PROTO_HELLCREEK_VALUE 18
+#define DSA_TAG_PROTO_XRS700X_VALUE 19
+#define DSA_TAG_PROTO_OCELOT_8021Q_VALUE 20
+#define DSA_TAG_PROTO_SEVILLE_VALUE 21
+#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22
+#define DSA_TAG_PROTO_SJA1110_VALUE 23
+#define DSA_TAG_PROTO_RTL8_4_VALUE 24
+#define DSA_TAG_PROTO_RTL8_4T_VALUE 25
+#define DSA_TAG_PROTO_RZN1_A5PSW_VALUE 26
+#define DSA_TAG_PROTO_LAN937X_VALUE 27
+
+enum dsa_tag_protocol {
+ DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
+ DSA_TAG_PROTO_BRCM = DSA_TAG_PROTO_BRCM_VALUE,
+ DSA_TAG_PROTO_BRCM_LEGACY = DSA_TAG_PROTO_BRCM_LEGACY_VALUE,
+ DSA_TAG_PROTO_BRCM_PREPEND = DSA_TAG_PROTO_BRCM_PREPEND_VALUE,
+ DSA_TAG_PROTO_DSA = DSA_TAG_PROTO_DSA_VALUE,
+ DSA_TAG_PROTO_EDSA = DSA_TAG_PROTO_EDSA_VALUE,
+ DSA_TAG_PROTO_GSWIP = DSA_TAG_PROTO_GSWIP_VALUE,
+ DSA_TAG_PROTO_KSZ9477 = DSA_TAG_PROTO_KSZ9477_VALUE,
+ DSA_TAG_PROTO_KSZ9893 = DSA_TAG_PROTO_KSZ9893_VALUE,
+ DSA_TAG_PROTO_LAN9303 = DSA_TAG_PROTO_LAN9303_VALUE,
+ DSA_TAG_PROTO_MTK = DSA_TAG_PROTO_MTK_VALUE,
+ DSA_TAG_PROTO_QCA = DSA_TAG_PROTO_QCA_VALUE,
+ DSA_TAG_PROTO_TRAILER = DSA_TAG_PROTO_TRAILER_VALUE,
+ DSA_TAG_PROTO_8021Q = DSA_TAG_PROTO_8021Q_VALUE,
+ DSA_TAG_PROTO_SJA1105 = DSA_TAG_PROTO_SJA1105_VALUE,
+ DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE,
+ DSA_TAG_PROTO_OCELOT = DSA_TAG_PROTO_OCELOT_VALUE,
+ DSA_TAG_PROTO_AR9331 = DSA_TAG_PROTO_AR9331_VALUE,
+ DSA_TAG_PROTO_RTL4_A = DSA_TAG_PROTO_RTL4_A_VALUE,
+ DSA_TAG_PROTO_HELLCREEK = DSA_TAG_PROTO_HELLCREEK_VALUE,
+ DSA_TAG_PROTO_XRS700X = DSA_TAG_PROTO_XRS700X_VALUE,
+ DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE,
+ DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE,
+ DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE,
+ DSA_TAG_PROTO_RTL8_4 = DSA_TAG_PROTO_RTL8_4_VALUE,
+ DSA_TAG_PROTO_RTL8_4T = DSA_TAG_PROTO_RTL8_4T_VALUE,
+ DSA_TAG_PROTO_RZN1_A5PSW = DSA_TAG_PROTO_RZN1_A5PSW_VALUE,
+ DSA_TAG_PROTO_LAN937X = DSA_TAG_PROTO_LAN937X_VALUE,
+};
+
+struct dsa_switch;
+
+struct dsa_device_ops {
+ struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
+ struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
+ void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
+ int *offset);
+ int (*connect)(struct dsa_switch *ds);
+ void (*disconnect)(struct dsa_switch *ds);
+ unsigned int needed_headroom;
+ unsigned int needed_tailroom;
+ const char *name;
+ enum dsa_tag_protocol proto;
+ /* Some tagging protocols either mangle or shift the destination MAC
+ * address, in which case the DSA master would drop packets on ingress
+	 * if the destination MAC address it parses is not in its RX filter.
+ */
+ bool promisc_on_master;
+};
+
+/* This structure defines the control interfaces that are overlaid by the
+ * DSA layer on top of the DSA CPU/management net_device instance. This is
+ * used by the core net_device layer while calling various net_device_ops
+ * function pointers.
+ */
+struct dsa_netdevice_ops {
+ int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr,
+ int cmd);
+};
+
+#define DSA_TAG_DRIVER_ALIAS "dsa_tag-"
+#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto) \
+ MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE))
+
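+/* Tagging driver sketch (illustrative; the "foo" names are hypothetical,
+ * shown here as a trailer-style tagger). A tagger declares its ops, reserves
+ * its overhead via needed_headroom/needed_tailroom, and advertises a module
+ * alias so the DSA core can request it:
+ *
+ *	static const struct dsa_device_ops foo_netdev_ops = {
+ *		.name = "foo",
+ *		.proto = DSA_TAG_PROTO_TRAILER,
+ *		.xmit = foo_tag_xmit,
+ *		.rcv = foo_tag_rcv,
+ *		.needed_tailroom = 4,
+ *	};
+ *
+ *	MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_TRAILER);
+ */
+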
+struct dsa_lag {
+ struct net_device *dev;
+ unsigned int id;
+ struct mutex fdb_lock;
+ struct list_head fdbs;
+ refcount_t refcount;
+};
+
+struct dsa_switch_tree {
+ struct list_head list;
+
+ /* List of switch ports */
+ struct list_head ports;
+
+ /* Notifier chain for switch-wide events */
+ struct raw_notifier_head nh;
+
+ /* Tree identifier */
+ unsigned int index;
+
+ /* Number of switches attached to this tree */
+ struct kref refcount;
+
+ /* Maps offloaded LAG netdevs to a zero-based linear ID for
+ * drivers that need it.
+ */
+ struct dsa_lag **lags;
+
+ /* Tagging protocol operations */
+ const struct dsa_device_ops *tag_ops;
+
+ /* Default tagging protocol preferred by the switches in this
+ * tree.
+ */
+ enum dsa_tag_protocol default_proto;
+
+ /* Has this tree been applied to the hardware? */
+ bool setup;
+
+ /*
+ * Configuration data for the platform device that owns
+ * this dsa switch tree instance.
+ */
+ struct dsa_platform_data *pd;
+
+ /* List of DSA links composing the routing table */
+ struct list_head rtable;
+
+ /* Length of "lags" array */
+ unsigned int lags_len;
+
+ /* Track the largest switch index within a tree */
+ unsigned int last_switch;
+};
+
+/* LAG IDs are one-based, the dst->lags array is zero-based */
+#define dsa_lags_foreach_id(_id, _dst) \
+ for ((_id) = 1; (_id) <= (_dst)->lags_len; (_id)++) \
+ if ((_dst)->lags[(_id) - 1])
+
+#define dsa_lag_foreach_port(_dp, _dst, _lag) \
+ list_for_each_entry((_dp), &(_dst)->ports, list) \
+ if (dsa_port_offloads_lag((_dp), (_lag)))
+
+#define dsa_hsr_foreach_port(_dp, _ds, _hsr) \
+ list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr))
+
+static inline struct dsa_lag *dsa_lag_by_id(struct dsa_switch_tree *dst,
+ unsigned int id)
+{
+ /* DSA LAG IDs are one-based, dst->lags is zero-based */
+ return dst->lags[id - 1];
+}
+
+static inline int dsa_lag_id(struct dsa_switch_tree *dst,
+ struct net_device *lag_dev)
+{
+ unsigned int id;
+
+ dsa_lags_foreach_id(id, dst) {
+ struct dsa_lag *lag = dsa_lag_by_id(dst, id);
+
+ if (lag->dev == lag_dev)
+ return lag->id;
+ }
+
+ return -ENODEV;
+}
+
+/* TC matchall action types */
+enum dsa_port_mall_action_type {
+ DSA_PORT_MALL_MIRROR,
+ DSA_PORT_MALL_POLICER,
+};
+
+/* TC mirroring entry */
+struct dsa_mall_mirror_tc_entry {
+ u8 to_local_port;
+ bool ingress;
+};
+
+/* TC port policer entry */
+struct dsa_mall_policer_tc_entry {
+ u32 burst;
+ u64 rate_bytes_per_sec;
+};
+
+/* TC matchall entry */
+struct dsa_mall_tc_entry {
+ struct list_head list;
+ unsigned long cookie;
+ enum dsa_port_mall_action_type type;
+ union {
+ struct dsa_mall_mirror_tc_entry mirror;
+ struct dsa_mall_policer_tc_entry policer;
+ };
+};
+
+struct dsa_bridge {
+ struct net_device *dev;
+ unsigned int num;
+ bool tx_fwd_offload;
+ refcount_t refcount;
+};
+
+struct dsa_port {
+ /* A CPU port is physically connected to a master device.
+ * A user port exposed to userspace has a slave device.
+ */
+ union {
+ struct net_device *master;
+ struct net_device *slave;
+ };
+
+ /* Copy of the tagging protocol operations, for quicker access
+ * in the data path. Valid only for the CPU ports.
+ */
+ const struct dsa_device_ops *tag_ops;
+
+ /* Copies for faster access in master receive hot path */
+ struct dsa_switch_tree *dst;
+ struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
+
+ struct dsa_switch *ds;
+
+ unsigned int index;
+
+ enum {
+ DSA_PORT_TYPE_UNUSED = 0,
+ DSA_PORT_TYPE_CPU,
+ DSA_PORT_TYPE_DSA,
+ DSA_PORT_TYPE_USER,
+ } type;
+
+ const char *name;
+ struct dsa_port *cpu_dp;
+ u8 mac[ETH_ALEN];
+
+ u8 stp_state;
+
+ /* Warning: the following bit fields are not atomic, and updating them
+ * can only be done from code paths where concurrency is not possible
+ * (probe time or under rtnl_lock).
+ */
+ u8 vlan_filtering:1;
+
+ /* Managed by DSA on user ports and by drivers on CPU and DSA ports */
+ u8 learning:1;
+
+ u8 lag_tx_enabled:1;
+
+ /* Master state bits, valid only on CPU ports */
+ u8 master_admin_up:1;
+ u8 master_oper_up:1;
+
+ /* Valid only on user ports */
+ u8 cpu_port_in_lag:1;
+
+ u8 setup:1;
+
+ struct device_node *dn;
+ unsigned int ageing_time;
+
+ struct dsa_bridge *bridge;
+ struct devlink_port devlink_port;
+ struct phylink *pl;
+ struct phylink_config pl_config;
+ struct dsa_lag *lag;
+ struct net_device *hsr_dev;
+
+ struct list_head list;
+
+ /*
+ * Original copy of the master netdev ethtool_ops
+ */
+ const struct ethtool_ops *orig_ethtool_ops;
+
+ /*
+ * Original copy of the master netdev net_device_ops
+ */
+ const struct dsa_netdevice_ops *netdev_ops;
+
+ /* List of MAC addresses that must be forwarded on this port.
+ * These are only valid on CPU ports and DSA links.
+ */
+ struct mutex addr_lists_lock;
+ struct list_head fdbs;
+ struct list_head mdbs;
+
+ /* List of VLANs that CPU and DSA ports are members of. */
+ struct mutex vlans_lock;
+ struct list_head vlans;
+};
+
+/* TODO: ideally DSA ports would have a single dp->link_dp member,
+ * and no dst->rtable nor this struct dsa_link would be needed,
+ * but this would require some more complex tree walking,
+ * so keep it stupid at the moment and list them all.
+ */
+struct dsa_link {
+ struct dsa_port *dp;
+ struct dsa_port *link_dp;
+ struct list_head list;
+};
+
+enum dsa_db_type {
+ DSA_DB_PORT,
+ DSA_DB_LAG,
+ DSA_DB_BRIDGE,
+};
+
+struct dsa_db {
+ enum dsa_db_type type;
+
+ union {
+ const struct dsa_port *dp;
+ struct dsa_lag lag;
+ struct dsa_bridge bridge;
+ };
+};
+
+struct dsa_mac_addr {
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+ refcount_t refcount;
+ struct list_head list;
+ struct dsa_db db;
+};
+
+struct dsa_vlan {
+ u16 vid;
+ refcount_t refcount;
+ struct list_head list;
+};
+
+struct dsa_switch {
+ struct device *dev;
+
+ /*
+ * Parent switch tree, and switch index.
+ */
+ struct dsa_switch_tree *dst;
+ unsigned int index;
+
+ /* Warning: the following bit fields are not atomic, and updating them
+ * can only be done from code paths where concurrency is not possible
+ * (probe time or under rtnl_lock).
+ */
+ u32 setup:1;
+
+ /* Disallow bridge core from requesting different VLAN awareness
+ * settings on ports if not hardware-supported
+ */
+ u32 vlan_filtering_is_global:1;
+
+ /* Keep VLAN filtering enabled on ports not offloading any upper */
+ u32 needs_standalone_vlan_filtering:1;
+
+ /* Pass .port_vlan_add and .port_vlan_del to drivers even for bridges
+ * that have vlan_filtering=0. All drivers should ideally set this (and
+ * then the option would get removed), but it is unknown whether this
+ * would break things or not.
+ */
+ u32 configure_vlan_while_not_filtering:1;
+
+ /* If the switch driver always programs the CPU port as egress tagged
+ * despite the VLAN configuration indicating otherwise, then setting
+ * @untag_bridge_pvid will force the DSA receive path to pop the
+ * bridge's default_pvid VLAN tagged frames to offer a consistent
+ * behavior between a vlan_filtering=0 and vlan_filtering=1 bridge
+ * device.
+ */
+ u32 untag_bridge_pvid:1;
+
+ /* Let DSA manage the FDB entries towards the
+ * CPU, based on the software bridge database.
+ */
+ u32 assisted_learning_on_cpu_port:1;
+
+ /* In case vlan_filtering_is_global is set, the VLAN awareness state
+ * should be retrieved from here and not from the per-port settings.
+ */
+ u32 vlan_filtering:1;
+
+ /* For switches that only have the MRU configurable. To ensure the
+ * configured MTU is not exceeded, normalization of MRU on all bridged
+ * interfaces is needed.
+ */
+ u32 mtu_enforcement_ingress:1;
+
+ /* Drivers that isolate the FDBs of multiple bridges must set this
+ * to true to receive the bridge as an argument in .port_fdb_{add,del}
+ * and .port_mdb_{add,del}. Otherwise, the bridge.num will always be
+ * passed as zero.
+ */
+ u32 fdb_isolation:1;
+
+ /* Listener for switch fabric events */
+ struct notifier_block nb;
+
+ /*
+ * Give the switch driver somewhere to hang its private data
+ * structure.
+ */
+ void *priv;
+
+ void *tagger_data;
+
+ /*
+ * Configuration data for this switch.
+ */
+ struct dsa_chip_data *cd;
+
+ /*
+ * The switch operations.
+ */
+ const struct dsa_switch_ops *ops;
+
+ /*
+ * Slave mii_bus and devices for the individual ports.
+ */
+ u32 phys_mii_mask;
+ struct mii_bus *slave_mii_bus;
+
+ /* Ageing Time limits in msecs */
+ unsigned int ageing_time_min;
+ unsigned int ageing_time_max;
+
+ /* Storage for drivers using tag_8021q */
+ struct dsa_8021q_context *tag_8021q_ctx;
+
+ /* devlink used to represent this switch device */
+ struct devlink *devlink;
+
+ /* Number of switch port queues */
+ unsigned int num_tx_queues;
+
+ /* Drivers that benefit from having an ID associated with each
+ * offloaded LAG should set this to the maximum number of
+ * supported IDs. DSA will then maintain a mapping of _at
+	 * least_ this many IDs, accessible to drivers via
+ * dsa_lag_id().
+ */
+ unsigned int num_lag_ids;
+
+ /* Drivers that support bridge forwarding offload or FDB isolation
+ * should set this to the maximum number of bridges spanning the same
+ * switch tree (or all trees, in the case of cross-tree bridging
+ * support) that can be offloaded.
+ */
+ unsigned int max_num_bridges;
+
+ unsigned int num_ports;
+};
+
+static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
+{
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dp->ds == ds && dp->index == p)
+ return dp;
+
+ return NULL;
+}
+
+static inline bool dsa_port_is_dsa(struct dsa_port *port)
+{
+ return port->type == DSA_PORT_TYPE_DSA;
+}
+
+static inline bool dsa_port_is_cpu(struct dsa_port *port)
+{
+ return port->type == DSA_PORT_TYPE_CPU;
+}
+
+static inline bool dsa_port_is_user(struct dsa_port *dp)
+{
+ return dp->type == DSA_PORT_TYPE_USER;
+}
+
+static inline bool dsa_port_is_unused(struct dsa_port *dp)
+{
+ return dp->type == DSA_PORT_TYPE_UNUSED;
+}
+
+static inline bool dsa_port_master_is_operational(struct dsa_port *dp)
+{
+ return dsa_port_is_cpu(dp) && dp->master_admin_up &&
+ dp->master_oper_up;
+}
+
+static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
+{
+ return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
+}
+
+static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
+{
+ return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_CPU;
+}
+
+static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
+{
+ return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_DSA;
+}
+
+static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
+{
+ return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
+}
+
+#define dsa_tree_for_each_user_port(_dp, _dst) \
+ list_for_each_entry((_dp), &(_dst)->ports, list) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_tree_for_each_user_port_continue_reverse(_dp, _dst) \
+ list_for_each_entry_continue_reverse((_dp), &(_dst)->ports, list) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_tree_for_each_cpu_port(_dp, _dst) \
+ list_for_each_entry((_dp), &(_dst)->ports, list) \
+ if (dsa_port_is_cpu((_dp)))
+
+#define dsa_switch_for_each_port(_dp, _ds) \
+ list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_port_safe(_dp, _next, _ds) \
+ list_for_each_entry_safe((_dp), (_next), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_port_continue_reverse(_dp, _ds) \
+ list_for_each_entry_continue_reverse((_dp), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_available_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (!dsa_port_is_unused((_dp)))
+
+#define dsa_switch_for_each_user_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_switch_for_each_cpu_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (dsa_port_is_cpu((_dp)))
+
+#define dsa_switch_for_each_cpu_port_continue_reverse(_dp, _ds) \
+ dsa_switch_for_each_port_continue_reverse((_dp), (_ds)) \
+ if (dsa_port_is_cpu((_dp)))
+
+static inline u32 dsa_user_ports(struct dsa_switch *ds)
+{
+ struct dsa_port *dp;
+ u32 mask = 0;
+
+ dsa_switch_for_each_user_port(dp, ds)
+ mask |= BIT(dp->index);
+
+ return mask;
+}
+
+static inline u32 dsa_cpu_ports(struct dsa_switch *ds)
+{
+ struct dsa_port *cpu_dp;
+ u32 mask = 0;
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds)
+ mask |= BIT(cpu_dp->index);
+
+ return mask;
+}
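+
+/* Illustrative sketch, not part of this header: a driver might combine the
+ * two masks above when programming an isolation matrix that lets every user
+ * port reach the CPU port(s). foo_write_pvlan() is hypothetical.
+ */
+#if 0
+static void foo_setup_port_isolation(struct dsa_switch *ds)
+{
+	u32 allowed = dsa_user_ports(ds) | dsa_cpu_ports(ds);
+	struct dsa_port *dp;
+
+	dsa_switch_for_each_available_port(dp, ds)
+		foo_write_pvlan(ds, dp->index, allowed & ~BIT(dp->index));
+}
+#endif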
+
+/* Return the local port used to reach an arbitrary switch device */
+static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device)
+{
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_link *dl;
+
+ list_for_each_entry(dl, &dst->rtable, list)
+ if (dl->dp->ds == ds && dl->link_dp->ds->index == device)
+ return dl->dp->index;
+
+ return ds->num_ports;
+}
+
+/* Return the local port used to reach an arbitrary switch port */
+static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
+ int port)
+{
+ if (device == ds->index)
+ return port;
+ else
+ return dsa_routing_port(ds, device);
+}
+
+/* Return the local port used to reach the dedicated CPU port */
+static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
+{
+ const struct dsa_port *dp = dsa_to_port(ds, port);
+ const struct dsa_port *cpu_dp = dp->cpu_dp;
+
+ if (!cpu_dp)
+ return port;
+
+ return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
+}
+
+/* Return true if this is the local port used to reach the CPU port */
+static inline bool dsa_is_upstream_port(struct dsa_switch *ds, int port)
+{
+ if (dsa_is_unused_port(ds, port))
+ return false;
+
+ return port == dsa_upstream_port(ds, port);
+}
+
+/* Return true if this is a DSA port leading away from the CPU */
+static inline bool dsa_is_downstream_port(struct dsa_switch *ds, int port)
+{
+ return dsa_is_dsa_port(ds, port) && !dsa_is_upstream_port(ds, port);
+}
+
+/* Return the local port used to reach the CPU port */
+static inline unsigned int dsa_switch_upstream_port(struct dsa_switch *ds)
+{
+ struct dsa_port *dp;
+
+ dsa_switch_for_each_available_port(dp, ds) {
+ return dsa_upstream_port(ds, dp->index);
+ }
+
+ return ds->num_ports;
+}
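+
+/* Worked example: on switch @ds, a frame destined for port 3 of the switch
+ * with index 2 elsewhere in the tree must egress through
+ * dsa_towards_port(ds, 2, 3): that is port 3 itself when ds->index == 2,
+ * and the DSA link port returned by dsa_routing_port(ds, 2) otherwise.
+ */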
+
+/* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning
+ * that the routing port from @downstream_ds to @upstream_ds is also the port
+ * which @downstream_ds uses to reach its dedicated CPU.
+ */
+static inline bool dsa_switch_is_upstream_of(struct dsa_switch *upstream_ds,
+ struct dsa_switch *downstream_ds)
+{
+ int routing_port;
+
+ if (upstream_ds == downstream_ds)
+ return true;
+
+ routing_port = dsa_routing_port(downstream_ds, upstream_ds->index);
+
+ return dsa_is_upstream_port(downstream_ds, routing_port);
+}
+
+static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
+{
+ const struct dsa_switch *ds = dp->ds;
+
+ if (ds->vlan_filtering_is_global)
+ return ds->vlan_filtering;
+ else
+ return dp->vlan_filtering;
+}
+
+static inline unsigned int dsa_port_lag_id_get(struct dsa_port *dp)
+{
+ return dp->lag ? dp->lag->id : 0;
+}
+
+static inline struct net_device *dsa_port_lag_dev_get(struct dsa_port *dp)
+{
+ return dp->lag ? dp->lag->dev : NULL;
+}
+
+static inline bool dsa_port_offloads_lag(struct dsa_port *dp,
+ const struct dsa_lag *lag)
+{
+ return dsa_port_lag_dev_get(dp) == lag->dev;
+}
+
+static inline struct net_device *dsa_port_to_master(const struct dsa_port *dp)
+{
+ if (dp->cpu_port_in_lag)
+ return dsa_port_lag_dev_get(dp->cpu_dp);
+
+ return dp->cpu_dp->master;
+}
+
+static inline
+struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
+{
+ if (!dp->bridge)
+ return NULL;
+
+ if (dp->lag)
+ return dp->lag->dev;
+ else if (dp->hsr_dev)
+ return dp->hsr_dev;
+
+ return dp->slave;
+}
+
+static inline struct net_device *
+dsa_port_bridge_dev_get(const struct dsa_port *dp)
+{
+ return dp->bridge ? dp->bridge->dev : NULL;
+}
+
+static inline unsigned int dsa_port_bridge_num_get(struct dsa_port *dp)
+{
+ return dp->bridge ? dp->bridge->num : 0;
+}
+
+static inline bool dsa_port_bridge_same(const struct dsa_port *a,
+ const struct dsa_port *b)
+{
+ struct net_device *br_a = dsa_port_bridge_dev_get(a);
+ struct net_device *br_b = dsa_port_bridge_dev_get(b);
+
+	/* Standalone ports are not in the same bridge as one another */
+ return (!br_a || !br_b) ? false : (br_a == br_b);
+}
+
+static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
+ const struct net_device *dev)
+{
+ return dsa_port_to_bridge_port(dp) == dev;
+}
+
+static inline bool
+dsa_port_offloads_bridge_dev(struct dsa_port *dp,
+ const struct net_device *bridge_dev)
+{
+ /* DSA ports connected to a bridge, and event was emitted
+ * for the bridge.
+ */
+ return dsa_port_bridge_dev_get(dp) == bridge_dev;
+}
+
+static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
+ const struct dsa_bridge *bridge)
+{
+ return dsa_port_bridge_dev_get(dp) == bridge->dev;
+}
+
+/* Returns true if any port of this tree offloads the given net_device */
+static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
+ const struct net_device *dev)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_offloads_bridge_port(dp, dev))
+ return true;
+
+ return false;
+}
+
+/* Returns true if any port of this tree offloads the given bridge */
+static inline bool
+dsa_tree_offloads_bridge_dev(struct dsa_switch_tree *dst,
+ const struct net_device *bridge_dev)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_offloads_bridge_dev(dp, bridge_dev))
+ return true;
+
+ return false;
+}
+
+static inline bool dsa_port_tree_same(const struct dsa_port *a,
+ const struct dsa_port *b)
+{
+ return a->ds->dst == b->ds->dst;
+}
+
+typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
+ bool is_static, void *data);
+struct dsa_switch_ops {
+ /*
+ * Tagging protocol helpers called for the CPU ports and DSA links.
+ * @get_tag_protocol retrieves the initial tagging protocol and is
+ * mandatory. Switches which can operate using multiple tagging
+ * protocols should implement @change_tag_protocol and report in
+ * @get_tag_protocol the tagger in current use.
+ */
+ enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mprot);
+ int (*change_tag_protocol)(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto);
+ /*
+ * Method for switch drivers to connect to the tagging protocol driver
+ * in current use. The switch driver can provide handlers for certain
+ * types of packets for switch management.
+ */
+ int (*connect_tag_protocol)(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto);
+
+ int (*port_change_master)(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack);
+
+ /* Optional switch-wide initialization and destruction methods */
+ int (*setup)(struct dsa_switch *ds);
+ void (*teardown)(struct dsa_switch *ds);
+
+ /* Per-port initialization and destruction methods. Mandatory if the
+ * driver registers devlink port regions, optional otherwise.
+ */
+ int (*port_setup)(struct dsa_switch *ds, int port);
+ void (*port_teardown)(struct dsa_switch *ds, int port);
+
+ u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
+
+ /*
+ * Access to the switch's PHY registers.
+ */
+ int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
+ int (*phy_write)(struct dsa_switch *ds, int port,
+ int regnum, u16 val);
+
+ /*
+ * Link state adjustment (called from libphy)
+ */
+ void (*adjust_link)(struct dsa_switch *ds, int port,
+ struct phy_device *phydev);
+ void (*fixed_link_update)(struct dsa_switch *ds, int port,
+ struct fixed_phy_status *st);
+
+ /*
+ * PHYLINK integration
+ */
+ void (*phylink_get_caps)(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
+ void (*phylink_validate)(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state);
+ struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds,
+ int port,
+ phy_interface_t iface);
+ int (*phylink_mac_link_state)(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state);
+ void (*phylink_mac_config)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+ void (*phylink_mac_an_restart)(struct dsa_switch *ds, int port);
+ void (*phylink_mac_link_down)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
+ void (*phylink_mac_link_up)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause);
+ void (*phylink_fixed_state)(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state);
+ /*
+ * Port statistics counters.
+ */
+ void (*get_strings)(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data);
+ void (*get_ethtool_stats)(struct dsa_switch *ds,
+ int port, uint64_t *data);
+ int (*get_sset_count)(struct dsa_switch *ds, int port, int sset);
+ void (*get_ethtool_phy_stats)(struct dsa_switch *ds,
+ int port, uint64_t *data);
+ void (*get_eth_phy_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats);
+ void (*get_eth_mac_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats);
+ void (*get_eth_ctrl_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+ void (*get_rmon_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges);
+ void (*get_stats64)(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s);
+ void (*get_pause_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats);
+ void (*self_test)(struct dsa_switch *ds, int port,
+ struct ethtool_test *etest, u64 *data);
+
+ /*
+ * ethtool Wake-on-LAN
+ */
+ void (*get_wol)(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *w);
+ int (*set_wol)(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *w);
+
+ /*
+ * ethtool timestamp info
+ */
+ int (*get_ts_info)(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *ts);
+
+ /*
+ * DCB ops
+ */
+ int (*port_get_default_prio)(struct dsa_switch *ds, int port);
+ int (*port_set_default_prio)(struct dsa_switch *ds, int port,
+ u8 prio);
+ int (*port_get_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp);
+ int (*port_add_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio);
+ int (*port_del_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio);
+
+ /*
+ * Suspend and resume
+ */
+ int (*suspend)(struct dsa_switch *ds);
+ int (*resume)(struct dsa_switch *ds);
+
+ /*
+ * Port enable/disable
+ */
+ int (*port_enable)(struct dsa_switch *ds, int port,
+ struct phy_device *phy);
+ void (*port_disable)(struct dsa_switch *ds, int port);
+
+ /*
+ * Port's MAC EEE settings
+ */
+ int (*set_mac_eee)(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e);
+ int (*get_mac_eee)(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e);
+
+ /* EEPROM access */
+ int (*get_eeprom_len)(struct dsa_switch *ds);
+ int (*get_eeprom)(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data);
+ int (*set_eeprom)(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data);
+
+ /*
+ * Register access.
+ */
+ int (*get_regs_len)(struct dsa_switch *ds, int port);
+ void (*get_regs)(struct dsa_switch *ds, int port,
+ struct ethtool_regs *regs, void *p);
+
+ /*
+ * Upper device tracking.
+ */
+ int (*port_prechangeupper)(struct dsa_switch *ds, int port,
+ struct netdev_notifier_changeupper_info *info);
+
+ /*
+ * Bridge integration
+ */
+ int (*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs);
+ int (*port_bridge_join)(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack);
+ void (*port_bridge_leave)(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge);
+ void (*port_stp_state_set)(struct dsa_switch *ds, int port,
+ u8 state);
+ int (*port_mst_state_set)(struct dsa_switch *ds, int port,
+ const struct switchdev_mst_state *state);
+ void (*port_fast_age)(struct dsa_switch *ds, int port);
+ int (*port_vlan_fast_age)(struct dsa_switch *ds, int port, u16 vid);
+ int (*port_pre_bridge_flags)(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack);
+ int (*port_bridge_flags)(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack);
+ void (*port_set_host_flood)(struct dsa_switch *ds, int port,
+ bool uc, bool mc);
+
+ /*
+ * VLAN support
+ */
+ int (*port_vlan_filtering)(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack);
+ int (*port_vlan_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+ int (*port_vlan_del)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+ int (*vlan_msti_set)(struct dsa_switch *ds, struct dsa_bridge bridge,
+ const struct switchdev_vlan_msti *msti);
+
+ /*
+ * Forwarding database
+ */
+ int (*port_fdb_add)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+ int (*port_fdb_del)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+ int (*port_fdb_dump)(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+ int (*lag_fdb_add)(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+ int (*lag_fdb_del)(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+
+ /*
+ * Multicast database
+ */
+ int (*port_mdb_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+ int (*port_mdb_del)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+ /*
+ * RXNFC
+ */
+ int (*get_rxnfc)(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs);
+ int (*set_rxnfc)(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc);
+
+ /*
+ * TC integration
+ */
+ int (*cls_flower_add)(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+ int (*cls_flower_del)(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+ int (*cls_flower_stats)(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+ int (*port_mirror_add)(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+ void (*port_mirror_del)(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+ int (*port_policer_add)(struct dsa_switch *ds, int port,
+ struct dsa_mall_policer_tc_entry *policer);
+ void (*port_policer_del)(struct dsa_switch *ds, int port);
+ int (*port_setup_tc)(struct dsa_switch *ds, int port,
+ enum tc_setup_type type, void *type_data);
+
+ /*
+ * Cross-chip operations
+ */
+ int (*crosschip_bridge_join)(struct dsa_switch *ds, int tree_index,
+ int sw_index, int port,
+ struct dsa_bridge bridge,
+ struct netlink_ext_ack *extack);
+ void (*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index,
+ int sw_index, int port,
+ struct dsa_bridge bridge);
+ int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
+ int port);
+ int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
+ int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
+ int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
+ int port, struct dsa_lag lag);
+
+ /*
+ * PTP functionality
+ */
+ int (*port_hwtstamp_get)(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+ int (*port_hwtstamp_set)(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+ void (*port_txtstamp)(struct dsa_switch *ds, int port,
+ struct sk_buff *skb);
+ bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type);
+
+ /* Devlink parameters, etc */
+ int (*devlink_param_get)(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+ int (*devlink_param_set)(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+ int (*devlink_info_get)(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+ int (*devlink_sb_pool_get)(struct dsa_switch *ds,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+ int (*devlink_sb_pool_set)(struct dsa_switch *ds, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack);
+ int (*devlink_sb_port_pool_get)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+ int (*devlink_sb_port_pool_set)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold,
+ struct netlink_ext_ack *extack);
+ int (*devlink_sb_tc_pool_bind_get)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+ int (*devlink_sb_tc_pool_bind_set)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack);
+ int (*devlink_sb_occ_snapshot)(struct dsa_switch *ds,
+ unsigned int sb_index);
+ int (*devlink_sb_occ_max_clear)(struct dsa_switch *ds,
+ unsigned int sb_index);
+ int (*devlink_sb_occ_port_pool_get)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+ int (*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
+
+ /*
+	 * MTU change functionality. Switches can also adjust their MRU through
+	 * this method. Here, MTU means the SDU (L2 payload) length.
+ * If the switch needs to account for the DSA tag on the CPU port, this
+ * method needs to do so privately.
+ */
+ int (*port_change_mtu)(struct dsa_switch *ds, int port,
+ int new_mtu);
+ int (*port_max_mtu)(struct dsa_switch *ds, int port);
+
+ /*
+ * LAG integration
+ */
+ int (*port_lag_change)(struct dsa_switch *ds, int port);
+ int (*port_lag_join)(struct dsa_switch *ds, int port,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
+ int (*port_lag_leave)(struct dsa_switch *ds, int port,
+ struct dsa_lag lag);
+
+ /*
+ * HSR integration
+ */
+ int (*port_hsr_join)(struct dsa_switch *ds, int port,
+ struct net_device *hsr);
+ int (*port_hsr_leave)(struct dsa_switch *ds, int port,
+ struct net_device *hsr);
+
+ /*
+ * MRP integration
+ */
+ int (*port_mrp_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_mrp *mrp);
+ int (*port_mrp_del)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_mrp *mrp);
+ int (*port_mrp_add_ring_role)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp);
+ int (*port_mrp_del_ring_role)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp);
+
+ /*
+ * tag_8021q operations
+ */
+ int (*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags);
+ int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
+
+ /*
+ * DSA master tracking operations
+ */
+ void (*master_state_change)(struct dsa_switch *ds,
+ const struct net_device *master,
+ bool operational);
+};
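+
+/* Illustrative sketch, not part of this header: a minimal driver only has
+ * to provide the tagging protocol; every other callback above is optional.
+ * The foo_* helpers are hypothetical.
+ */
+#if 0
+static enum dsa_tag_protocol foo_get_tag_protocol(struct dsa_switch *ds,
+						  int port,
+						  enum dsa_tag_protocol mprot)
+{
+	return DSA_TAG_PROTO_NONE;
+}
+
+static const struct dsa_switch_ops foo_switch_ops = {
+	.get_tag_protocol	= foo_get_tag_protocol,
+	.setup			= foo_setup,
+	.port_enable		= foo_port_enable,
+	.port_disable		= foo_port_disable,
+};
+#endif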
+
+#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \
+ DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes, \
+ dsa_devlink_param_get, dsa_devlink_param_set, NULL)
+
+int dsa_devlink_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int dsa_devlink_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int dsa_devlink_params_register(struct dsa_switch *ds,
+ const struct devlink_param *params,
+ size_t params_count);
+void dsa_devlink_params_unregister(struct dsa_switch *ds,
+ const struct devlink_param *params,
+ size_t params_count);
+int dsa_devlink_resource_register(struct dsa_switch *ds,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params);
+
+void dsa_devlink_resources_unregister(struct dsa_switch *ds);
+
+void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv);
+void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
+ u64 resource_id);
+struct devlink_region *
+dsa_devlink_region_create(struct dsa_switch *ds,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+struct devlink_region *
+dsa_devlink_port_region_create(struct dsa_switch *ds,
+ int port,
+ const struct devlink_port_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+void dsa_devlink_region_destroy(struct devlink_region *region);
+
+struct dsa_port *dsa_port_from_netdev(struct net_device *netdev);
+
+struct dsa_devlink_priv {
+ struct dsa_switch *ds;
+};
+
+static inline struct dsa_switch *dsa_devlink_to_ds(struct devlink *dl)
+{
+ struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
+
+ return dl_priv->ds;
+}
+
+static inline
+struct dsa_switch *dsa_devlink_port_to_ds(struct devlink_port *port)
+{
+ struct devlink *dl = port->devlink;
+ struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
+
+ return dl_priv->ds;
+}
+
+static inline int dsa_devlink_port_to_port(struct devlink_port *port)
+{
+ return port->index;
+}
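+
+/* Illustrative sketch, not part of this header: devlink callbacks receive
+ * a struct devlink (or a devlink_port) and recover the DSA context with
+ * the helpers above. foo_dump_regs() is hypothetical.
+ */
+#if 0
+static int foo_region_snapshot(struct devlink *dl,
+			       const struct devlink_region_ops *ops,
+			       struct netlink_ext_ack *extack, u8 **data)
+{
+	struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+
+	return foo_dump_regs(ds, data);
+}
+#endif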
+
+struct dsa_switch_driver {
+ struct list_head list;
+ const struct dsa_switch_ops *ops;
+};
+
+struct net_device *dsa_dev_to_net_device(struct device *dev);
+
+bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+
+/* Keep inline for faster access in hot path */
+static inline bool netdev_uses_dsa(const struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ return dev->dsa_ptr && dev->dsa_ptr->rcv;
+#endif
+ return false;
+}
+
+/* All DSA tags that push the EtherType to the right (basically all except tail
+ * tags, which don't break dissection) can be treated the same from the
+ * perspective of the flow dissector.
+ *
+ * We need to return:
+ * - offset: the (B - A) difference between:
+ * A. the position of the real EtherType and
+ * B. the current skb->data (aka ETH_HLEN bytes into the frame, aka 2 bytes
+ * after the normal EtherType was supposed to be)
+ * The offset in bytes is exactly equal to the tagger overhead (and half of
+ * that, in __be16 shorts).
+ *
+ * - proto: the value of the real EtherType.
+ */
+static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb,
+ __be16 *proto, int *offset)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
+ int tag_len = ops->needed_headroom;
+
+ *offset = tag_len;
+ *proto = ((__be16 *)skb->data)[(tag_len / 2) - 1];
+#endif
+}
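+
+/* Worked example: a tagger with 4 bytes of overhead pushes the real
+ * EtherType 4 bytes to the right, so *offset becomes 4 and *proto is read
+ * as the __be16 at bytes [2:3] past skb->data (index tag_len / 2 - 1).
+ */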
+
+#if IS_ENABLED(CONFIG_NET_DSA)
+static inline int __dsa_netdevice_ops_check(struct net_device *dev)
+{
+ int err = -EOPNOTSUPP;
+
+ if (!dev->dsa_ptr)
+ return err;
+
+ if (!dev->dsa_ptr->netdev_ops)
+ return err;
+
+ return 0;
+}
+
+static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ const struct dsa_netdevice_ops *ops;
+ int err;
+
+ err = __dsa_netdevice_ops_check(dev);
+ if (err)
+ return err;
+
+ ops = dev->dsa_ptr->netdev_ops;
+
+ return ops->ndo_eth_ioctl(dev, ifr, cmd);
+}
+#else
+static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+void dsa_unregister_switch(struct dsa_switch *ds);
+int dsa_register_switch(struct dsa_switch *ds);
+void dsa_switch_shutdown(struct dsa_switch *ds);
+struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
+void dsa_flush_workqueue(void);
+#ifdef CONFIG_PM_SLEEP
+int dsa_switch_suspend(struct dsa_switch *ds);
+int dsa_switch_resume(struct dsa_switch *ds);
+#else
+static inline int dsa_switch_suspend(struct dsa_switch *ds)
+{
+ return 0;
+}
+static inline int dsa_switch_resume(struct dsa_switch *ds)
+{
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#if IS_ENABLED(CONFIG_NET_DSA)
+bool dsa_slave_dev_check(const struct net_device *dev);
+#else
+static inline bool dsa_slave_dev_check(const struct net_device *dev)
+{
+ return false;
+}
+#endif
+
+netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
+void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);
+
+struct dsa_tag_driver {
+ const struct dsa_device_ops *ops;
+ struct list_head list;
+ struct module *owner;
+};
+
+void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
+ unsigned int count,
+ struct module *owner);
+void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
+ unsigned int count);
+
+#define dsa_tag_driver_module_drivers(__dsa_tag_drivers_array, __count) \
+static int __init dsa_tag_driver_module_init(void) \
+{ \
+ dsa_tag_drivers_register(__dsa_tag_drivers_array, __count, \
+ THIS_MODULE); \
+ return 0; \
+} \
+module_init(dsa_tag_driver_module_init); \
+ \
+static void __exit dsa_tag_driver_module_exit(void) \
+{ \
+ dsa_tag_drivers_unregister(__dsa_tag_drivers_array, __count); \
+} \
+module_exit(dsa_tag_driver_module_exit)
+
+/**
+ * module_dsa_tag_drivers() - Helper macro for registering DSA tag
+ * drivers
+ * @__ops_array: Array of tag driver structures
+ *
+ * Helper macro for DSA tag drivers which do not do anything special
+ * in module init/exit. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_dsa_tag_drivers(__ops_array) \
+dsa_tag_driver_module_drivers(__ops_array, ARRAY_SIZE(__ops_array))
+
+#define DSA_TAG_DRIVER_NAME(__ops) dsa_tag_driver ## _ ## __ops
+
+/* Create a static structure we can build a linked list of dsa_tag
+ * drivers
+ */
+#define DSA_TAG_DRIVER(__ops) \
+static struct dsa_tag_driver DSA_TAG_DRIVER_NAME(__ops) = { \
+ .ops = &__ops, \
+}
+
+/**
+ * module_dsa_tag_driver() - Helper macro for registering a single DSA tag
+ * driver
+ * @__ops: Single tag driver structures
+ *
+ * Helper macro for DSA tag drivers which do not do anything special
+ * in module init/exit. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_dsa_tag_driver(__ops) \
+DSA_TAG_DRIVER(__ops); \
+ \
+static struct dsa_tag_driver *dsa_tag_driver_array[] = { \
+ &DSA_TAG_DRIVER_NAME(__ops) \
+}; \
+module_dsa_tag_drivers(dsa_tag_driver_array)
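+
+/* Illustrative sketch, not part of this header: a complete tag driver
+ * module built on the helper above. foo_tag_xmit() and foo_tag_rcv() are
+ * hypothetical.
+ */
+#if 0
+static const struct dsa_device_ops foo_tag_ops = {
+	.name			= "foo",
+	.proto			= DSA_TAG_PROTO_NONE,
+	.xmit			= foo_tag_xmit,
+	.rcv			= foo_tag_rcv,
+	.needed_headroom	= 4,
+};
+
+MODULE_LICENSE("GPL");
+module_dsa_tag_driver(foo_tag_ops);
+#endif
+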
+#endif
+
diff --git a/include/net/dsfield.h b/include/net/dsfield.h
new file mode 100644
index 000000000..a59a57ffc
--- /dev/null
+++ b/include/net/dsfield.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* include/net/dsfield.h - Manipulation of the Differentiated Services field */
+
+/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
+
+
+#ifndef __NET_DSFIELD_H
+#define __NET_DSFIELD_H
+
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <asm/byteorder.h>
+
+
+static inline __u8 ipv4_get_dsfield(const struct iphdr *iph)
+{
+ return iph->tos;
+}
+
+
+static inline __u8 ipv6_get_dsfield(const struct ipv6hdr *ipv6h)
+{
+ return ntohs(*(__force const __be16 *)ipv6h) >> 4;
+}
+
+
+static inline void ipv4_change_dsfield(struct iphdr *iph, __u8 mask,
+ __u8 value)
+{
+ __u32 check = ntohs((__force __be16)iph->check);
+ __u8 dsfield;
+
+ dsfield = (iph->tos & mask) | value;
+ check += iph->tos;
+ if ((check+1) >> 16) check = (check+1) & 0xffff;
+ check -= dsfield;
+ check += check >> 16; /* adjust carry */
+ iph->check = (__force __sum16)htons(check);
+ iph->tos = dsfield;
+}
+
+
+static inline void ipv6_change_dsfield(struct ipv6hdr *ipv6h,__u8 mask,
+ __u8 value)
+{
+ __be16 *p = (__force __be16 *)ipv6h;
+
+ *p = (*p & htons((((u16)mask << 4) | 0xf00f))) | htons((u16)value << 4);
+}
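+
+/* Illustrative sketch, not part of this header: re-marking the DSCP while
+ * preserving the two ECN bits, as a qdisc or tunnel might do. INET_ECN_MASK
+ * comes from <net/inet_ecn.h>.
+ */
+#if 0
+static void foo_remark_cs5(struct sk_buff *skb)
+{
+	/* keep the bits under the mask, set the rest to DSCP CS5 (0xa0) */
+	ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0xa0);
+}
+#endif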
+
+
+#endif
diff --git a/include/net/dst.h b/include/net/dst.h
new file mode 100644
index 000000000..d67fda89c
--- /dev/null
+++ b/include/net/dst.h
@@ -0,0 +1,560 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * net/dst.h Protocol independent destination cache definitions.
+ *
+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *
+ */
+
+#ifndef _NET_DST_H
+#define _NET_DST_H
+
+#include <net/dst_ops.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/rcupdate.h>
+#include <linux/bug.h>
+#include <linux/jiffies.h>
+#include <linux/refcount.h>
+#include <net/neighbour.h>
+#include <asm/processor.h>
+#include <linux/indirect_call_wrapper.h>
+
+struct sk_buff;
+
+struct dst_entry {
+ struct net_device *dev;
+ struct dst_ops *ops;
+ unsigned long _metrics;
+ unsigned long expires;
+#ifdef CONFIG_XFRM
+ struct xfrm_state *xfrm;
+#else
+ void *__pad1;
+#endif
+ int (*input)(struct sk_buff *);
+ int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
+
+ unsigned short flags;
+#define DST_NOXFRM 0x0002
+#define DST_NOPOLICY 0x0004
+#define DST_NOCOUNT 0x0008
+#define DST_FAKE_RTABLE 0x0010
+#define DST_XFRM_TUNNEL 0x0020
+#define DST_XFRM_QUEUE 0x0040
+#define DST_METADATA 0x0080
+
+ /* A non-zero value of dst->obsolete forces by-hand validation
+ * of the route entry. Positive values are set by the generic
+ * dst layer to indicate that the entry has been forcefully
+ * destroyed.
+ *
+ * Negative values are used by the implementation layer code to
+ * force invocation of the dst_ops->check() method.
+ */
+ short obsolete;
+#define DST_OBSOLETE_NONE 0
+#define DST_OBSOLETE_DEAD 2
+#define DST_OBSOLETE_FORCE_CHK -1
+#define DST_OBSOLETE_KILL -2
+ unsigned short header_len; /* more space at head required */
+ unsigned short trailer_len; /* space to reserve at tail */
+
+ /*
+ * __refcnt wants to be on a different cache line from
+ * input/output/ops or performance tanks badly
+ */
+#ifdef CONFIG_64BIT
+ atomic_t __refcnt; /* 64-bit offset 64 */
+#endif
+ int __use;
+ unsigned long lastuse;
+ struct lwtunnel_state *lwtstate;
+ struct rcu_head rcu_head;
+ short error;
+ short __pad;
+ __u32 tclassid;
+#ifndef CONFIG_64BIT
+ atomic_t __refcnt; /* 32-bit offset 64 */
+#endif
+ netdevice_tracker dev_tracker;
+};
+
+struct dst_metrics {
+ u32 metrics[RTAX_MAX];
+ refcount_t refcnt;
+} __aligned(4); /* Low pointer bits contain DST_METRICS_FLAGS */
+extern const struct dst_metrics dst_default_metrics;
+
+u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
+
+#define DST_METRICS_READ_ONLY 0x1UL
+#define DST_METRICS_REFCOUNTED 0x2UL
+#define DST_METRICS_FLAGS 0x3UL
+#define __DST_METRICS_PTR(Y) \
+ ((u32 *)((Y) & ~DST_METRICS_FLAGS))
+#define DST_METRICS_PTR(X) __DST_METRICS_PTR((X)->_metrics)
+
+static inline bool dst_metrics_read_only(const struct dst_entry *dst)
+{
+ return dst->_metrics & DST_METRICS_READ_ONLY;
+}
+
+void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
+
+static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
+{
+ unsigned long val = dst->_metrics;
+ if (!(val & DST_METRICS_READ_ONLY))
+ __dst_destroy_metrics_generic(dst, val);
+}
+
+static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
+{
+ unsigned long p = dst->_metrics;
+
+ BUG_ON(!p);
+
+ if (p & DST_METRICS_READ_ONLY)
+ return dst->ops->cow_metrics(dst, p);
+ return __DST_METRICS_PTR(p);
+}
+
+/* This may only be invoked before the entry has reached global
+ * visibility.
+ */
+static inline void dst_init_metrics(struct dst_entry *dst,
+ const u32 *src_metrics,
+ bool read_only)
+{
+ dst->_metrics = ((unsigned long) src_metrics) |
+ (read_only ? DST_METRICS_READ_ONLY : 0);
+}
+
+static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
+{
+ u32 *dst_metrics = dst_metrics_write_ptr(dest);
+
+ if (dst_metrics) {
+ u32 *src_metrics = DST_METRICS_PTR(src);
+
+ memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
+ }
+}
+
+static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
+{
+ return DST_METRICS_PTR(dst);
+}
+
+static inline u32
+dst_metric_raw(const struct dst_entry *dst, const int metric)
+{
+ u32 *p = DST_METRICS_PTR(dst);
+
+ return p[metric-1];
+}
+
+static inline u32
+dst_metric(const struct dst_entry *dst, const int metric)
+{
+ WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
+ metric == RTAX_ADVMSS ||
+ metric == RTAX_MTU);
+ return dst_metric_raw(dst, metric);
+}
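+
+/* Worked example: metrics are 1-indexed by the RTAX_* identifiers, so the
+ * congestion window clamp lives at p[RTAX_CWND - 1]:
+ *
+ *	u32 cwnd = dst_metric(dst, RTAX_CWND);
+ *
+ * The WARN_ON_ONCE() above catches the three metrics (hop limit, advmss,
+ * MTU) whose zero value means "compute a default" and which therefore have
+ * dedicated accessors, such as dst_metric_advmss() below.
+ */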
+
+static inline u32
+dst_metric_advmss(const struct dst_entry *dst)
+{
+ u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);
+
+ if (!advmss)
+ advmss = dst->ops->default_advmss(dst);
+
+ return advmss;
+}
+
+static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
+{
+ u32 *p = dst_metrics_write_ptr(dst);
+
+ if (p)
+ p[metric-1] = val;
+}
+
+/* Kernel-internal feature bits that are unallocated in user space. */
+#define DST_FEATURE_ECN_CA (1U << 31)
+
+#define DST_FEATURE_MASK (DST_FEATURE_ECN_CA)
+#define DST_FEATURE_ECN_MASK (DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)
+
+static inline u32
+dst_feature(const struct dst_entry *dst, u32 feature)
+{
+ return dst_metric(dst, RTAX_FEATURES) & feature;
+}
+
+INDIRECT_CALLABLE_DECLARE(unsigned int ip6_mtu(const struct dst_entry *));
+INDIRECT_CALLABLE_DECLARE(unsigned int ipv4_mtu(const struct dst_entry *));
+static inline u32 dst_mtu(const struct dst_entry *dst)
+{
+ return INDIRECT_CALL_INET(dst->ops->mtu, ip6_mtu, ipv4_mtu, dst);
+}
+
+/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
+static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
+{
+ return msecs_to_jiffies(dst_metric(dst, metric));
+}
+
+static inline u32
+dst_allfrag(const struct dst_entry *dst)
+{
+ int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
+ return ret;
+}
+
+static inline int
+dst_metric_locked(const struct dst_entry *dst, int metric)
+{
+ return dst_metric(dst, RTAX_LOCK) & (1 << metric);
+}
+
+static inline void dst_hold(struct dst_entry *dst)
+{
+ /*
+ * If your kernel compilation stops here, please check
+ * the placement of __refcnt in struct dst_entry
+ */
+ BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
+ WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0);
+}
+
+static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
+{
+ if (unlikely(time != dst->lastuse)) {
+ dst->__use++;
+ dst->lastuse = time;
+ }
+}
+
+static inline struct dst_entry *dst_clone(struct dst_entry *dst)
+{
+ if (dst)
+ dst_hold(dst);
+ return dst;
+}
+
+void dst_release(struct dst_entry *dst);
+
+void dst_release_immediate(struct dst_entry *dst);
+
+static inline void refdst_drop(unsigned long refdst)
+{
+ if (!(refdst & SKB_DST_NOREF))
+ dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
+}
+
+/**
+ * skb_dst_drop - drops skb dst
+ * @skb: buffer
+ *
+ * Drops dst reference count if a reference was taken.
+ */
+static inline void skb_dst_drop(struct sk_buff *skb)
+{
+ if (skb->_skb_refdst) {
+ refdst_drop(skb->_skb_refdst);
+ skb->_skb_refdst = 0UL;
+ }
+}
+
+static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
+{
+ nskb->slow_gro |= !!refdst;
+ nskb->_skb_refdst = refdst;
+ if (!(nskb->_skb_refdst & SKB_DST_NOREF))
+ dst_clone(skb_dst(nskb));
+}
+
+static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
+{
+ __skb_dst_copy(nskb, oskb->_skb_refdst);
+}
+
+/**
+ * dst_hold_safe - Take a reference on a dst if possible
+ * @dst: pointer to dst entry
+ *
+ * This helper returns false if it could not safely
+ * take a reference on a dst.
+ */
+static inline bool dst_hold_safe(struct dst_entry *dst)
+{
+ return atomic_inc_not_zero(&dst->__refcnt);
+}
+
+/**
+ * skb_dst_force - makes sure skb dst is refcounted
+ * @skb: buffer
+ *
+ * If dst is not yet refcounted and not destroyed, grab a ref on it.
+ * Returns true if dst is refcounted.
+ */
+static inline bool skb_dst_force(struct sk_buff *skb)
+{
+ if (skb_dst_is_noref(skb)) {
+ struct dst_entry *dst = skb_dst(skb);
+
+ WARN_ON(!rcu_read_lock_held());
+ if (!dst_hold_safe(dst))
+ dst = NULL;
+
+ skb->_skb_refdst = (unsigned long)dst;
+ skb->slow_gro |= !!dst;
+ }
+
+ return skb->_skb_refdst != 0UL;
+}
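+
+/* Illustrative sketch, not part of this header: a path that queues the skb
+ * beyond the current RCU read side must pin the dst first, dropping the
+ * packet if the dst is already going away. foo_queue_tail() is
+ * hypothetical.
+ */
+#if 0
+static int foo_enqueue(struct sk_buff *skb)
+{
+	if (!skb_dst_force(skb)) {
+		kfree_skb(skb);
+		return NET_XMIT_DROP;
+	}
+
+	return foo_queue_tail(skb);
+}
+#endif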
+
+
+/**
+ * __skb_tunnel_rx - prepare skb for rx reinsert
+ * @skb: buffer
+ * @dev: tunnel device
+ * @net: netns for packet i/o
+ *
+ * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
+ * so make some cleanups. (no accounting done)
+ */
+static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+ struct net *net)
+{
+ skb->dev = dev;
+
+ /*
+	 * Clear the hash so that we can recalculate it for the
+	 * encapsulated packet, unless we have already determined the hash
+ * over the L4 4-tuple.
+ */
+ skb_clear_hash_if_not_l4(skb);
+ skb_set_queue_mapping(skb, 0);
+ skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
+}
+
+/**
+ * skb_tunnel_rx - prepare skb for rx reinsert
+ * @skb: buffer
+ * @dev: tunnel device
+ * @net: netns for packet i/o
+ *
+ * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
+ * so make some cleanups, and perform accounting.
+ * Note: this accounting is not SMP safe.
+ */
+static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+ struct net *net)
+{
+ DEV_STATS_INC(dev, rx_packets);
+ DEV_STATS_ADD(dev, rx_bytes, skb->len);
+ __skb_tunnel_rx(skb, dev, net);
+}
+
+static inline u32 dst_tclassid(const struct sk_buff *skb)
+{
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ const struct dst_entry *dst;
+
+ dst = skb_dst(skb);
+ if (dst)
+ return dst->tclassid;
+#endif
+ return 0;
+}
+
+int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+static inline int dst_discard(struct sk_buff *skb)
+{
+ return dst_discard_out(&init_net, skb->sk, skb);
+}
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
+ int initial_obsolete, unsigned short flags);
+void dst_init(struct dst_entry *dst, struct dst_ops *ops,
+ struct net_device *dev, int initial_ref, int initial_obsolete,
+ unsigned short flags);
+struct dst_entry *dst_destroy(struct dst_entry *dst);
+void dst_dev_put(struct dst_entry *dst);
+
+static inline void dst_confirm(struct dst_entry *dst)
+{
+}
+
+static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+{
+ struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
+ return IS_ERR(n) ? NULL : n;
+}
+
+static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
+ struct sk_buff *skb)
+{
+ struct neighbour *n;
+
+ if (WARN_ON_ONCE(!dst->ops->neigh_lookup))
+ return NULL;
+
+ n = dst->ops->neigh_lookup(dst, skb, NULL);
+
+ return IS_ERR(n) ? NULL : n;
+}
+
+static inline void dst_confirm_neigh(const struct dst_entry *dst,
+ const void *daddr)
+{
+ if (dst->ops->confirm_neigh)
+ dst->ops->confirm_neigh(dst, daddr);
+}
+
+static inline void dst_link_failure(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ if (dst && dst->ops && dst->ops->link_failure)
+ dst->ops->link_failure(skb);
+}
+
+static inline void dst_set_expires(struct dst_entry *dst, int timeout)
+{
+ unsigned long expires = jiffies + timeout;
+
+ if (expires == 0)
+ expires = 1;
+
+ if (dst->expires == 0 || time_before(expires, dst->expires))
+ dst->expires = expires;
+}
+
+INDIRECT_CALLABLE_DECLARE(int ip6_output(struct net *, struct sock *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *,
+ struct sk_buff *));
+/* Output packet to network from transport. */
+static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ return INDIRECT_CALL_INET(skb_dst(skb)->output,
+ ip6_output, ip_output,
+ net, sk, skb);
+}
+
+INDIRECT_CALLABLE_DECLARE(int ip6_input(struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int ip_local_deliver(struct sk_buff *));
+/* Input packet from network to transport. */
+static inline int dst_input(struct sk_buff *skb)
+{
+ return INDIRECT_CALL_INET(skb_dst(skb)->input,
+ ip6_input, ip_local_deliver, skb);
+}
+
+INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
+ u32));
+INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
+ u32));
+static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
+{
+ if (dst->obsolete)
+ dst = INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check,
+ ipv4_dst_check, dst, cookie);
+ return dst;
+}
+
+/* Flags for xfrm_lookup flags argument. */
+enum {
+ XFRM_LOOKUP_ICMP = 1 << 0,
+ XFRM_LOOKUP_QUEUE = 1 << 1,
+ XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
+};
+
+struct flowi;
+#ifndef CONFIG_XFRM
+static inline struct dst_entry *xfrm_lookup(struct net *net,
+ struct dst_entry *dst_orig,
+ const struct flowi *fl,
+ const struct sock *sk,
+ int flags)
+{
+ return dst_orig;
+}
+
+static inline struct dst_entry *
+xfrm_lookup_with_ifid(struct net *net, struct dst_entry *dst_orig,
+ const struct flowi *fl, const struct sock *sk,
+ int flags, u32 if_id)
+{
+ return dst_orig;
+}
+
+static inline struct dst_entry *xfrm_lookup_route(struct net *net,
+ struct dst_entry *dst_orig,
+ const struct flowi *fl,
+ const struct sock *sk,
+ int flags)
+{
+ return dst_orig;
+}
+
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+{
+ return NULL;
+}
+
+#else
+struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+ const struct flowi *fl, const struct sock *sk,
+ int flags);
+
+struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
+ struct dst_entry *dst_orig,
+ const struct flowi *fl,
+ const struct sock *sk, int flags,
+ u32 if_id);
+
+struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
+ const struct flowi *fl, const struct sock *sk,
+ int flags);
+
+/* skb attached with this dst needs transformation if dst->xfrm is valid */
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+{
+ return dst->xfrm;
+}
+#endif
+
+static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (dst && dst->ops->update_pmtu)
+ dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
+}
+
+/* update dst pmtu but not do neighbor confirm */
+static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (dst && dst->ops->update_pmtu)
+ dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
+}
+
+struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
+void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu, bool confirm_neigh);
+void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb);
+u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old);
+struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr);
+unsigned int dst_blackhole_mtu(const struct dst_entry *dst);
+
+#endif /* _NET_DST_H */
diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h
new file mode 100644
index 000000000..df6622a5f
--- /dev/null
+++ b/include/net/dst_cache.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_DST_CACHE_H
+#define _NET_DST_CACHE_H
+
+#include <linux/jiffies.h>
+#include <net/dst.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ip6_fib.h>
+#endif
+
+struct dst_cache {
+ struct dst_cache_pcpu __percpu *cache;
+ unsigned long reset_ts;
+};
+
+/**
+ * dst_cache_get - perform cache lookup
+ * @dst_cache: the cache
+ *
+ * The caller should use dst_cache_get_ip4() if it needs to retrieve the
+ * source address to be used when xmitting to the cached dst.
+ * Local BH must be disabled.
+ */
+struct dst_entry *dst_cache_get(struct dst_cache *dst_cache);
+
+/**
+ * dst_cache_get_ip4 - perform cache lookup and fetch ipv4 source address
+ * @dst_cache: the cache
+ * @saddr: return value for the retrieved source address
+ *
+ * Local BH must be disabled.
+ */
+struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr);
+
+/**
+ * dst_cache_set_ip4 - store the ipv4 dst into the cache
+ * @dst_cache: the cache
+ * @dst: the entry to be cached
+ * @saddr: the source address to be stored inside the cache
+ *
+ * Local BH must be disabled.
+ */
+void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst,
+ __be32 saddr);
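+
+/* Illustrative sketch, not part of this header: the typical IPv4 tunnel
+ * transmit pattern: try the cache first, fall back to a route lookup and
+ * populate the cache on success. @tun, @net and the flowi4 setup are
+ * assumed to exist in the surrounding function.
+ */
+#if 0
+	struct rtable *rt;
+
+	rt = dst_cache_get_ip4(&tun->dst_cache, &fl4.saddr);
+	if (!rt) {
+		rt = ip_route_output_key(net, &fl4);
+		if (IS_ERR(rt))
+			return PTR_ERR(rt);
+		dst_cache_set_ip4(&tun->dst_cache, &rt->dst, fl4.saddr);
+	}
+#endif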
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+/**
+ * dst_cache_set_ip6 - store the ipv6 dst into the cache
+ * @dst_cache: the cache
+ * @dst: the entry to be cached
+ * @saddr: the source address to be stored inside the cache
+ *
+ * Local BH must be disabled.
+ */
+void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst,
+ const struct in6_addr *saddr);
+
+/**
+ * dst_cache_get_ip6 - perform cache lookup and fetch ipv6 source address
+ * @dst_cache: the cache
+ * @saddr: return value for the retrieved source address
+ *
+ * Local BH must be disabled.
+ */
+struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache,
+ struct in6_addr *saddr);
+#endif
+
+/**
+ * dst_cache_reset - invalidate the cache contents
+ * @dst_cache: the cache
+ *
+ * This does not free the cached dst to avoid races and contentions.
+ * The dst will be freed on a later cache lookup.
+ */
+static inline void dst_cache_reset(struct dst_cache *dst_cache)
+{
+ dst_cache->reset_ts = jiffies;
+}
+
+/**
+ * dst_cache_reset_now - invalidate the cache contents immediately
+ * @dst_cache: the cache
+ *
+ * The caller must be sure there are no concurrent users, as this frees
+ * all dst_cache users immediately, rather than waiting for the next
+ * per-cpu usage like dst_cache_reset does. Most callers should use the
+ * higher speed lazily-freed dst_cache_reset function instead.
+ */
+void dst_cache_reset_now(struct dst_cache *dst_cache);
+
+/**
+ * dst_cache_init - initialize the cache, allocating the required storage
+ * @dst_cache: the cache
+ * @gfp: allocation flags
+ */
+int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp);
+
+/**
+ * dst_cache_destroy - empty the cache and free the allocated storage
+ * @dst_cache: the cache
+ *
+ * No synchronization is enforced: it must be called only when the cache
+ * is unused.
+ */
+void dst_cache_destroy(struct dst_cache *dst_cache);
+
+#endif
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
new file mode 100644
index 000000000..a454cf432
--- /dev/null
+++ b/include/net/dst_metadata.h
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_DST_METADATA_H
+#define __NET_DST_METADATA_H 1
+
+#include <linux/skbuff.h>
+#include <net/ip_tunnels.h>
+#include <net/macsec.h>
+#include <net/dst.h>
+
+enum metadata_type {
+ METADATA_IP_TUNNEL,
+ METADATA_HW_PORT_MUX,
+ METADATA_MACSEC,
+ METADATA_XFRM,
+};
+
+struct hw_port_info {
+ struct net_device *lower_dev;
+ u32 port_id;
+};
+
+struct macsec_info {
+ sci_t sci;
+};
+
+struct xfrm_md_info {
+ u32 if_id;
+ int link;
+};
+
+struct metadata_dst {
+ struct dst_entry dst;
+ enum metadata_type type;
+ union {
+ struct ip_tunnel_info tun_info;
+ struct hw_port_info port_info;
+ struct macsec_info macsec_info;
+ struct xfrm_md_info xfrm_info;
+ } u;
+};
+
+static inline struct metadata_dst *skb_metadata_dst(const struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = (struct metadata_dst *) skb_dst(skb);
+
+ if (md_dst && md_dst->dst.flags & DST_METADATA)
+ return md_dst;
+
+ return NULL;
+}
+
+static inline struct ip_tunnel_info *
+skb_tunnel_info(const struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ struct dst_entry *dst;
+
+ if (md_dst && md_dst->type == METADATA_IP_TUNNEL)
+ return &md_dst->u.tun_info;
+
+ dst = skb_dst(skb);
+ if (dst && dst->lwtstate &&
+ (dst->lwtstate->type == LWTUNNEL_ENCAP_IP ||
+ dst->lwtstate->type == LWTUNNEL_ENCAP_IP6))
+ return lwt_tun_info(dst->lwtstate);
+
+ return NULL;
+}
+
+static inline struct xfrm_md_info *lwt_xfrm_info(struct lwtunnel_state *lwt)
+{
+ return (struct xfrm_md_info *)lwt->data;
+}
+
+static inline struct xfrm_md_info *skb_xfrm_md_info(const struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ struct dst_entry *dst;
+
+ if (md_dst && md_dst->type == METADATA_XFRM)
+ return &md_dst->u.xfrm_info;
+
+ dst = skb_dst(skb);
+ if (dst && dst->lwtstate &&
+ dst->lwtstate->type == LWTUNNEL_ENCAP_XFRM)
+ return lwt_xfrm_info(dst->lwtstate);
+
+ return NULL;
+}
+
+static inline bool skb_valid_dst(const struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ return dst && !(dst->flags & DST_METADATA);
+}
+
+static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
+ const struct sk_buff *skb_b)
+{
+ const struct metadata_dst *a, *b;
+
+ if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
+ return 0;
+
+ a = (const struct metadata_dst *) skb_dst(skb_a);
+ b = (const struct metadata_dst *) skb_dst(skb_b);
+
+ if (!a != !b || a->type != b->type)
+ return 1;
+
+ switch (a->type) {
+ case METADATA_HW_PORT_MUX:
+ return memcmp(&a->u.port_info, &b->u.port_info,
+ sizeof(a->u.port_info));
+ case METADATA_IP_TUNNEL:
+ return memcmp(&a->u.tun_info, &b->u.tun_info,
+ sizeof(a->u.tun_info) +
+ a->u.tun_info.options_len);
+ case METADATA_MACSEC:
+ return memcmp(&a->u.macsec_info, &b->u.macsec_info,
+ sizeof(a->u.macsec_info));
+ case METADATA_XFRM:
+ return memcmp(&a->u.xfrm_info, &b->u.xfrm_info,
+ sizeof(a->u.xfrm_info));
+ default:
+ return 1;
+ }
+}
+
+void metadata_dst_free(struct metadata_dst *);
+struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
+ gfp_t flags);
+void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst);
+struct metadata_dst __percpu *
+metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags);
+
+static inline struct metadata_dst *tun_rx_dst(int md_size)
+{
+ struct metadata_dst *tun_dst;
+
+ tun_dst = metadata_dst_alloc(md_size, METADATA_IP_TUNNEL, GFP_ATOMIC);
+ if (!tun_dst)
+ return NULL;
+
+ tun_dst->u.tun_info.options_len = 0;
+ tun_dst->u.tun_info.mode = 0;
+ return tun_dst;
+}
+
+static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ int md_size;
+ struct metadata_dst *new_md;
+
+ if (!md_dst || md_dst->type != METADATA_IP_TUNNEL)
+ return ERR_PTR(-EINVAL);
+
+ md_size = md_dst->u.tun_info.options_len;
+ new_md = metadata_dst_alloc(md_size, METADATA_IP_TUNNEL, GFP_ATOMIC);
+ if (!new_md)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
+ sizeof(struct ip_tunnel_info) + md_size);
+#ifdef CONFIG_DST_CACHE
+ /* Unclone the dst cache if there is one */
+ if (new_md->u.tun_info.dst_cache.cache) {
+ int ret;
+
+ ret = dst_cache_init(&new_md->u.tun_info.dst_cache, GFP_ATOMIC);
+ if (ret) {
+ metadata_dst_free(new_md);
+ return ERR_PTR(ret);
+ }
+ }
+#endif
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, &new_md->dst);
+ return new_md;
+}
+
+static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb)
+{
+ struct metadata_dst *dst;
+
+ dst = tun_dst_unclone(skb);
+ if (IS_ERR(dst))
+ return NULL;
+
+ return &dst->u.tun_info;
+}
+
+static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
+ __be32 daddr,
+ __u8 tos, __u8 ttl,
+ __be16 tp_dst,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
+ struct metadata_dst *tun_dst;
+
+ tun_dst = tun_rx_dst(md_size);
+ if (!tun_dst)
+ return NULL;
+
+ ip_tunnel_key_init(&tun_dst->u.tun_info.key,
+ saddr, daddr, tos, ttl,
+ 0, 0, tp_dst, tunnel_id, flags);
+ return tun_dst;
+}
+
+static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+
+ return __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
+ 0, flags, tunnel_id, md_size);
+}
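+
+/* Illustrative sketch, not part of this header: a collect-metadata tunnel
+ * receive path attaches the outer header info before handing the skb up;
+ * @key is the tunnel id parsed from the outer header, and the drop label
+ * is assumed to exist in the surrounding function.
+ */
+#if 0
+	struct metadata_dst *tun_dst;
+
+	tun_dst = ip_tun_rx_dst(skb, TUNNEL_KEY, key, 0);
+	if (!tun_dst)
+		goto drop;
+	skb_dst_set(skb, &tun_dst->dst);
+#endif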
+
+static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u8 tos, __u8 ttl,
+ __be16 tp_dst,
+ __be32 label,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
+ struct metadata_dst *tun_dst;
+ struct ip_tunnel_info *info;
+
+ tun_dst = tun_rx_dst(md_size);
+ if (!tun_dst)
+ return NULL;
+
+ info = &tun_dst->u.tun_info;
+ info->mode = IP_TUNNEL_INFO_IPV6;
+ info->key.tun_flags = flags;
+ info->key.tun_id = tunnel_id;
+ info->key.tp_src = 0;
+ info->key.tp_dst = tp_dst;
+
+ info->key.u.ipv6.src = *saddr;
+ info->key.u.ipv6.dst = *daddr;
+
+ info->key.tos = tos;
+ info->key.ttl = ttl;
+ info->key.label = label;
+
+ return tun_dst;
+}
+
+static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ return __ipv6_tun_set_dst(&ip6h->saddr, &ip6h->daddr,
+ ipv6_get_dsfield(ip6h), ip6h->hop_limit,
+ 0, ip6_flowlabel(ip6h), flags, tunnel_id,
+ md_size);
+}
+#endif /* __NET_DST_METADATA_H */
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
new file mode 100644
index 000000000..632086b2f
--- /dev/null
+++ b/include/net/dst_ops.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_DST_OPS_H
+#define _NET_DST_OPS_H
+#include <linux/types.h>
+#include <linux/percpu_counter.h>
+#include <linux/cache.h>
+
+struct dst_entry;
+struct kmem_cache;
+struct net_device;
+struct sk_buff;
+struct sock;
+struct net;
+
+struct dst_ops {
+ unsigned short family;
+ unsigned int gc_thresh;
+
+ void (*gc)(struct dst_ops *ops);
+ struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
+ unsigned int (*default_advmss)(const struct dst_entry *);
+ unsigned int (*mtu)(const struct dst_entry *);
+ u32 * (*cow_metrics)(struct dst_entry *, unsigned long);
+ void (*destroy)(struct dst_entry *);
+ void (*ifdown)(struct dst_entry *,
+ struct net_device *dev, int how);
+ struct dst_entry * (*negative_advice)(struct dst_entry *);
+ void (*link_failure)(struct sk_buff *);
+ void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh);
+ void (*redirect)(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb);
+ int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
+ struct neighbour * (*neigh_lookup)(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr);
+ void (*confirm_neigh)(const struct dst_entry *dst,
+ const void *daddr);
+
+ struct kmem_cache *kmem_cachep;
+
+ struct percpu_counter pcpuc_entries ____cacheline_aligned_in_smp;
+};
+
+static inline int dst_entries_get_fast(struct dst_ops *dst)
+{
+ return percpu_counter_read_positive(&dst->pcpuc_entries);
+}
+
+static inline int dst_entries_get_slow(struct dst_ops *dst)
+{
+ return percpu_counter_sum_positive(&dst->pcpuc_entries);
+}
+
+#define DST_PERCPU_COUNTER_BATCH 32
+static inline void dst_entries_add(struct dst_ops *dst, int val)
+{
+ percpu_counter_add_batch(&dst->pcpuc_entries, val,
+ DST_PERCPU_COUNTER_BATCH);
+}
+
+static inline int dst_entries_init(struct dst_ops *dst)
+{
+ return percpu_counter_init(&dst->pcpuc_entries, 0, GFP_KERNEL);
+}
+
+static inline void dst_entries_destroy(struct dst_ops *dst)
+{
+ percpu_counter_destroy(&dst->pcpuc_entries);
+}
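+
+/* Illustrative sketch, not part of this header: a protocol pairs these two
+ * calls at family (un)registration time; dst_alloc() then compares
+ * dst_entries_get_fast() against gc_thresh to decide when to run ->gc().
+ * foo_dst_ops is hypothetical.
+ */
+#if 0
+static int __init foo_init(void)
+{
+	return dst_entries_init(&foo_dst_ops);
+}
+
+static void __exit foo_exit(void)
+{
+	dst_entries_destroy(&foo_dst_ops);
+}
+#endif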
+
+#endif
diff --git a/include/net/erspan.h b/include/net/erspan.h
new file mode 100644
index 000000000..6cb4cbd6a
--- /dev/null
+++ b/include/net/erspan.h
@@ -0,0 +1,321 @@
+#ifndef __LINUX_ERSPAN_H
+#define __LINUX_ERSPAN_H
+
+/*
+ * GRE header for ERSPAN type I encapsulation (4 octets [34:37])
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |0|0|0|0|0|00000|000000000|00000| Protocol Type for ERSPAN |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * The Type I ERSPAN frame format is based on the barebones IP + GRE
+ * encapsulation (as described above) on top of the raw mirrored frame.
+ * There is no extra ERSPAN header.
+ *
+ *
+ * GRE header for ERSPAN type II and III encapsulation (8 octets [34:41])
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |0|0|0|1|0|00000|000000000|00000| Protocol Type for ERSPAN |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Sequence Number (increments per packet per session) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Note that in the above GRE header [RFC1701] out of the C, R, K, S,
+ * s, Recur, Flags, Version fields only S (bit 03) is set to 1. The
+ * other fields are set to zero, so only a sequence number follows.
+ *
+ * ERSPAN Version 1 (Type II) header (8 octets [42:49])
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Ver | VLAN | COS | En|T| Session ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | Index |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ *
+ * ERSPAN Version 2 (Type III) header (12 octets [42:53])
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Ver | VLAN | COS |BSO|T| Session ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Timestamp |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | SGT |P| FT | Hw ID |D|Gra|O|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Platform Specific SubHeader (8 octets, optional)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Platf ID | Platform Specific Info |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Platform Specific Info |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * GRE proto ERSPAN type I/II = 0x88BE, type III = 0x22EB
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
+#include <uapi/linux/erspan.h>
+
+#define ERSPAN_VERSION 0x1 /* ERSPAN type II */
+#define VER_MASK 0xf000
+#define VLAN_MASK 0x0fff
+#define COS_MASK 0xe000
+#define EN_MASK 0x1800
+#define T_MASK 0x0400
+#define ID_MASK 0x03ff
+#define INDEX_MASK 0xfffff
+
+#define ERSPAN_VERSION2	0x2	/* ERSPAN type III */
+#define BSO_MASK EN_MASK
+#define SGT_MASK 0xffff0000
+#define P_MASK 0x8000
+#define FT_MASK 0x7c00
+#define HWID_MASK 0x03f0
+#define DIR_MASK 0x0008
+#define GRA_MASK 0x0006
+#define O_MASK 0x0001
+
+#define HWID_OFFSET 4
+#define DIR_OFFSET 3
+
+enum erspan_encap_type {
+ ERSPAN_ENCAP_NOVLAN = 0x0, /* originally without VLAN tag */
+ ERSPAN_ENCAP_ISL = 0x1, /* originally ISL encapsulated */
+ ERSPAN_ENCAP_8021Q = 0x2, /* originally 802.1Q encapsulated */
+	ERSPAN_ENCAP_INFRAME = 0x3,	/* VLAN tag preserved in frame */
+};
+
+#define ERSPAN_V1_MDSIZE 4
+#define ERSPAN_V2_MDSIZE 8
+
+struct erspan_base_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 vlan_upper:4,
+ ver:4;
+ __u8 vlan:8;
+ __u8 session_id_upper:2,
+ t:1,
+ en:2,
+ cos:3;
+ __u8 session_id:8;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 ver: 4,
+ vlan_upper:4;
+ __u8 vlan:8;
+ __u8 cos:3,
+ en:2,
+ t:1,
+ session_id_upper:2;
+ __u8 session_id:8;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+};
+
+static inline void set_session_id(struct erspan_base_hdr *ershdr, u16 id)
+{
+ ershdr->session_id = id & 0xff;
+ ershdr->session_id_upper = (id >> 8) & 0x3;
+}
+
+static inline u16 get_session_id(const struct erspan_base_hdr *ershdr)
+{
+ return (ershdr->session_id_upper << 8) + ershdr->session_id;
+}
+
+static inline void set_vlan(struct erspan_base_hdr *ershdr, u16 vlan)
+{
+ ershdr->vlan = vlan & 0xff;
+ ershdr->vlan_upper = (vlan >> 8) & 0xf;
+}
+
+static inline u16 get_vlan(const struct erspan_base_hdr *ershdr)
+{
+ return (ershdr->vlan_upper << 8) + ershdr->vlan;
+}
+
+static inline void set_hwid(struct erspan_md2 *md2, u8 hwid)
+{
+ md2->hwid = hwid & 0xf;
+ md2->hwid_upper = (hwid >> 4) & 0x3;
+}
+
+static inline u8 get_hwid(const struct erspan_md2 *md2)
+{
+ return (md2->hwid_upper << 4) + md2->hwid;
+}
+
+static inline int erspan_hdr_len(int version)
+{
+ if (version == 0)
+ return 0;
+
+ return sizeof(struct erspan_base_hdr) +
+ (version == 1 ? ERSPAN_V1_MDSIZE : ERSPAN_V2_MDSIZE);
+}
+
+static inline u8 tos_to_cos(u8 tos)
+{
+ u8 dscp, cos;
+
+ dscp = tos >> 2;
+ cos = dscp >> 3;
+ return cos;
+}
+
+static inline void erspan_build_header(struct sk_buff *skb,
+ u32 id, u32 index,
+ bool truncate, bool is_ipv4)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ enum erspan_encap_type enc_type;
+ struct erspan_base_hdr *ershdr;
+ struct qtag_prefix {
+ __be16 eth_type;
+ __be16 tci;
+ } *qp;
+ u16 vlan_tci = 0;
+ u8 tos;
+ __be32 *idx;
+
+ tos = is_ipv4 ? ip_hdr(skb)->tos :
+ (ipv6_hdr(skb)->priority << 4) +
+ (ipv6_hdr(skb)->flow_lbl[0] >> 4);
+
+ enc_type = ERSPAN_ENCAP_NOVLAN;
+
+	/* If the mirrored packet has a VLAN tag, extract the TCI and
+	 * preserve the VLAN header in the mirrored frame.
+	 */
+ if (eth->h_proto == htons(ETH_P_8021Q)) {
+ qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
+ vlan_tci = ntohs(qp->tci);
+ enc_type = ERSPAN_ENCAP_INFRAME;
+ }
+
+ skb_push(skb, sizeof(*ershdr) + ERSPAN_V1_MDSIZE);
+ ershdr = (struct erspan_base_hdr *)skb->data;
+ memset(ershdr, 0, sizeof(*ershdr) + ERSPAN_V1_MDSIZE);
+
+ /* Build base header */
+ ershdr->ver = ERSPAN_VERSION;
+ ershdr->cos = tos_to_cos(tos);
+ ershdr->en = enc_type;
+ ershdr->t = truncate;
+ set_vlan(ershdr, vlan_tci);
+ set_session_id(ershdr, id);
+
+ /* Build metadata */
+ idx = (__be32 *)(ershdr + 1);
+ *idx = htonl(index & INDEX_MASK);
+}
+
+/* ERSPAN GRA: timestamp granularity
+ * 00b --> granularity = 100 microseconds
+ * 01b --> granularity = 100 nanoseconds
+ * 10b --> granularity = IEEE 1588
+ * Here we only support 100 microseconds.
+ */
+static inline __be32 erspan_get_timestamp(void)
+{
+ u64 h_usecs;
+ ktime_t kt;
+
+ kt = ktime_get_real();
+ h_usecs = ktime_divns(kt, 100 * NSEC_PER_USEC);
+
+	/* The ERSPAN timestamp field is only 32 bits wide, so at
+	 * 100 usec granularity it wraps around roughly every five days.
+	 */
+ return htonl((u32)h_usecs);
+}
+
+/* ERSPAN BSO (Bad/Short/Oversized), see RFC1757
+ * 00b --> Good frame with no error, or unknown integrity
+ * 01b --> Payload is a Short Frame
+ * 10b --> Payload is an Oversized Frame
+ * 11b --> Payload is a Bad Frame with CRC or Alignment Error
+ */
+enum erspan_bso {
+ BSO_NOERROR = 0x0,
+ BSO_SHORT = 0x1,
+ BSO_OVERSIZED = 0x2,
+ BSO_BAD = 0x3,
+};
+
+static inline u8 erspan_detect_bso(struct sk_buff *skb)
+{
+ /* BSO_BAD is not handled because the frame CRC
+ * or alignment error information is in FCS.
+ */
+ if (skb->len < ETH_ZLEN)
+ return BSO_SHORT;
+
+ if (skb->len > ETH_FRAME_LEN)
+ return BSO_OVERSIZED;
+
+ return BSO_NOERROR;
+}
+
+static inline void erspan_build_header_v2(struct sk_buff *skb,
+ u32 id, u8 direction, u16 hwid,
+ bool truncate, bool is_ipv4)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ struct erspan_base_hdr *ershdr;
+ struct erspan_md2 *md2;
+ struct qtag_prefix {
+ __be16 eth_type;
+ __be16 tci;
+ } *qp;
+ u16 vlan_tci = 0;
+ u8 gra = 0; /* 100 usec */
+ u8 bso = 0; /* Bad/Short/Oversized */
+ u8 sgt = 0;
+ u8 tos;
+
+ tos = is_ipv4 ? ip_hdr(skb)->tos :
+ (ipv6_hdr(skb)->priority << 4) +
+ (ipv6_hdr(skb)->flow_lbl[0] >> 4);
+
+	/* Unlike v1, v2 does not have an En field, so only
+	 * extract the VLAN TCI field.
+	 */
+ if (eth->h_proto == htons(ETH_P_8021Q)) {
+ qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
+ vlan_tci = ntohs(qp->tci);
+ }
+
+ bso = erspan_detect_bso(skb);
+ skb_push(skb, sizeof(*ershdr) + ERSPAN_V2_MDSIZE);
+ ershdr = (struct erspan_base_hdr *)skb->data;
+ memset(ershdr, 0, sizeof(*ershdr) + ERSPAN_V2_MDSIZE);
+
+ /* Build base header */
+ ershdr->ver = ERSPAN_VERSION2;
+ ershdr->cos = tos_to_cos(tos);
+ ershdr->en = bso;
+ ershdr->t = truncate;
+ set_vlan(ershdr, vlan_tci);
+ set_session_id(ershdr, id);
+
+ /* Build metadata */
+ md2 = (struct erspan_md2 *)(ershdr + 1);
+ md2->timestamp = erspan_get_timestamp();
+ md2->sgt = htons(sgt);
+ md2->p = 1;
+ md2->ft = 0;
+ md2->dir = direction;
+ md2->gra = gra;
+ md2->o = 0;
+ set_hwid(md2, hwid);
+}
+
+#endif
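Because the session ID, VLAN and hardware ID fields straddle byte boundaries in the on-wire layout, the header provides the split set_*()/get_*() accessors above instead of plain bitfields. An illustrative round-trip with arbitrary values (in real use hdr and md2 point into an skb; erspan_accessor_demo() is hypothetical):

#include <linux/bug.h>
#include <net/erspan.h>

static void erspan_accessor_demo(void)
{
	struct erspan_base_hdr hdr = {};
	struct erspan_md2 md2 = {};

	set_session_id(&hdr, 0x2ab);	/* 10 bits: 2 upper + 8 lower */
	set_vlan(&hdr, 0xabc);		/* 12 bits: 4 upper + 8 lower */
	set_hwid(&md2, 0x2f);		/*  6 bits: 2 upper + 4 lower */

	WARN_ON(get_session_id(&hdr) != 0x2ab);
	WARN_ON(get_vlan(&hdr) != 0xabc);
	WARN_ON(get_hwid(&md2) != 0x2f);

	/* v1 metadata is a 4-byte index; v2 is an 8-byte struct erspan_md2 */
	WARN_ON(erspan_hdr_len(1) !=
		sizeof(struct erspan_base_hdr) + ERSPAN_V1_MDSIZE);
}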
diff --git a/include/net/esp.h b/include/net/esp.h
new file mode 100644
index 000000000..322950727
--- /dev/null
+++ b/include/net/esp.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_ESP_H
+#define _NET_ESP_H
+
+#include <linux/skbuff.h>
+
+struct ip_esp_hdr;
+struct xfrm_state;
+
+static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
+{
+ return (struct ip_esp_hdr *)skb_transport_header(skb);
+}
+
+static inline void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
+{
+	int i;
+
+	/* Fill padding... */
+	if (tfclen) {
+		memset(tail, 0, tfclen);
+		tail += tfclen;
+	}
+	for (i = 0; i < plen - 2; i++)
+		tail[i] = i + 1;
+	tail[plen - 2] = plen - 2;
+	tail[plen - 1] = proto;
+}
+
+struct esp_info {
+ struct ip_esp_hdr *esph;
+ __be64 seqno;
+ int tfclen;
+ int tailen;
+ int plen;
+ int clen;
+ int len;
+ int nfrags;
+ __u8 proto;
+ bool inplace;
+};
+
+int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp_input_done2(struct sk_buff *skb, int err);
+int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp6_input_done2(struct sk_buff *skb, int err);
+#endif
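esp_output_fill_trailer() writes the RFC 4303 ESP trailer: optional zeroed TFC padding, self-describing pad bytes 1, 2, 3, ..., then the pad-length and next-header octets. A small sketch of the resulting layout (esp_trailer_demo() is hypothetical):

#include <linux/bug.h>
#include <linux/in.h>	/* IPPROTO_TCP */
#include <net/esp.h>

static void esp_trailer_demo(void)
{
	u8 tail[4];

	/* plen = 4, no TFC padding */
	esp_output_fill_trailer(tail, 0, sizeof(tail), IPPROTO_TCP);

	WARN_ON(tail[0] != 1 || tail[1] != 2);	/* self-describing pad bytes */
	WARN_ON(tail[2] != 2);			/* pad length */
	WARN_ON(tail[3] != IPPROTO_TCP);	/* next header */
}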
diff --git a/include/net/espintcp.h b/include/net/espintcp.h
new file mode 100644
index 000000000..0335bbd76
--- /dev/null
+++ b/include/net/espintcp.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_ESPINTCP_H
+#define _NET_ESPINTCP_H
+
+#include <net/strparser.h>
+#include <linux/skmsg.h>
+
+void __init espintcp_init(void);
+
+int espintcp_push_skb(struct sock *sk, struct sk_buff *skb);
+int espintcp_queue_out(struct sock *sk, struct sk_buff *skb);
+bool tcp_is_ulp_esp(struct sock *sk);
+
+struct espintcp_msg {
+ struct sk_buff *skb;
+ struct sk_msg skmsg;
+ int offset;
+ int len;
+};
+
+struct espintcp_ctx {
+ struct strparser strp;
+ struct sk_buff_head ike_queue;
+ struct sk_buff_head out_queue;
+ struct espintcp_msg partial;
+ void (*saved_data_ready)(struct sock *sk);
+ void (*saved_write_space)(struct sock *sk);
+ void (*saved_destruct)(struct sock *sk);
+ struct work_struct work;
+ bool tx_running;
+};
+
+static inline struct espintcp_ctx *espintcp_getctx(const struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ /* RCU is only needed for diag */
+ return (__force void *)icsk->icsk_ulp_data;
+}
+#endif
diff --git a/include/net/ethoc.h b/include/net/ethoc.h
new file mode 100644
index 000000000..73810f3ca
--- /dev/null
+++ b/include/net/ethoc.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/include/net/ethoc.h
+ *
+ * Copyright (C) 2008-2009 Avionic Design GmbH
+ *
+ * Written by Thierry Reding <thierry.reding@avionic-design.de>
+ */
+
+#ifndef LINUX_NET_ETHOC_H
+#define LINUX_NET_ETHOC_H 1
+
+#include <linux/if.h>
+#include <linux/types.h>
+
+struct ethoc_platform_data {
+ u8 hwaddr[IFHWADDRLEN];
+ s8 phy_id;
+ u32 eth_clkfreq;
+ bool big_endian;
+};
+
+#endif /* !LINUX_NET_ETHOC_H */
diff --git a/include/net/failover.h b/include/net/failover.h
new file mode 100644
index 000000000..f2b42b4b9
--- /dev/null
+++ b/include/net/failover.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _FAILOVER_H
+#define _FAILOVER_H
+
+#include <linux/netdevice.h>
+
+struct failover_ops {
+ int (*slave_pre_register)(struct net_device *slave_dev,
+ struct net_device *failover_dev);
+ int (*slave_register)(struct net_device *slave_dev,
+ struct net_device *failover_dev);
+ int (*slave_pre_unregister)(struct net_device *slave_dev,
+ struct net_device *failover_dev);
+ int (*slave_unregister)(struct net_device *slave_dev,
+ struct net_device *failover_dev);
+ int (*slave_link_change)(struct net_device *slave_dev,
+ struct net_device *failover_dev);
+ int (*slave_name_change)(struct net_device *slave_dev,
+ struct net_device *failover_dev);
+ rx_handler_result_t (*slave_handle_frame)(struct sk_buff **pskb);
+};
+
+struct failover {
+ struct list_head list;
+ struct net_device __rcu *failover_dev;
+ netdevice_tracker dev_tracker;
+ struct failover_ops __rcu *ops;
+};
+
+struct failover *failover_register(struct net_device *dev,
+ struct failover_ops *ops);
+void failover_unregister(struct failover *failover);
+int failover_slave_unregister(struct net_device *slave_dev);
+
+#endif /* _FAILOVER_H */
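A paravirtual master device registers a failover instance so the core can pair it with slave netdevs as they come and go (net_failover is the in-tree user). A hedged sketch; my_failover_ops and my_setup() are illustrative only:

#include <linux/err.h>
#include <net/failover.h>

static struct failover_ops my_failover_ops = {
	/* the slave_* callbacks fire as matching slave netdevs register,
	 * unregister, change link state or are renamed
	 */
};

static int my_setup(struct net_device *failover_dev)
{
	struct failover *fo;

	fo = failover_register(failover_dev, &my_failover_ops);
	if (IS_ERR(fo))
		return PTR_ERR(fo);

	/* ... on teardown: failover_unregister(fo); */
	return 0;
}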
diff --git a/include/net/fib_notifier.h b/include/net/fib_notifier.h
new file mode 100644
index 000000000..6d59221ff
--- /dev/null
+++ b/include/net/fib_notifier.h
@@ -0,0 +1,51 @@
+#ifndef __NET_FIB_NOTIFIER_H
+#define __NET_FIB_NOTIFIER_H
+
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <net/net_namespace.h>
+
+struct module;
+
+struct fib_notifier_info {
+ int family;
+ struct netlink_ext_ack *extack;
+};
+
+enum fib_event_type {
+ FIB_EVENT_ENTRY_REPLACE,
+ FIB_EVENT_ENTRY_APPEND,
+ FIB_EVENT_ENTRY_ADD,
+ FIB_EVENT_ENTRY_DEL,
+ FIB_EVENT_RULE_ADD,
+ FIB_EVENT_RULE_DEL,
+ FIB_EVENT_NH_ADD,
+ FIB_EVENT_NH_DEL,
+ FIB_EVENT_VIF_ADD,
+ FIB_EVENT_VIF_DEL,
+};
+
+struct fib_notifier_ops {
+ int family;
+ struct list_head list;
+ unsigned int (*fib_seq_read)(struct net *net);
+ int (*fib_dump)(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
+ struct module *owner;
+ struct rcu_head rcu;
+};
+
+int call_fib_notifier(struct notifier_block *nb,
+ enum fib_event_type event_type,
+ struct fib_notifier_info *info);
+int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
+ struct fib_notifier_info *info);
+int register_fib_notifier(struct net *net, struct notifier_block *nb,
+ void (*cb)(struct notifier_block *nb),
+ struct netlink_ext_ack *extack);
+int unregister_fib_notifier(struct net *net, struct notifier_block *nb);
+struct fib_notifier_ops *
+fib_notifier_ops_register(const struct fib_notifier_ops *tmpl, struct net *net);
+void fib_notifier_ops_unregister(struct fib_notifier_ops *ops);
+
+#endif
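Consumers such as switch drivers that mirror the FIB into hardware register a notifier block per namespace; registration replays the existing entries and retries via the flush callback if the dump races with concurrent FIB changes. A hedged sketch with hypothetical my_* names:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <net/fib_notifier.h>

static int my_fib_event(struct notifier_block *nb, unsigned long event,
			void *ptr)
{
	struct fib_notifier_info *info = ptr;

	if (event == FIB_EVENT_ENTRY_ADD || event == FIB_EVENT_ENTRY_DEL)
		pr_debug("FIB event %lu, family %d\n", event, info->family);

	return NOTIFY_DONE;
}

static struct notifier_block my_fib_nb = {
	.notifier_call = my_fib_event,
};

/* Called when the initial dump raced with FIB changes and any state
 * accumulated so far must be discarded before the dump is retried.
 */
static void my_fib_flush(struct notifier_block *nb)
{
}

static int my_init(struct net *net, struct netlink_ext_ack *extack)
{
	return register_fib_notifier(net, &my_fib_nb, my_fib_flush, extack);
}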
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
new file mode 100644
index 000000000..82da359bc
--- /dev/null
+++ b/include/net/fib_rules.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_FIB_RULES_H
+#define __NET_FIB_RULES_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/fib_rules.h>
+#include <linux/refcount.h>
+#include <net/flow.h>
+#include <net/rtnetlink.h>
+#include <net/fib_notifier.h>
+#include <linux/indirect_call_wrapper.h>
+
+struct fib_kuid_range {
+ kuid_t start;
+ kuid_t end;
+};
+
+struct fib_rule {
+ struct list_head list;
+ int iifindex;
+ int oifindex;
+ u32 mark;
+ u32 mark_mask;
+ u32 flags;
+ u32 table;
+ u8 action;
+ u8 l3mdev;
+ u8 proto;
+ u8 ip_proto;
+ u32 target;
+ __be64 tun_id;
+ struct fib_rule __rcu *ctarget;
+ struct net *fr_net;
+
+ refcount_t refcnt;
+ u32 pref;
+ int suppress_ifgroup;
+ int suppress_prefixlen;
+ char iifname[IFNAMSIZ];
+ char oifname[IFNAMSIZ];
+ struct fib_kuid_range uid_range;
+ struct fib_rule_port_range sport_range;
+ struct fib_rule_port_range dport_range;
+ struct rcu_head rcu;
+};
+
+struct fib_lookup_arg {
+ void *lookup_ptr;
+ const void *lookup_data;
+ void *result;
+ struct fib_rule *rule;
+ u32 table;
+ int flags;
+#define FIB_LOOKUP_NOREF 1
+#define FIB_LOOKUP_IGNORE_LINKSTATE 2
+};
+
+struct fib_rules_ops {
+ int family;
+ struct list_head list;
+ int rule_size;
+ int addr_size;
+ int unresolved_rules;
+ int nr_goto_rules;
+ unsigned int fib_rules_seq;
+
+ int (*action)(struct fib_rule *,
+ struct flowi *, int,
+ struct fib_lookup_arg *);
+ bool (*suppress)(struct fib_rule *, int,
+ struct fib_lookup_arg *);
+ int (*match)(struct fib_rule *,
+ struct flowi *, int);
+ int (*configure)(struct fib_rule *,
+ struct sk_buff *,
+ struct fib_rule_hdr *,
+ struct nlattr **,
+ struct netlink_ext_ack *);
+ int (*delete)(struct fib_rule *);
+ int (*compare)(struct fib_rule *,
+ struct fib_rule_hdr *,
+ struct nlattr **);
+ int (*fill)(struct fib_rule *, struct sk_buff *,
+ struct fib_rule_hdr *);
+ size_t (*nlmsg_payload)(struct fib_rule *);
+
+ /* Called after modifications to the rules set, must flush
+ * the route cache if one exists. */
+ void (*flush_cache)(struct fib_rules_ops *ops);
+
+ int nlgroup;
+ struct list_head rules_list;
+ struct module *owner;
+ struct net *fro_net;
+ struct rcu_head rcu;
+};
+
+struct fib_rule_notifier_info {
+ struct fib_notifier_info info; /* must be first */
+ struct fib_rule *rule;
+};
+
+static inline void fib_rule_get(struct fib_rule *rule)
+{
+ refcount_inc(&rule->refcnt);
+}
+
+static inline void fib_rule_put(struct fib_rule *rule)
+{
+ if (refcount_dec_and_test(&rule->refcnt))
+ kfree_rcu(rule, rcu);
+}
+
+#ifdef CONFIG_NET_L3_MASTER_DEV
+static inline u32 fib_rule_get_table(struct fib_rule *rule,
+ struct fib_lookup_arg *arg)
+{
+ return rule->l3mdev ? arg->table : rule->table;
+}
+#else
+static inline u32 fib_rule_get_table(struct fib_rule *rule,
+ struct fib_lookup_arg *arg)
+{
+ return rule->table;
+}
+#endif
+
+static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
+{
+ if (nla[FRA_TABLE])
+ return nla_get_u32(nla[FRA_TABLE]);
+ return frh->table;
+}
+
+static inline bool fib_rule_port_range_set(const struct fib_rule_port_range *range)
+{
+ return range->start != 0 && range->end != 0;
+}
+
+static inline bool fib_rule_port_inrange(const struct fib_rule_port_range *a,
+ __be16 port)
+{
+ return ntohs(port) >= a->start &&
+ ntohs(port) <= a->end;
+}
+
+static inline bool fib_rule_port_range_valid(const struct fib_rule_port_range *a)
+{
+ return a->start != 0 && a->end != 0 && a->end < 0xffff &&
+ a->start <= a->end;
+}
+
+static inline bool fib_rule_port_range_compare(struct fib_rule_port_range *a,
+ struct fib_rule_port_range *b)
+{
+ return a->start == b->start &&
+ a->end == b->end;
+}
+
+static inline bool fib_rule_requires_fldissect(struct fib_rule *rule)
+{
+ return rule->iifindex != LOOPBACK_IFINDEX && (rule->ip_proto ||
+ fib_rule_port_range_set(&rule->sport_range) ||
+ fib_rule_port_range_set(&rule->dport_range));
+}
+
+struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *,
+ struct net *);
+void fib_rules_unregister(struct fib_rules_ops *);
+
+int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
+ struct fib_lookup_arg *);
+int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
+ u32 flags);
+bool fib_rule_matchall(const struct fib_rule *rule);
+int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
+ struct netlink_ext_ack *extack);
+unsigned int fib_rules_seq_read(struct net *net, int family);
+
+int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack);
+int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack);
+
+INDIRECT_CALLABLE_DECLARE(int fib6_rule_match(struct fib_rule *rule,
+ struct flowi *fl, int flags));
+INDIRECT_CALLABLE_DECLARE(int fib4_rule_match(struct fib_rule *rule,
+ struct flowi *fl, int flags));
+
+INDIRECT_CALLABLE_DECLARE(int fib6_rule_action(struct fib_rule *rule,
+ struct flowi *flp, int flags,
+ struct fib_lookup_arg *arg));
+INDIRECT_CALLABLE_DECLARE(int fib4_rule_action(struct fib_rule *rule,
+ struct flowi *flp, int flags,
+ struct fib_lookup_arg *arg));
+
+INDIRECT_CALLABLE_DECLARE(bool fib6_rule_suppress(struct fib_rule *rule,
+ int flags,
+ struct fib_lookup_arg *arg));
+INDIRECT_CALLABLE_DECLARE(bool fib4_rule_suppress(struct fib_rule *rule,
+ int flags,
+ struct fib_lookup_arg *arg));
+#endif
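The port-range helpers above encode "unset" as start == end == 0, so a matcher must test fib_rule_port_range_set() before comparing. An illustrative wrapper (demo_sport_match() is hypothetical):

#include <net/fib_rules.h>

static bool demo_sport_match(const struct fib_rule *rule, __be16 sport)
{
	const struct fib_rule_port_range *r = &rule->sport_range;

	if (!fib_rule_port_range_set(r))
		return true;	/* no range configured: any port matches */

	return fib_rule_port_inrange(r, sport);	/* host-order range check */
}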
diff --git a/include/net/firewire.h b/include/net/firewire.h
new file mode 100644
index 000000000..8fbff8d77
--- /dev/null
+++ b/include/net/firewire.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_FIREWIRE_H
+#define _NET_FIREWIRE_H
+
+#include <linux/types.h>
+
+/* Pseudo L2 address */
+#define FWNET_ALEN 16
+union fwnet_hwaddr {
+ u8 u[FWNET_ALEN];
+	/* "Hardware address" defined in RFC2734/RFC3146 */
+ struct {
+ __be64 uniq_id; /* EUI-64 */
+ u8 max_rec; /* max packet size */
+ u8 sspd; /* max speed */
+ u8 fifo[6]; /* FIFO addr */
+ } __packed uc;
+};
+
+/* Pseudo L2 Header */
+#define FWNET_HLEN 18
+struct fwnet_header {
+ u8 h_dest[FWNET_ALEN]; /* destination address */
+ __be16 h_proto; /* packet type ID field */
+} __packed;
+
+#endif
diff --git a/include/net/flow.h b/include/net/flow.h
new file mode 100644
index 000000000..079cc493f
--- /dev/null
+++ b/include/net/flow.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *	Generic internet FLOW.
+ */
+
+#ifndef _NET_FLOW_H
+#define _NET_FLOW_H
+
+#include <linux/socket.h>
+#include <linux/in6.h>
+#include <linux/atomic.h>
+#include <net/flow_dissector.h>
+#include <linux/uidgid.h>
+
+/*
+ * ifindex generation is per-net namespace, and loopback is
+ * always the 1st device in ns (see net_dev_init), thus any
+ * loopback device should get ifindex 1
+ */
+
+#define LOOPBACK_IFINDEX 1
+
+struct flowi_tunnel {
+ __be64 tun_id;
+};
+
+struct flowi_common {
+ int flowic_oif;
+ int flowic_iif;
+ int flowic_l3mdev;
+ __u32 flowic_mark;
+ __u8 flowic_tos;
+ __u8 flowic_scope;
+ __u8 flowic_proto;
+ __u8 flowic_flags;
+#define FLOWI_FLAG_ANYSRC 0x01
+#define FLOWI_FLAG_KNOWN_NH 0x02
+ __u32 flowic_secid;
+ kuid_t flowic_uid;
+ __u32 flowic_multipath_hash;
+ struct flowi_tunnel flowic_tun_key;
+};
+
+union flowi_uli {
+ struct {
+ __be16 dport;
+ __be16 sport;
+ } ports;
+
+ struct {
+ __u8 type;
+ __u8 code;
+ } icmpt;
+
+ __be32 gre_key;
+
+ struct {
+ __u8 type;
+ } mht;
+};
+
+struct flowi4 {
+ struct flowi_common __fl_common;
+#define flowi4_oif __fl_common.flowic_oif
+#define flowi4_iif __fl_common.flowic_iif
+#define flowi4_l3mdev __fl_common.flowic_l3mdev
+#define flowi4_mark __fl_common.flowic_mark
+#define flowi4_tos __fl_common.flowic_tos
+#define flowi4_scope __fl_common.flowic_scope
+#define flowi4_proto __fl_common.flowic_proto
+#define flowi4_flags __fl_common.flowic_flags
+#define flowi4_secid __fl_common.flowic_secid
+#define flowi4_tun_key __fl_common.flowic_tun_key
+#define flowi4_uid __fl_common.flowic_uid
+#define flowi4_multipath_hash __fl_common.flowic_multipath_hash
+
+ /* (saddr,daddr) must be grouped, same order as in IP header */
+ __be32 saddr;
+ __be32 daddr;
+
+ union flowi_uli uli;
+#define fl4_sport uli.ports.sport
+#define fl4_dport uli.ports.dport
+#define fl4_icmp_type uli.icmpt.type
+#define fl4_icmp_code uli.icmpt.code
+#define fl4_mh_type uli.mht.type
+#define fl4_gre_key uli.gre_key
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
+
+static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
+ __u32 mark, __u8 tos, __u8 scope,
+ __u8 proto, __u8 flags,
+ __be32 daddr, __be32 saddr,
+ __be16 dport, __be16 sport,
+ kuid_t uid)
+{
+ fl4->flowi4_oif = oif;
+ fl4->flowi4_iif = LOOPBACK_IFINDEX;
+ fl4->flowi4_l3mdev = 0;
+ fl4->flowi4_mark = mark;
+ fl4->flowi4_tos = tos;
+ fl4->flowi4_scope = scope;
+ fl4->flowi4_proto = proto;
+ fl4->flowi4_flags = flags;
+ fl4->flowi4_secid = 0;
+ fl4->flowi4_tun_key.tun_id = 0;
+ fl4->flowi4_uid = uid;
+ fl4->daddr = daddr;
+ fl4->saddr = saddr;
+ fl4->fl4_dport = dport;
+ fl4->fl4_sport = sport;
+ fl4->flowi4_multipath_hash = 0;
+}
+
+/* Reset some input parameters after previous lookup */
+static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos,
+ __be32 daddr, __be32 saddr)
+{
+ fl4->flowi4_oif = oif;
+ fl4->flowi4_tos = tos;
+ fl4->daddr = daddr;
+ fl4->saddr = saddr;
+}
+
+
+struct flowi6 {
+ struct flowi_common __fl_common;
+#define flowi6_oif __fl_common.flowic_oif
+#define flowi6_iif __fl_common.flowic_iif
+#define flowi6_l3mdev __fl_common.flowic_l3mdev
+#define flowi6_mark __fl_common.flowic_mark
+#define flowi6_scope __fl_common.flowic_scope
+#define flowi6_proto __fl_common.flowic_proto
+#define flowi6_flags __fl_common.flowic_flags
+#define flowi6_secid __fl_common.flowic_secid
+#define flowi6_tun_key __fl_common.flowic_tun_key
+#define flowi6_uid __fl_common.flowic_uid
+ struct in6_addr daddr;
+ struct in6_addr saddr;
+ /* Note: flowi6_tos is encoded in flowlabel, too. */
+ __be32 flowlabel;
+ union flowi_uli uli;
+#define fl6_sport uli.ports.sport
+#define fl6_dport uli.ports.dport
+#define fl6_icmp_type uli.icmpt.type
+#define fl6_icmp_code uli.icmpt.code
+#define fl6_mh_type uli.mht.type
+#define fl6_gre_key uli.gre_key
+ __u32 mp_hash;
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
+
+struct flowi {
+ union {
+ struct flowi_common __fl_common;
+ struct flowi4 ip4;
+ struct flowi6 ip6;
+ } u;
+#define flowi_oif u.__fl_common.flowic_oif
+#define flowi_iif u.__fl_common.flowic_iif
+#define flowi_l3mdev u.__fl_common.flowic_l3mdev
+#define flowi_mark u.__fl_common.flowic_mark
+#define flowi_tos u.__fl_common.flowic_tos
+#define flowi_scope u.__fl_common.flowic_scope
+#define flowi_proto u.__fl_common.flowic_proto
+#define flowi_flags u.__fl_common.flowic_flags
+#define flowi_secid u.__fl_common.flowic_secid
+#define flowi_tun_key u.__fl_common.flowic_tun_key
+#define flowi_uid u.__fl_common.flowic_uid
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
+
+static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
+{
+ return container_of(fl4, struct flowi, u.ip4);
+}
+
+static inline struct flowi_common *flowi4_to_flowi_common(struct flowi4 *fl4)
+{
+ return &(fl4->__fl_common);
+}
+
+static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6)
+{
+ return container_of(fl6, struct flowi, u.ip6);
+}
+
+static inline struct flowi_common *flowi6_to_flowi_common(struct flowi6 *fl6)
+{
+ return &(fl6->__fl_common);
+}
+
+__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys);
+
+#endif
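Output-path callers fill a struct flowi4 with flowi4_init_output() before the route lookup. A minimal sketch in that style; addresses and ports are arbitrary and GLOBAL_ROOT_UID stands in for the real socket UID:

#include <linux/in.h>
#include <linux/rtnetlink.h>	/* RT_SCOPE_UNIVERSE */
#include <net/flow.h>

static void demo_flow_key(struct flowi4 *fl4)
{
	flowi4_init_output(fl4, 0 /* oif: any */, 0 /* mark */,
			   0 /* tos */, RT_SCOPE_UNIVERSE, IPPROTO_UDP,
			   0 /* flags */,
			   htonl(0xc0a80001) /* daddr 192.168.0.1 */,
			   0 /* saddr: let routing choose */,
			   htons(4789) /* dport */, htons(12345) /* sport */,
			   GLOBAL_ROOT_UID);
}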
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
new file mode 100644
index 000000000..5ccf52ef8
--- /dev/null
+++ b/include/net/flow_dissector.h
@@ -0,0 +1,438 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_FLOW_DISSECTOR_H
+#define _NET_FLOW_DISSECTOR_H
+
+#include <linux/types.h>
+#include <linux/in6.h>
+#include <linux/siphash.h>
+#include <linux/string.h>
+#include <uapi/linux/if_ether.h>
+
+struct bpf_prog;
+struct net;
+struct sk_buff;
+
+/**
+ * struct flow_dissector_key_control:
+ * @thoff: Transport header offset
+ * @addr_type: Type of addresses carried in the 'addrs' key
+ * @flags: FLOW_DIS_* control flags
+ */
+struct flow_dissector_key_control {
+ u16 thoff;
+ u16 addr_type;
+ u32 flags;
+};
+
+#define FLOW_DIS_IS_FRAGMENT BIT(0)
+#define FLOW_DIS_FIRST_FRAG BIT(1)
+#define FLOW_DIS_ENCAPSULATION BIT(2)
+
+enum flow_dissect_ret {
+ FLOW_DISSECT_RET_OUT_GOOD,
+ FLOW_DISSECT_RET_OUT_BAD,
+ FLOW_DISSECT_RET_PROTO_AGAIN,
+ FLOW_DISSECT_RET_IPPROTO_AGAIN,
+ FLOW_DISSECT_RET_CONTINUE,
+};
+
+/**
+ * struct flow_dissector_key_basic:
+ * @n_proto: Network header protocol (eg. IPv4/IPv6)
+ * @ip_proto: Transport header protocol (eg. TCP/UDP)
+ */
+struct flow_dissector_key_basic {
+ __be16 n_proto;
+ u8 ip_proto;
+ u8 padding;
+};
+
+struct flow_dissector_key_tags {
+ u32 flow_label;
+};
+
+struct flow_dissector_key_vlan {
+ union {
+ struct {
+ u16 vlan_id:12,
+ vlan_dei:1,
+ vlan_priority:3;
+ };
+ __be16 vlan_tci;
+ };
+ __be16 vlan_tpid;
+ __be16 vlan_eth_type;
+ u16 padding;
+};
+
+struct flow_dissector_mpls_lse {
+ u32 mpls_ttl:8,
+ mpls_bos:1,
+ mpls_tc:3,
+ mpls_label:20;
+};
+
+#define FLOW_DIS_MPLS_MAX 7
+struct flow_dissector_key_mpls {
+ struct flow_dissector_mpls_lse ls[FLOW_DIS_MPLS_MAX]; /* Label Stack */
+ u8 used_lses; /* One bit set for each Label Stack Entry in use */
+};
+
+static inline void dissector_set_mpls_lse(struct flow_dissector_key_mpls *mpls,
+ int lse_index)
+{
+ mpls->used_lses |= 1 << lse_index;
+}
+
+#define FLOW_DIS_TUN_OPTS_MAX 255
+/**
+ * struct flow_dissector_key_enc_opts:
+ * @data: tunnel option data
+ * @len: length of tunnel option data
+ * @dst_opt_type: tunnel option type
+ */
+struct flow_dissector_key_enc_opts {
+ u8 data[FLOW_DIS_TUN_OPTS_MAX]; /* Using IP_TUNNEL_OPTS_MAX is desired
+ * here but seems difficult to #include
+ */
+ u8 len;
+ __be16 dst_opt_type;
+};
+
+struct flow_dissector_key_keyid {
+ __be32 keyid;
+};
+
+/**
+ * struct flow_dissector_key_ipv4_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct flow_dissector_key_ipv4_addrs {
+	/* (src,dst) must be grouped, in the same way as in the IP header */
+ __be32 src;
+ __be32 dst;
+};
+
+/**
+ * struct flow_dissector_key_ipv6_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct flow_dissector_key_ipv6_addrs {
+	/* (src,dst) must be grouped, in the same way as in the IP header */
+ struct in6_addr src;
+ struct in6_addr dst;
+};
+
+/**
+ * struct flow_dissector_key_tipc:
+ * @key: source node address combined with selector
+ */
+struct flow_dissector_key_tipc {
+ __be32 key;
+};
+
+/**
+ * struct flow_dissector_key_addrs:
+ * @v4addrs: IPv4 addresses
+ * @v6addrs: IPv6 addresses
+ */
+struct flow_dissector_key_addrs {
+ union {
+ struct flow_dissector_key_ipv4_addrs v4addrs;
+ struct flow_dissector_key_ipv6_addrs v6addrs;
+ struct flow_dissector_key_tipc tipckey;
+ };
+};
+
+/**
+ * flow_dissector_key_arp:
+ *	Operation, sender and target addresses for an ARP header carrying
+ *	Ethernet hardware addresses and IPv4 protocol addresses
+ *	sip: Sender IP address
+ *	tip: Target IP address
+ *	op: Operation
+ *	sha: Sender hardware address
+ *	tha: Target hardware address
+ */
+struct flow_dissector_key_arp {
+ __u32 sip;
+ __u32 tip;
+ __u8 op;
+ unsigned char sha[ETH_ALEN];
+ unsigned char tha[ETH_ALEN];
+};
+
+/**
+ * flow_dissector_key_ports:
+ * @ports: port numbers of Transport header
+ * src: source port number
+ * dst: destination port number
+ */
+struct flow_dissector_key_ports {
+ union {
+ __be32 ports;
+ struct {
+ __be16 src;
+ __be16 dst;
+ };
+ };
+};
+
+/**
+ * struct flow_dissector_key_ports_range
+ * @tp: port number from packet
+ * @tp_min: min port number in range
+ * @tp_max: max port number in range
+ */
+struct flow_dissector_key_ports_range {
+ union {
+ struct flow_dissector_key_ports tp;
+ struct {
+ struct flow_dissector_key_ports tp_min;
+ struct flow_dissector_key_ports tp_max;
+ };
+ };
+};
+
+/**
+ * flow_dissector_key_icmp:
+ * type: ICMP type
+ * code: ICMP code
+ * id: session identifier
+ */
+struct flow_dissector_key_icmp {
+ struct {
+ u8 type;
+ u8 code;
+ };
+ u16 id;
+};
+
+/**
+ * struct flow_dissector_key_eth_addrs:
+ * @src: source Ethernet address
+ * @dst: destination Ethernet address
+ */
+struct flow_dissector_key_eth_addrs {
+	/* (dst,src) must be grouped, in the same way as in the Ethernet header */
+ unsigned char dst[ETH_ALEN];
+ unsigned char src[ETH_ALEN];
+};
+
+/**
+ * struct flow_dissector_key_tcp:
+ * @flags: flags
+ */
+struct flow_dissector_key_tcp {
+ __be16 flags;
+};
+
+/**
+ * struct flow_dissector_key_ip:
+ * @tos: tos
+ * @ttl: ttl
+ */
+struct flow_dissector_key_ip {
+ __u8 tos;
+ __u8 ttl;
+};
+
+/**
+ * struct flow_dissector_key_meta:
+ * @ingress_ifindex: ingress ifindex
+ * @ingress_iftype: ingress interface type
+ */
+struct flow_dissector_key_meta {
+ int ingress_ifindex;
+ u16 ingress_iftype;
+};
+
+/**
+ * struct flow_dissector_key_ct:
+ * @ct_state: conntrack state after converting with map
+ * @ct_mark: conntrack mark
+ * @ct_zone: conntrack zone
+ * @ct_labels: conntrack labels
+ */
+struct flow_dissector_key_ct {
+ u16 ct_state;
+ u16 ct_zone;
+ u32 ct_mark;
+ u32 ct_labels[4];
+};
+
+/**
+ * struct flow_dissector_key_hash:
+ * @hash: hash value
+ */
+struct flow_dissector_key_hash {
+ u32 hash;
+};
+
+/**
+ * struct flow_dissector_key_num_of_vlans:
+ * @num_of_vlans: num_of_vlans value
+ */
+struct flow_dissector_key_num_of_vlans {
+ u8 num_of_vlans;
+};
+
+/**
+ * struct flow_dissector_key_pppoe:
+ * @session_id: pppoe session id
+ * @ppp_proto: ppp protocol
+ * @type: pppoe eth type
+ */
+struct flow_dissector_key_pppoe {
+ __be16 session_id;
+ __be16 ppp_proto;
+ __be16 type;
+};
+
+/**
+ * struct flow_dissector_key_l2tpv3:
+ * @session_id: identifier for a l2tp session
+ */
+struct flow_dissector_key_l2tpv3 {
+ __be32 session_id;
+};
+
+enum flow_dissector_key_id {
+ FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
+ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */
+ FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
+ FLOW_DISSECTOR_KEY_PORTS_RANGE, /* struct flow_dissector_key_ports */
+ FLOW_DISSECTOR_KEY_ICMP, /* struct flow_dissector_key_icmp */
+ FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
+ FLOW_DISSECTOR_KEY_TIPC, /* struct flow_dissector_key_tipc */
+ FLOW_DISSECTOR_KEY_ARP, /* struct flow_dissector_key_arp */
+ FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_vlan */
+ FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_tags */
+ FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
+ FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
+ FLOW_DISSECTOR_KEY_ENC_KEYID, /* struct flow_dissector_key_keyid */
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */
+ FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */
+ FLOW_DISSECTOR_KEY_ENC_CONTROL, /* struct flow_dissector_key_control */
+ FLOW_DISSECTOR_KEY_ENC_PORTS, /* struct flow_dissector_key_ports */
+ FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */
+ FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */
+ FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */
+ FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_vlan */
+ FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */
+ FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */
+ FLOW_DISSECTOR_KEY_META, /* struct flow_dissector_key_meta */
+ FLOW_DISSECTOR_KEY_CT, /* struct flow_dissector_key_ct */
+ FLOW_DISSECTOR_KEY_HASH, /* struct flow_dissector_key_hash */
+ FLOW_DISSECTOR_KEY_NUM_OF_VLANS, /* struct flow_dissector_key_num_of_vlans */
+ FLOW_DISSECTOR_KEY_PPPOE, /* struct flow_dissector_key_pppoe */
+ FLOW_DISSECTOR_KEY_L2TPV3, /* struct flow_dissector_key_l2tpv3 */
+
+ FLOW_DISSECTOR_KEY_MAX,
+};
+
+#define FLOW_DISSECTOR_F_PARSE_1ST_FRAG BIT(0)
+#define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL BIT(1)
+#define FLOW_DISSECTOR_F_STOP_AT_ENCAP BIT(2)
+#define FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP BIT(3)
+
+struct flow_dissector_key {
+ enum flow_dissector_key_id key_id;
+	size_t offset; /* offset of struct flow_dissector_key_*
+			  in the target struct */
+};
+
+struct flow_dissector {
+	unsigned int used_keys; /* each bit represents presence of one key id */
+ unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
+};
+
+struct flow_keys_basic {
+ struct flow_dissector_key_control control;
+ struct flow_dissector_key_basic basic;
+};
+
+struct flow_keys {
+ struct flow_dissector_key_control control;
+#define FLOW_KEYS_HASH_START_FIELD basic
+ struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT);
+ struct flow_dissector_key_tags tags;
+ struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_vlan cvlan;
+ struct flow_dissector_key_keyid keyid;
+ struct flow_dissector_key_ports ports;
+ struct flow_dissector_key_icmp icmp;
+ /* 'addrs' must be the last member */
+ struct flow_dissector_key_addrs addrs;
+};
+
+#define FLOW_KEYS_HASH_OFFSET \
+ offsetof(struct flow_keys, FLOW_KEYS_HASH_START_FIELD)
+
+__be32 flow_get_u32_src(const struct flow_keys *flow);
+__be32 flow_get_u32_dst(const struct flow_keys *flow);
+
+extern struct flow_dissector flow_keys_dissector;
+extern struct flow_dissector flow_keys_basic_dissector;
+
+/* struct flow_keys_digest:
+ *
+ * This structure is used to hold a digest of the full flow keys. This is a
+ * larger "hash" of a flow to allow definitively matching specific flows where
+ * the 32 bit skb->hash is not large enough. The size is limited to 16 bytes so
+ * that it can be used in CB of skb (see sch_choke for an example).
+ */
+#define FLOW_KEYS_DIGEST_LEN 16
+struct flow_keys_digest {
+ u8 data[FLOW_KEYS_DIGEST_LEN];
+};
+
+void make_flow_keys_digest(struct flow_keys_digest *digest,
+ const struct flow_keys *flow);
+
+static inline bool flow_keys_have_l4(const struct flow_keys *keys)
+{
+ return (keys->ports.ports || keys->tags.flow_label);
+}
+
+u32 flow_hash_from_keys(struct flow_keys *keys);
+void skb_flow_get_icmp_tci(const struct sk_buff *skb,
+ struct flow_dissector_key_icmp *key_icmp,
+ const void *data, int thoff, int hlen);
+
+static inline bool dissector_uses_key(const struct flow_dissector *flow_dissector,
+ enum flow_dissector_key_id key_id)
+{
+ return flow_dissector->used_keys & (1 << key_id);
+}
+
+static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissector,
+ enum flow_dissector_key_id key_id,
+ void *target_container)
+{
+ return ((char *)target_container) + flow_dissector->offset[key_id];
+}
+
+struct bpf_flow_dissector {
+ struct bpf_flow_keys *flow_keys;
+ const struct sk_buff *skb;
+ const void *data;
+ const void *data_end;
+};
+
+static inline void
+flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
+ struct flow_dissector_key_basic *key_basic)
+{
+ memset(key_control, 0, sizeof(*key_control));
+ memset(key_basic, 0, sizeof(*key_basic));
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+int flow_dissector_bpf_prog_attach_check(struct net *net,
+ struct bpf_prog *prog);
+#endif /* CONFIG_BPF_SYSCALL */
+
+#endif
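The dissector records, per key id, the offset of the matching key struct inside a caller-supplied container; dissector_uses_key() and skb_flow_dissector_target() are the access pattern. A hedged consumer sketch (struct my_keys and demo_ip_proto() are illustrative):

#include <net/flow_dissector.h>

struct my_keys {
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
};

static u8 demo_ip_proto(struct flow_dissector *d, struct my_keys *keys)
{
	struct flow_dissector_key_basic *basic;

	if (!dissector_uses_key(d, FLOW_DISSECTOR_KEY_BASIC))
		return 0;

	/* resolve this key's offset inside the target container */
	basic = skb_flow_dissector_target(d, FLOW_DISSECTOR_KEY_BASIC, keys);
	return basic->ip_proto;
}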
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
new file mode 100644
index 000000000..e343f9f83
--- /dev/null
+++ b/include/net/flow_offload.h
@@ -0,0 +1,636 @@
+#ifndef _NET_FLOW_OFFLOAD_H
+#define _NET_FLOW_OFFLOAD_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netlink.h>
+#include <net/flow_dissector.h>
+
+struct flow_match {
+ struct flow_dissector *dissector;
+ void *mask;
+ void *key;
+};
+
+struct flow_match_meta {
+ struct flow_dissector_key_meta *key, *mask;
+};
+
+struct flow_match_basic {
+ struct flow_dissector_key_basic *key, *mask;
+};
+
+struct flow_match_control {
+ struct flow_dissector_key_control *key, *mask;
+};
+
+struct flow_match_eth_addrs {
+ struct flow_dissector_key_eth_addrs *key, *mask;
+};
+
+struct flow_match_vlan {
+ struct flow_dissector_key_vlan *key, *mask;
+};
+
+struct flow_match_ipv4_addrs {
+ struct flow_dissector_key_ipv4_addrs *key, *mask;
+};
+
+struct flow_match_ipv6_addrs {
+ struct flow_dissector_key_ipv6_addrs *key, *mask;
+};
+
+struct flow_match_ip {
+ struct flow_dissector_key_ip *key, *mask;
+};
+
+struct flow_match_ports {
+ struct flow_dissector_key_ports *key, *mask;
+};
+
+struct flow_match_ports_range {
+ struct flow_dissector_key_ports_range *key, *mask;
+};
+
+struct flow_match_icmp {
+ struct flow_dissector_key_icmp *key, *mask;
+};
+
+struct flow_match_tcp {
+ struct flow_dissector_key_tcp *key, *mask;
+};
+
+struct flow_match_mpls {
+ struct flow_dissector_key_mpls *key, *mask;
+};
+
+struct flow_match_enc_keyid {
+ struct flow_dissector_key_keyid *key, *mask;
+};
+
+struct flow_match_enc_opts {
+ struct flow_dissector_key_enc_opts *key, *mask;
+};
+
+struct flow_match_ct {
+ struct flow_dissector_key_ct *key, *mask;
+};
+
+struct flow_match_pppoe {
+ struct flow_dissector_key_pppoe *key, *mask;
+};
+
+struct flow_match_l2tpv3 {
+ struct flow_dissector_key_l2tpv3 *key, *mask;
+};
+
+struct flow_rule;
+
+void flow_rule_match_meta(const struct flow_rule *rule,
+ struct flow_match_meta *out);
+void flow_rule_match_basic(const struct flow_rule *rule,
+ struct flow_match_basic *out);
+void flow_rule_match_control(const struct flow_rule *rule,
+ struct flow_match_control *out);
+void flow_rule_match_eth_addrs(const struct flow_rule *rule,
+ struct flow_match_eth_addrs *out);
+void flow_rule_match_vlan(const struct flow_rule *rule,
+ struct flow_match_vlan *out);
+void flow_rule_match_cvlan(const struct flow_rule *rule,
+ struct flow_match_vlan *out);
+void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv4_addrs *out);
+void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv6_addrs *out);
+void flow_rule_match_ip(const struct flow_rule *rule,
+ struct flow_match_ip *out);
+void flow_rule_match_ports(const struct flow_rule *rule,
+ struct flow_match_ports *out);
+void flow_rule_match_ports_range(const struct flow_rule *rule,
+ struct flow_match_ports_range *out);
+void flow_rule_match_tcp(const struct flow_rule *rule,
+ struct flow_match_tcp *out);
+void flow_rule_match_icmp(const struct flow_rule *rule,
+ struct flow_match_icmp *out);
+void flow_rule_match_mpls(const struct flow_rule *rule,
+ struct flow_match_mpls *out);
+void flow_rule_match_enc_control(const struct flow_rule *rule,
+ struct flow_match_control *out);
+void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv4_addrs *out);
+void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv6_addrs *out);
+void flow_rule_match_enc_ip(const struct flow_rule *rule,
+ struct flow_match_ip *out);
+void flow_rule_match_enc_ports(const struct flow_rule *rule,
+ struct flow_match_ports *out);
+void flow_rule_match_enc_keyid(const struct flow_rule *rule,
+ struct flow_match_enc_keyid *out);
+void flow_rule_match_enc_opts(const struct flow_rule *rule,
+ struct flow_match_enc_opts *out);
+void flow_rule_match_ct(const struct flow_rule *rule,
+ struct flow_match_ct *out);
+void flow_rule_match_pppoe(const struct flow_rule *rule,
+ struct flow_match_pppoe *out);
+void flow_rule_match_l2tpv3(const struct flow_rule *rule,
+ struct flow_match_l2tpv3 *out);
+
+enum flow_action_id {
+ FLOW_ACTION_ACCEPT = 0,
+ FLOW_ACTION_DROP,
+ FLOW_ACTION_TRAP,
+ FLOW_ACTION_GOTO,
+ FLOW_ACTION_REDIRECT,
+ FLOW_ACTION_MIRRED,
+ FLOW_ACTION_REDIRECT_INGRESS,
+ FLOW_ACTION_MIRRED_INGRESS,
+ FLOW_ACTION_VLAN_PUSH,
+ FLOW_ACTION_VLAN_POP,
+ FLOW_ACTION_VLAN_MANGLE,
+ FLOW_ACTION_TUNNEL_ENCAP,
+ FLOW_ACTION_TUNNEL_DECAP,
+ FLOW_ACTION_MANGLE,
+ FLOW_ACTION_ADD,
+ FLOW_ACTION_CSUM,
+ FLOW_ACTION_MARK,
+ FLOW_ACTION_PTYPE,
+ FLOW_ACTION_PRIORITY,
+ FLOW_ACTION_WAKE,
+ FLOW_ACTION_QUEUE,
+ FLOW_ACTION_SAMPLE,
+ FLOW_ACTION_POLICE,
+ FLOW_ACTION_CT,
+ FLOW_ACTION_CT_METADATA,
+ FLOW_ACTION_MPLS_PUSH,
+ FLOW_ACTION_MPLS_POP,
+ FLOW_ACTION_MPLS_MANGLE,
+ FLOW_ACTION_GATE,
+ FLOW_ACTION_PPPOE_PUSH,
+ FLOW_ACTION_JUMP,
+ FLOW_ACTION_PIPE,
+ FLOW_ACTION_VLAN_PUSH_ETH,
+ FLOW_ACTION_VLAN_POP_ETH,
+ FLOW_ACTION_CONTINUE,
+ NUM_FLOW_ACTIONS,
+};
+
+/* This mirrors the enum pedit_header_type definition for easy mapping to
+ * the tc pedit action. The legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is
+ * mapped to FLOW_ACT_MANGLE_UNSPEC, which no driver currently supports.
+ */
+enum flow_action_mangle_base {
+ FLOW_ACT_MANGLE_UNSPEC = 0,
+ FLOW_ACT_MANGLE_HDR_TYPE_ETH,
+ FLOW_ACT_MANGLE_HDR_TYPE_IP4,
+ FLOW_ACT_MANGLE_HDR_TYPE_IP6,
+ FLOW_ACT_MANGLE_HDR_TYPE_TCP,
+ FLOW_ACT_MANGLE_HDR_TYPE_UDP,
+};
+
+enum flow_action_hw_stats_bit {
+ FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
+ FLOW_ACTION_HW_STATS_DELAYED_BIT,
+ FLOW_ACTION_HW_STATS_DISABLED_BIT,
+
+ FLOW_ACTION_HW_STATS_NUM_BITS
+};
+
+enum flow_action_hw_stats {
+ FLOW_ACTION_HW_STATS_IMMEDIATE =
+ BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT),
+ FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT),
+ FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
+ FLOW_ACTION_HW_STATS_DELAYED,
+ FLOW_ACTION_HW_STATS_DISABLED =
+ BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT),
+ FLOW_ACTION_HW_STATS_DONT_CARE = BIT(FLOW_ACTION_HW_STATS_NUM_BITS) - 1,
+};
+
+typedef void (*action_destr)(void *priv);
+
+struct flow_action_cookie {
+ u32 cookie_len;
+ u8 cookie[];
+};
+
+struct flow_action_cookie *flow_action_cookie_create(void *data,
+ unsigned int len,
+ gfp_t gfp);
+void flow_action_cookie_destroy(struct flow_action_cookie *cookie);
+
+struct flow_action_entry {
+ enum flow_action_id id;
+ u32 hw_index;
+ enum flow_action_hw_stats hw_stats;
+ action_destr destructor;
+ void *destructor_priv;
+ union {
+ u32 chain_index; /* FLOW_ACTION_GOTO */
+ struct net_device *dev; /* FLOW_ACTION_REDIRECT */
+ struct { /* FLOW_ACTION_VLAN */
+ u16 vid;
+ __be16 proto;
+ u8 prio;
+ } vlan;
+ struct { /* FLOW_ACTION_VLAN_PUSH_ETH */
+ unsigned char dst[ETH_ALEN];
+ unsigned char src[ETH_ALEN];
+ } vlan_push_eth;
+ struct { /* FLOW_ACTION_MANGLE */
+ /* FLOW_ACTION_ADD */
+ enum flow_action_mangle_base htype;
+ u32 offset;
+ u32 mask;
+ u32 val;
+ } mangle;
+ struct ip_tunnel_info *tunnel; /* FLOW_ACTION_TUNNEL_ENCAP */
+ u32 csum_flags; /* FLOW_ACTION_CSUM */
+ u32 mark; /* FLOW_ACTION_MARK */
+ u16 ptype; /* FLOW_ACTION_PTYPE */
+ u32 priority; /* FLOW_ACTION_PRIORITY */
+ struct { /* FLOW_ACTION_QUEUE */
+ u32 ctx;
+ u32 index;
+ u8 vf;
+ } queue;
+ struct { /* FLOW_ACTION_SAMPLE */
+ struct psample_group *psample_group;
+ u32 rate;
+ u32 trunc_size;
+ bool truncate;
+ } sample;
+ struct { /* FLOW_ACTION_POLICE */
+ u32 burst;
+ u64 rate_bytes_ps;
+ u64 peakrate_bytes_ps;
+ u32 avrate;
+ u16 overhead;
+ u64 burst_pkt;
+ u64 rate_pkt_ps;
+ u32 mtu;
+ struct {
+ enum flow_action_id act_id;
+ u32 extval;
+ } exceed, notexceed;
+ } police;
+ struct { /* FLOW_ACTION_CT */
+ int action;
+ u16 zone;
+ struct nf_flowtable *flow_table;
+ } ct;
+ struct {
+ unsigned long cookie;
+ u32 mark;
+ u32 labels[4];
+ bool orig_dir;
+ } ct_metadata;
+ struct { /* FLOW_ACTION_MPLS_PUSH */
+ u32 label;
+ __be16 proto;
+ u8 tc;
+ u8 bos;
+ u8 ttl;
+ } mpls_push;
+ struct { /* FLOW_ACTION_MPLS_POP */
+ __be16 proto;
+ } mpls_pop;
+ struct { /* FLOW_ACTION_MPLS_MANGLE */
+ u32 label;
+ u8 tc;
+ u8 bos;
+ u8 ttl;
+ } mpls_mangle;
+ struct {
+ s32 prio;
+ u64 basetime;
+ u64 cycletime;
+ u64 cycletimeext;
+ u32 num_entries;
+ struct action_gate_entry *entries;
+ } gate;
+ struct { /* FLOW_ACTION_PPPOE_PUSH */
+ u16 sid;
+ } pppoe;
+ };
+ struct flow_action_cookie *cookie; /* user defined action cookie */
+};
+
+struct flow_action {
+ unsigned int num_entries;
+ struct flow_action_entry entries[];
+};
+
+static inline bool flow_action_has_entries(const struct flow_action *action)
+{
+ return action->num_entries;
+}
+
+/**
+ * flow_offload_has_one_action() - check if exactly one action is present
+ * @action: tc filter flow offload action
+ *
+ * Returns true if exactly one action is present.
+ */
+static inline bool flow_offload_has_one_action(const struct flow_action *action)
+{
+ return action->num_entries == 1;
+}
+
+static inline bool flow_action_is_last_entry(const struct flow_action *action,
+ const struct flow_action_entry *entry)
+{
+ return entry == &action->entries[action->num_entries - 1];
+}
+
+#define flow_action_for_each(__i, __act, __actions) \
+ for (__i = 0, __act = &(__actions)->entries[0]; \
+ __i < (__actions)->num_entries; \
+ __act = &(__actions)->entries[++__i])
+
+static inline bool
+flow_action_mixed_hw_stats_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack)
+{
+ const struct flow_action_entry *action_entry;
+ u8 last_hw_stats;
+ int i;
+
+ if (flow_offload_has_one_action(action))
+ return true;
+
+ flow_action_for_each(i, action_entry, action) {
+ if (i && action_entry->hw_stats != last_hw_stats) {
+ NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
+ return false;
+ }
+ last_hw_stats = action_entry->hw_stats;
+ }
+ return true;
+}
+
+static inline const struct flow_action_entry *
+flow_action_first_entry_get(const struct flow_action *action)
+{
+ WARN_ON(!flow_action_has_entries(action));
+ return &action->entries[0];
+}
+
+static inline bool
+__flow_action_hw_stats_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack,
+ bool check_allow_bit,
+ enum flow_action_hw_stats_bit allow_bit)
+{
+ const struct flow_action_entry *action_entry;
+
+ if (!flow_action_has_entries(action))
+ return true;
+ if (!flow_action_mixed_hw_stats_check(action, extack))
+ return false;
+
+ action_entry = flow_action_first_entry_get(action);
+
+ /* Zero is not a legal value for hw_stats, catch anyone passing it */
+ WARN_ON_ONCE(!action_entry->hw_stats);
+
+ if (!check_allow_bit &&
+ ~action_entry->hw_stats & FLOW_ACTION_HW_STATS_ANY) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
+ return false;
+ } else if (check_allow_bit &&
+ !(action_entry->hw_stats & BIT(allow_bit))) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver does not support selected HW stats type");
+ return false;
+ }
+ return true;
+}
+
+static inline bool
+flow_action_hw_stats_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack,
+ enum flow_action_hw_stats_bit allow_bit)
+{
+ return __flow_action_hw_stats_check(action, extack, true, allow_bit);
+}
+
+static inline bool
+flow_action_basic_hw_stats_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack)
+{
+ return __flow_action_hw_stats_check(action, extack, false, 0);
+}
+
+struct flow_rule {
+ struct flow_match match;
+ struct flow_action action;
+};
+
+struct flow_rule *flow_rule_alloc(unsigned int num_actions);
+
+static inline bool flow_rule_match_key(const struct flow_rule *rule,
+ enum flow_dissector_key_id key)
+{
+ return dissector_uses_key(rule->match.dissector, key);
+}
+
+struct flow_stats {
+ u64 pkts;
+ u64 bytes;
+ u64 drops;
+ u64 lastused;
+ enum flow_action_hw_stats used_hw_stats;
+ bool used_hw_stats_valid;
+};
+
+static inline void flow_stats_update(struct flow_stats *flow_stats,
+ u64 bytes, u64 pkts,
+ u64 drops, u64 lastused,
+ enum flow_action_hw_stats used_hw_stats)
+{
+ flow_stats->pkts += pkts;
+ flow_stats->bytes += bytes;
+ flow_stats->drops += drops;
+ flow_stats->lastused = max_t(u64, flow_stats->lastused, lastused);
+
+	/* The driver should pass a value with at most one bit set.
+	 * Passing FLOW_ACTION_HW_STATS_ANY is invalid.
+	 */
+ WARN_ON(used_hw_stats == FLOW_ACTION_HW_STATS_ANY);
+ flow_stats->used_hw_stats |= used_hw_stats;
+ flow_stats->used_hw_stats_valid = true;
+}
+
+enum flow_block_command {
+ FLOW_BLOCK_BIND,
+ FLOW_BLOCK_UNBIND,
+};
+
+enum flow_block_binder_type {
+ FLOW_BLOCK_BINDER_TYPE_UNSPEC,
+ FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
+ FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
+ FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
+ FLOW_BLOCK_BINDER_TYPE_RED_MARK,
+};
+
+struct flow_block {
+ struct list_head cb_list;
+};
+
+struct netlink_ext_ack;
+
+struct flow_block_offload {
+ enum flow_block_command command;
+ enum flow_block_binder_type binder_type;
+ bool block_shared;
+ bool unlocked_driver_cb;
+ struct net *net;
+ struct flow_block *block;
+ struct list_head cb_list;
+ struct list_head *driver_block_list;
+ struct netlink_ext_ack *extack;
+ struct Qdisc *sch;
+ struct list_head *cb_list_head;
+};
+
+enum tc_setup_type;
+typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+
+struct flow_block_cb;
+
+struct flow_block_indr {
+ struct list_head list;
+ struct net_device *dev;
+ struct Qdisc *sch;
+ enum flow_block_binder_type binder_type;
+ void *data;
+ void *cb_priv;
+ void (*cleanup)(struct flow_block_cb *block_cb);
+};
+
+struct flow_block_cb {
+ struct list_head driver_list;
+ struct list_head list;
+ flow_setup_cb_t *cb;
+ void *cb_ident;
+ void *cb_priv;
+ void (*release)(void *cb_priv);
+ struct flow_block_indr indr;
+ unsigned int refcnt;
+};
+
+struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
+ void *cb_ident, void *cb_priv,
+ void (*release)(void *cb_priv));
+struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
+ void *cb_ident, void *cb_priv,
+ void (*release)(void *cb_priv),
+ struct flow_block_offload *bo,
+ struct net_device *dev,
+ struct Qdisc *sch, void *data,
+ void *indr_cb_priv,
+ void (*cleanup)(struct flow_block_cb *block_cb));
+void flow_block_cb_free(struct flow_block_cb *block_cb);
+
+struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
+ flow_setup_cb_t *cb, void *cb_ident);
+
+void *flow_block_cb_priv(struct flow_block_cb *block_cb);
+void flow_block_cb_incref(struct flow_block_cb *block_cb);
+unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);
+
+static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
+ struct flow_block_offload *offload)
+{
+ list_add_tail(&block_cb->list, &offload->cb_list);
+}
+
+static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
+ struct flow_block_offload *offload)
+{
+ list_move(&block_cb->list, &offload->cb_list);
+}
+
+static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
+ struct flow_block_offload *offload)
+{
+ list_del(&block_cb->indr.list);
+ list_move(&block_cb->list, &offload->cb_list);
+}
+
+bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
+ struct list_head *driver_block_list);
+
+int flow_block_cb_setup_simple(struct flow_block_offload *f,
+ struct list_head *driver_list,
+ flow_setup_cb_t *cb,
+ void *cb_ident, void *cb_priv, bool ingress_only);
+
+enum flow_cls_command {
+ FLOW_CLS_REPLACE,
+ FLOW_CLS_DESTROY,
+ FLOW_CLS_STATS,
+ FLOW_CLS_TMPLT_CREATE,
+ FLOW_CLS_TMPLT_DESTROY,
+};
+
+struct flow_cls_common_offload {
+ u32 chain_index;
+ __be16 protocol;
+ u32 prio;
+ struct netlink_ext_ack *extack;
+};
+
+struct flow_cls_offload {
+ struct flow_cls_common_offload common;
+ enum flow_cls_command command;
+ unsigned long cookie;
+ struct flow_rule *rule;
+ struct flow_stats stats;
+ u32 classid;
+};
+
+enum offload_act_command {
+ FLOW_ACT_REPLACE,
+ FLOW_ACT_DESTROY,
+ FLOW_ACT_STATS,
+};
+
+struct flow_offload_action {
+	struct netlink_ext_ack *extack; /* NULL in the FLOW_ACT_STATS case */
+ enum offload_act_command command;
+ enum flow_action_id id;
+ u32 index;
+ struct flow_stats stats;
+ struct flow_action action;
+};
+
+struct flow_offload_action *offload_action_alloc(unsigned int num_actions);
+
+static inline struct flow_rule *
+flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
+{
+ return flow_cmd->rule;
+}
+
+static inline void flow_block_init(struct flow_block *flow_block)
+{
+ INIT_LIST_HEAD(&flow_block->cb_list);
+}
+
+typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
+ enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb));
+
+int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
+void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
+ void (*release)(void *cb_priv));
+int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
+ enum tc_setup_type type, void *data,
+ struct flow_block_offload *bo,
+ void (*cleanup)(struct flow_block_cb *block_cb));
+bool flow_indr_dev_exists(void);
+
+#endif /* _NET_FLOW_OFFLOAD_H */
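Drivers consuming an offloaded classifier rule typically check which dissector keys are present, extract the key/mask pairs, then walk the action list. A hedged sketch of that pattern; demo_parse() is illustrative and error handling is trimmed:

#include <linux/errno.h>
#include <net/flow_offload.h>

static int demo_parse(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	const struct flow_action_entry *act;
	int i;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		/* match.key / match.mask now point into the rule */
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			/* program a hardware drop rule here */
			break;
		default:
			return -EOPNOTSUPP;	/* unsupported action */
		}
	}
	return 0;
}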
diff --git a/include/net/fou.h b/include/net/fou.h
new file mode 100644
index 000000000..80f56e275
--- /dev/null
+++ b/include/net/fou.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_FOU_H
+#define __NET_FOU_H
+
+#include <linux/skbuff.h>
+
+#include <net/flow.h>
+#include <net/gue.h>
+#include <net/ip_tunnels.h>
+#include <net/udp.h>
+
+size_t fou_encap_hlen(struct ip_tunnel_encap *e);
+size_t gue_encap_hlen(struct ip_tunnel_encap *e);
+
+int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, __be16 *sport, int type);
+int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, __be16 *sport, int type);
+
+#endif
diff --git a/include/net/fq.h b/include/net/fq.h
new file mode 100644
index 000000000..07b5aff6e
--- /dev/null
+++ b/include/net/fq.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc
+ *
+ * Based on net/sched/sch_fq_codel.c
+ */
+#ifndef __NET_SCHED_FQ_H
+#define __NET_SCHED_FQ_H
+
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct fq_tin;
+
+/**
+ * struct fq_flow - per traffic flow queue
+ *
+ * @tin: owner of this flow. Used to manage collisions, i.e. when a packet
+ *	hashes to an index that points to a flow already owned by a tin other
+ *	than the one the packet is destined to. In such a case the implementer
+ *	must provide a fallback flow
+ * @flowchain: can be linked to fq_tin's new_flows or old_flows. Used for DRR++
+ * (deficit round robin) based round robin queuing similar to the one
+ * found in net/sched/sch_fq_codel.c
+ * @queue: sk_buff queue to hold packets
+ * @backlog: number of bytes pending in the queue. The number of packets can be
+ * found in @queue.qlen
+ * @deficit: used for DRR++
+ */
+struct fq_flow {
+ struct fq_tin *tin;
+ struct list_head flowchain;
+ struct sk_buff_head queue;
+ u32 backlog;
+ int deficit;
+};
+
+/**
+ * struct fq_tin - a logical container of fq_flows
+ *
+ * Used to group fq_flows into a logical aggregate. The DRR++ scheme is used to
+ * pull interleaved packets out of the associated flows.
+ *
+ * @new_flows: linked list of fq_flow
+ * @old_flows: linked list of fq_flow
+ */
+struct fq_tin {
+ struct list_head new_flows;
+ struct list_head old_flows;
+ struct list_head tin_list;
+ struct fq_flow default_flow;
+ u32 backlog_bytes;
+ u32 backlog_packets;
+ u32 overlimit;
+ u32 collisions;
+ u32 flows;
+ u32 tx_bytes;
+ u32 tx_packets;
+};
+
+/**
+ * struct fq - main container for fair queuing purposes
+ *
+ * @limit: max number of packets that can be queued across all flows
+ * @backlog: number of packets queued across all flows
+ */
+struct fq {
+ struct fq_flow *flows;
+ unsigned long *flows_bitmap;
+
+ struct list_head tin_backlog;
+ spinlock_t lock;
+ u32 flows_cnt;
+ u32 limit;
+ u32 memory_limit;
+ u32 memory_usage;
+ u32 quantum;
+ u32 backlog;
+ u32 overlimit;
+ u32 overmemory;
+ u32 collisions;
+};
+
+typedef struct sk_buff *fq_tin_dequeue_t(struct fq *,
+ struct fq_tin *,
+ struct fq_flow *flow);
+
+typedef void fq_skb_free_t(struct fq *,
+ struct fq_tin *,
+ struct fq_flow *,
+ struct sk_buff *);
+
+/* Return %true to filter (drop) the frame. */
+typedef bool fq_skb_filter_t(struct fq *,
+ struct fq_tin *,
+ struct fq_flow *,
+ struct sk_buff *,
+ void *);
+
+typedef struct fq_flow *fq_flow_get_default_t(struct fq *,
+ struct fq_tin *,
+ int idx,
+ struct sk_buff *);
+
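+/* A brief usage sketch (illustrative only): callbacks a user such as a
+ * wireless driver might supply. fq_flow_dequeue() is provided by
+ * <net/fq_impl.h>; the my_* names are hypothetical.
+ *
+ *	static void my_free(struct fq *fq, struct fq_tin *tin,
+ *			    struct fq_flow *flow, struct sk_buff *skb)
+ *	{
+ *		dev_kfree_skb_any(skb);
+ *	}
+ *
+ *	static struct sk_buff *my_dequeue(struct fq *fq, struct fq_tin *tin,
+ *					  struct fq_flow *flow)
+ *	{
+ *		return fq_flow_dequeue(fq, flow);
+ *	}
+ */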
+#endif
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
new file mode 100644
index 000000000..524b510f1
--- /dev/null
+++ b/include/net/fq_impl.h
@@ -0,0 +1,389 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc
+ *
+ * Based on net/sched/sch_fq_codel.c
+ */
+#ifndef __NET_SCHED_FQ_IMPL_H
+#define __NET_SCHED_FQ_IMPL_H
+
+#include <net/fq.h>
+
+/* Functions that are embedded into the includer. */
+
+
+static void
+__fq_adjust_removal(struct fq *fq, struct fq_flow *flow, unsigned int packets,
+ unsigned int bytes, unsigned int truesize)
+{
+ struct fq_tin *tin = flow->tin;
+ int idx;
+
+ tin->backlog_bytes -= bytes;
+ tin->backlog_packets -= packets;
+ flow->backlog -= bytes;
+ fq->backlog -= packets;
+ fq->memory_usage -= truesize;
+
+ if (flow->backlog)
+ return;
+
+ if (flow == &tin->default_flow) {
+ list_del_init(&tin->tin_list);
+ return;
+ }
+
+ idx = flow - fq->flows;
+ __clear_bit(idx, fq->flows_bitmap);
+}
+
+static void fq_adjust_removal(struct fq *fq,
+ struct fq_flow *flow,
+ struct sk_buff *skb)
+{
+ __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize);
+}
+
+static struct sk_buff *fq_flow_dequeue(struct fq *fq,
+ struct fq_flow *flow)
+{
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&fq->lock);
+
+ skb = __skb_dequeue(&flow->queue);
+ if (!skb)
+ return NULL;
+
+ fq_adjust_removal(fq, flow, skb);
+
+ return skb;
+}
+
+static int fq_flow_drop(struct fq *fq, struct fq_flow *flow,
+ fq_skb_free_t free_func)
+{
+ unsigned int packets = 0, bytes = 0, truesize = 0;
+ struct fq_tin *tin = flow->tin;
+ struct sk_buff *skb;
+ int pending;
+
+ lockdep_assert_held(&fq->lock);
+
+ pending = min_t(int, 32, skb_queue_len(&flow->queue) / 2);
+ do {
+ skb = __skb_dequeue(&flow->queue);
+ if (!skb)
+ break;
+
+ packets++;
+ bytes += skb->len;
+ truesize += skb->truesize;
+ free_func(fq, tin, flow, skb);
+ } while (packets < pending);
+
+ __fq_adjust_removal(fq, flow, packets, bytes, truesize);
+
+ return packets;
+}
+
+static struct sk_buff *fq_tin_dequeue(struct fq *fq,
+ struct fq_tin *tin,
+ fq_tin_dequeue_t dequeue_func)
+{
+ struct fq_flow *flow;
+ struct list_head *head;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&fq->lock);
+
+begin:
+ head = &tin->new_flows;
+ if (list_empty(head)) {
+ head = &tin->old_flows;
+ if (list_empty(head))
+ return NULL;
+ }
+
+ flow = list_first_entry(head, struct fq_flow, flowchain);
+
+ if (flow->deficit <= 0) {
+ flow->deficit += fq->quantum;
+ list_move_tail(&flow->flowchain,
+ &tin->old_flows);
+ goto begin;
+ }
+
+ skb = dequeue_func(fq, tin, flow);
+ if (!skb) {
+ /* force a pass through old_flows to prevent starvation */
+ if ((head == &tin->new_flows) &&
+ !list_empty(&tin->old_flows)) {
+ list_move_tail(&flow->flowchain, &tin->old_flows);
+ } else {
+ list_del_init(&flow->flowchain);
+ flow->tin = NULL;
+ }
+ goto begin;
+ }
+
+ flow->deficit -= skb->len;
+ tin->tx_bytes += skb->len;
+ tin->tx_packets++;
+
+ return skb;
+}
+
+static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
+{
+ u32 hash = skb_get_hash(skb);
+
+ return reciprocal_scale(hash, fq->flows_cnt);
+}
+
+static struct fq_flow *fq_flow_classify(struct fq *fq,
+ struct fq_tin *tin, u32 idx,
+ struct sk_buff *skb)
+{
+ struct fq_flow *flow;
+
+ lockdep_assert_held(&fq->lock);
+
+ flow = &fq->flows[idx];
+ if (flow->tin && flow->tin != tin) {
+ flow = &tin->default_flow;
+ tin->collisions++;
+ fq->collisions++;
+ }
+
+ if (!flow->tin)
+ tin->flows++;
+
+ return flow;
+}
+
+static struct fq_flow *fq_find_fattest_flow(struct fq *fq)
+{
+ struct fq_tin *tin;
+ struct fq_flow *flow = NULL;
+ u32 len = 0;
+ int i;
+
+ for_each_set_bit(i, fq->flows_bitmap, fq->flows_cnt) {
+ struct fq_flow *cur = &fq->flows[i];
+ unsigned int cur_len;
+
+ cur_len = cur->backlog;
+ if (cur_len <= len)
+ continue;
+
+ flow = cur;
+ len = cur_len;
+ }
+
+ list_for_each_entry(tin, &fq->tin_backlog, tin_list) {
+ unsigned int cur_len = tin->default_flow.backlog;
+
+ if (cur_len <= len)
+ continue;
+
+ flow = &tin->default_flow;
+ len = cur_len;
+ }
+
+ return flow;
+}
+
+static void fq_tin_enqueue(struct fq *fq,
+ struct fq_tin *tin, u32 idx,
+ struct sk_buff *skb,
+ fq_skb_free_t free_func)
+{
+ struct fq_flow *flow;
+ bool oom;
+
+ lockdep_assert_held(&fq->lock);
+
+ flow = fq_flow_classify(fq, tin, idx, skb);
+
+ if (!flow->backlog) {
+ if (flow != &tin->default_flow)
+ __set_bit(idx, fq->flows_bitmap);
+ else if (list_empty(&tin->tin_list))
+ list_add(&tin->tin_list, &fq->tin_backlog);
+ }
+
+ flow->tin = tin;
+ flow->backlog += skb->len;
+ tin->backlog_bytes += skb->len;
+ tin->backlog_packets++;
+ fq->memory_usage += skb->truesize;
+ fq->backlog++;
+
+ if (list_empty(&flow->flowchain)) {
+ flow->deficit = fq->quantum;
+ list_add_tail(&flow->flowchain,
+ &tin->new_flows);
+ }
+
+ __skb_queue_tail(&flow->queue, skb);
+ oom = (fq->memory_usage > fq->memory_limit);
+ while (fq->backlog > fq->limit || oom) {
+ flow = fq_find_fattest_flow(fq);
+ if (!flow)
+ return;
+
+ if (!fq_flow_drop(fq, flow, free_func))
+ return;
+
+ flow->tin->overlimit++;
+ fq->overlimit++;
+ if (oom) {
+ fq->overmemory++;
+ oom = (fq->memory_usage > fq->memory_limit);
+ }
+ }
+}
+
+static void fq_flow_filter(struct fq *fq,
+ struct fq_flow *flow,
+ fq_skb_filter_t filter_func,
+ void *filter_data,
+ fq_skb_free_t free_func)
+{
+ struct fq_tin *tin = flow->tin;
+ struct sk_buff *skb, *tmp;
+
+ lockdep_assert_held(&fq->lock);
+
+ skb_queue_walk_safe(&flow->queue, skb, tmp) {
+ if (!filter_func(fq, tin, flow, skb, filter_data))
+ continue;
+
+ __skb_unlink(skb, &flow->queue);
+ fq_adjust_removal(fq, flow, skb);
+ free_func(fq, tin, flow, skb);
+ }
+}
+
+static void fq_tin_filter(struct fq *fq,
+ struct fq_tin *tin,
+ fq_skb_filter_t filter_func,
+ void *filter_data,
+ fq_skb_free_t free_func)
+{
+ struct fq_flow *flow;
+
+ lockdep_assert_held(&fq->lock);
+
+ list_for_each_entry(flow, &tin->new_flows, flowchain)
+ fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
+ list_for_each_entry(flow, &tin->old_flows, flowchain)
+ fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
+}
+
+static void fq_flow_reset(struct fq *fq,
+ struct fq_flow *flow,
+ fq_skb_free_t free_func)
+{
+ struct fq_tin *tin = flow->tin;
+ struct sk_buff *skb;
+
+ while ((skb = fq_flow_dequeue(fq, flow)))
+ free_func(fq, tin, flow, skb);
+
+ if (!list_empty(&flow->flowchain)) {
+ list_del_init(&flow->flowchain);
+ if (list_empty(&tin->new_flows) &&
+ list_empty(&tin->old_flows))
+ list_del_init(&tin->tin_list);
+ }
+
+ flow->tin = NULL;
+
+ WARN_ON_ONCE(flow->backlog);
+}
+
+static void fq_tin_reset(struct fq *fq,
+ struct fq_tin *tin,
+ fq_skb_free_t free_func)
+{
+ struct list_head *head;
+ struct fq_flow *flow;
+
+ for (;;) {
+ head = &tin->new_flows;
+ if (list_empty(head)) {
+ head = &tin->old_flows;
+ if (list_empty(head))
+ break;
+ }
+
+ flow = list_first_entry(head, struct fq_flow, flowchain);
+ fq_flow_reset(fq, flow, free_func);
+ }
+
+ WARN_ON_ONCE(!list_empty(&tin->tin_list));
+ WARN_ON_ONCE(tin->backlog_bytes);
+ WARN_ON_ONCE(tin->backlog_packets);
+}
+
+static void fq_flow_init(struct fq_flow *flow)
+{
+ INIT_LIST_HEAD(&flow->flowchain);
+ __skb_queue_head_init(&flow->queue);
+}
+
+static void fq_tin_init(struct fq_tin *tin)
+{
+ INIT_LIST_HEAD(&tin->new_flows);
+ INIT_LIST_HEAD(&tin->old_flows);
+ INIT_LIST_HEAD(&tin->tin_list);
+ fq_flow_init(&tin->default_flow);
+}
+
+static int fq_init(struct fq *fq, int flows_cnt)
+{
+ int i;
+
+ memset(fq, 0, sizeof(fq[0]));
+ spin_lock_init(&fq->lock);
+ INIT_LIST_HEAD(&fq->tin_backlog);
+ fq->flows_cnt = max_t(u32, flows_cnt, 1);
+ fq->quantum = 300;
+ fq->limit = 8192;
+ fq->memory_limit = 16 << 20; /* 16 MBytes */
+
+ fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
+ if (!fq->flows)
+ return -ENOMEM;
+
+ fq->flows_bitmap = bitmap_zalloc(fq->flows_cnt, GFP_KERNEL);
+ if (!fq->flows_bitmap) {
+ kvfree(fq->flows);
+ fq->flows = NULL;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < fq->flows_cnt; i++)
+ fq_flow_init(&fq->flows[i]);
+
+ return 0;
+}
+
+static void fq_reset(struct fq *fq,
+ fq_skb_free_t free_func)
+{
+ int i;
+
+ for (i = 0; i < fq->flows_cnt; i++)
+ fq_flow_reset(fq, &fq->flows[i], free_func);
+
+ kvfree(fq->flows);
+ fq->flows = NULL;
+
+ bitmap_free(fq->flows_bitmap);
+ fq->flows_bitmap = NULL;
+}
+
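+/* A brief usage sketch (illustrative only): the typical lifecycle, with
+ * my_free and my_dequeue standing for caller-supplied fq_skb_free_t and
+ * fq_tin_dequeue_t callbacks:
+ *
+ *	err = fq_init(&my_fq, 1024);
+ *	fq_tin_init(&my_tin);
+ *
+ *	spin_lock_bh(&my_fq.lock);
+ *	idx = fq_flow_idx(&my_fq, skb);
+ *	fq_tin_enqueue(&my_fq, &my_tin, idx, skb, my_free);
+ *	skb = fq_tin_dequeue(&my_fq, &my_tin, my_dequeue);
+ *	spin_unlock_bh(&my_fq.lock);
+ *
+ *	fq_tin_reset(&my_fq, &my_tin, my_free);
+ *	fq_reset(&my_fq, my_free);
+ */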
+#endif
diff --git a/include/net/garp.h b/include/net/garp.h
new file mode 100644
index 000000000..59a07b171
--- /dev/null
+++ b/include/net/garp.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_GARP_H
+#define _NET_GARP_H
+
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <net/stp.h>
+
+#define GARP_PROTOCOL_ID 0x1
+#define GARP_END_MARK 0x0
+
+struct garp_pdu_hdr {
+ __be16 protocol;
+};
+
+struct garp_msg_hdr {
+ u8 attrtype;
+};
+
+enum garp_attr_event {
+ GARP_LEAVE_ALL,
+ GARP_JOIN_EMPTY,
+ GARP_JOIN_IN,
+ GARP_LEAVE_EMPTY,
+ GARP_LEAVE_IN,
+ GARP_EMPTY,
+};
+
+struct garp_attr_hdr {
+ u8 len;
+ u8 event;
+ u8 data[];
+};
+
+struct garp_skb_cb {
+ u8 cur_type;
+};
+
+static inline struct garp_skb_cb *garp_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct garp_skb_cb) >
+ sizeof_field(struct sk_buff, cb));
+ return (struct garp_skb_cb *)skb->cb;
+}
+
+enum garp_applicant_state {
+ GARP_APPLICANT_INVALID,
+ GARP_APPLICANT_VA,
+ GARP_APPLICANT_AA,
+ GARP_APPLICANT_QA,
+ GARP_APPLICANT_LA,
+ GARP_APPLICANT_VP,
+ GARP_APPLICANT_AP,
+ GARP_APPLICANT_QP,
+ GARP_APPLICANT_VO,
+ GARP_APPLICANT_AO,
+ GARP_APPLICANT_QO,
+ __GARP_APPLICANT_MAX
+};
+#define GARP_APPLICANT_MAX (__GARP_APPLICANT_MAX - 1)
+
+enum garp_event {
+ GARP_EVENT_REQ_JOIN,
+ GARP_EVENT_REQ_LEAVE,
+ GARP_EVENT_R_JOIN_IN,
+ GARP_EVENT_R_JOIN_EMPTY,
+ GARP_EVENT_R_EMPTY,
+ GARP_EVENT_R_LEAVE_IN,
+ GARP_EVENT_R_LEAVE_EMPTY,
+ GARP_EVENT_TRANSMIT_PDU,
+ __GARP_EVENT_MAX
+};
+#define GARP_EVENT_MAX (__GARP_EVENT_MAX - 1)
+
+enum garp_action {
+ GARP_ACTION_NONE,
+ GARP_ACTION_S_JOIN_IN,
+ GARP_ACTION_S_LEAVE_EMPTY,
+};
+
+struct garp_attr {
+ struct rb_node node;
+ enum garp_applicant_state state;
+ u8 type;
+ u8 dlen;
+ unsigned char data[];
+};
+
+enum garp_applications {
+ GARP_APPLICATION_GVRP,
+ __GARP_APPLICATION_MAX
+};
+#define GARP_APPLICATION_MAX (__GARP_APPLICATION_MAX - 1)
+
+struct garp_application {
+ enum garp_applications type;
+ unsigned int maxattr;
+ struct stp_proto proto;
+};
+
+struct garp_applicant {
+ struct garp_application *app;
+ struct net_device *dev;
+ struct timer_list join_timer;
+
+ spinlock_t lock;
+ struct sk_buff_head queue;
+ struct sk_buff *pdu;
+ struct rb_root gid;
+ struct rcu_head rcu;
+};
+
+struct garp_port {
+ struct garp_applicant __rcu *applicants[GARP_APPLICATION_MAX + 1];
+ struct rcu_head rcu;
+};
+
+int garp_register_application(struct garp_application *app);
+void garp_unregister_application(struct garp_application *app);
+
+int garp_init_applicant(struct net_device *dev, struct garp_application *app);
+void garp_uninit_applicant(struct net_device *dev,
+ struct garp_application *app);
+
+int garp_request_join(const struct net_device *dev,
+ const struct garp_application *app, const void *data,
+ u8 len, u8 type);
+void garp_request_leave(const struct net_device *dev,
+ const struct garp_application *app,
+ const void *data, u8 len, u8 type);
+
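+/* A brief usage sketch, modelled on the GVRP application (the group address
+ * is the 802.1D GVRP address; my_app, vid and the attribute type are
+ * illustrative):
+ *
+ *	static struct garp_application my_app __read_mostly = {
+ *		.proto.group_address = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 },
+ *		.maxattr = 1,
+ *		.type = GARP_APPLICATION_GVRP,
+ *	};
+ *
+ *	err = garp_register_application(&my_app);
+ *	err = garp_init_applicant(dev, &my_app);
+ *	err = garp_request_join(dev, &my_app, &vid, sizeof(vid), 1);
+ *	garp_request_leave(dev, &my_app, &vid, sizeof(vid), 1);
+ *	garp_uninit_applicant(dev, &my_app);
+ *	garp_unregister_application(&my_app);
+ */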
+#endif /* _NET_GARP_H */
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
new file mode 100644
index 000000000..7aa2b8e1f
--- /dev/null
+++ b/include/net/gen_stats.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_GEN_STATS_H
+#define __NET_GEN_STATS_H
+
+#include <linux/gen_stats.h>
+#include <linux/socket.h>
+#include <linux/rtnetlink.h>
+#include <linux/pkt_sched.h>
+
+/* Throughput stats.
+ * Must be initialized beforehand with gnet_stats_basic_sync_init().
+ *
+ * If no reads can ever occur in parallel with writes (e.g. stack-allocated
+ * bstats), then the internal stat values can be written to and read
+ * from directly. Otherwise, use _bstats_set/update() for writes and
+ * gnet_stats_add_basic() for reads.
+ */
+struct gnet_stats_basic_sync {
+ u64_stats_t bytes;
+ u64_stats_t packets;
+ struct u64_stats_sync syncp;
+} __aligned(2 * sizeof(u64));
+
+struct net_rate_estimator;
+
+struct gnet_dump {
+ spinlock_t * lock;
+ struct sk_buff * skb;
+ struct nlattr * tail;
+
+ /* Backward compatibility */
+ int compat_tc_stats;
+ int compat_xstats;
+ int padattr;
+ void * xstats;
+ int xstats_len;
+ struct tc_stats tc_stats;
+};
+
+void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
+int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
+ struct gnet_dump *d, int padattr);
+
+int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
+ int tc_stats_type, int xstats_type,
+ spinlock_t *lock, struct gnet_dump *d,
+ int padattr);
+
+int gnet_stats_copy_basic(struct gnet_dump *d,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
+void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
+int gnet_stats_copy_basic_hw(struct gnet_dump *d,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
+int gnet_stats_copy_rate_est(struct gnet_dump *d,
+ struct net_rate_estimator __rcu **ptr);
+int gnet_stats_copy_queue(struct gnet_dump *d,
+ struct gnet_stats_queue __percpu *cpu_q,
+ struct gnet_stats_queue *q, __u32 qlen);
+void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
+ const struct gnet_stats_queue __percpu *cpu_q,
+ const struct gnet_stats_queue *q);
+int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
+
+int gnet_stats_finish_copy(struct gnet_dump *d);
+
+int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu_bstats,
+ struct net_rate_estimator __rcu **rate_est,
+ spinlock_t *lock,
+ bool running, struct nlattr *opt);
+void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
+int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu_bstats,
+ struct net_rate_estimator __rcu **ptr,
+ spinlock_t *lock,
+ bool running, struct nlattr *opt);
+bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
+bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
+ struct gnet_stats_rate_est64 *sample);
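+
+/* A brief usage sketch (illustrative only): the dump sequence a qdisc or
+ * action typically follows when filling a netlink message:
+ *
+ *	struct gnet_dump d;
+ *
+ *	if (gnet_stats_start_copy(skb, TCA_STATS2, lock, &d, TCA_PAD) < 0)
+ *		goto err;
+ *	if (gnet_stats_copy_basic(&d, NULL, &bstats, false) < 0 ||
+ *	    gnet_stats_copy_queue(&d, NULL, &qstats, qlen) < 0)
+ *		goto err;
+ *	if (gnet_stats_finish_copy(&d) < 0)
+ *		goto err;
+ */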
+#endif
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
new file mode 100644
index 000000000..b9e5a22ae
--- /dev/null
+++ b/include/net/genetlink.h
@@ -0,0 +1,465 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_GENERIC_NETLINK_H
+#define __NET_GENERIC_NETLINK_H
+
+#include <linux/genetlink.h>
+#include <net/netlink.h>
+#include <net/net_namespace.h>
+
+#define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN)
+
+/**
+ * struct genl_multicast_group - generic netlink multicast group
+ * @name: name of the multicast group, names are per-family
+ * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
+ * @cap_sys_admin: whether %CAP_SYS_ADMIN is required for binding
+ */
+struct genl_multicast_group {
+ char name[GENL_NAMSIZ];
+ u8 flags;
+ u8 cap_sys_admin:1;
+};
+
+struct genl_ops;
+struct genl_info;
+
+/**
+ * struct genl_family - generic netlink family
+ * @id: protocol family identifier (private)
+ * @hdrsize: length of user specific header in bytes
+ * @name: name of family
+ * @version: protocol version
+ * @maxattr: maximum number of attributes supported
+ * @policy: netlink policy
+ * @netnsok: set to true if the family can handle network
+ * namespaces and should be presented in all of them
+ * @parallel_ops: operations can be called in parallel and aren't
+ * synchronized by the core genetlink code
+ * @pre_doit: called before an operation's doit callback, it may
+ *	do additional, common filtering and return an error
+ * @post_doit: called after an operation's doit callback, it may
+ * undo operations done by pre_doit, for example release locks
+ * @module: pointer to the owning module (set to THIS_MODULE)
+ * @mcgrps: multicast groups used by this family
+ * @n_mcgrps: number of multicast groups
+ * @resv_start_op: first operation for which reserved fields of the header
+ * can be validated and policies are required (see below);
+ * new families should leave this field at zero
+ * @mcgrp_offset: starting number of multicast group IDs in this family
+ * (private)
+ * @ops: the operations supported by this family
+ * @n_ops: number of operations supported by this family
+ * @small_ops: the small-struct operations supported by this family
+ * @n_small_ops: number of small-struct operations supported by this family
+ *
+ * Attribute policies (the combination of @policy and @maxattr fields)
+ * can be attached at the family level or at the operation level.
+ * If both are present the per-operation policy takes precedence.
+ * For operations before @resv_start_op, lack of policy means that the core
+ * will perform no attribute parsing or validation. For newer operations,
+ * if a policy is not provided, the core will reject all TLV attributes.
+ */
+struct genl_family {
+ int id; /* private */
+ unsigned int hdrsize;
+ char name[GENL_NAMSIZ];
+ unsigned int version;
+ unsigned int maxattr;
+ unsigned int mcgrp_offset; /* private */
+ u8 netnsok:1;
+ u8 parallel_ops:1;
+ u8 n_ops;
+ u8 n_small_ops;
+ u8 n_mcgrps;
+ u8 resv_start_op;
+ const struct nla_policy *policy;
+ int (*pre_doit)(const struct genl_ops *ops,
+ struct sk_buff *skb,
+ struct genl_info *info);
+ void (*post_doit)(const struct genl_ops *ops,
+ struct sk_buff *skb,
+ struct genl_info *info);
+ const struct genl_ops * ops;
+ const struct genl_small_ops *small_ops;
+ const struct genl_multicast_group *mcgrps;
+ struct module *module;
+};
+
+/**
+ * struct genl_info - receiving information
+ * @snd_seq: sending sequence number
+ * @snd_portid: netlink portid of sender
+ * @nlhdr: netlink message header
+ * @genlhdr: generic netlink message header
+ * @userhdr: user specific header
+ * @attrs: netlink attributes
+ * @_net: network namespace
+ * @user_ptr: user pointers
+ * @extack: extended ACK report struct
+ */
+struct genl_info {
+ u32 snd_seq;
+ u32 snd_portid;
+ struct nlmsghdr * nlhdr;
+ struct genlmsghdr * genlhdr;
+ void * userhdr;
+ struct nlattr ** attrs;
+ possible_net_t _net;
+ void * user_ptr[2];
+ struct netlink_ext_ack *extack;
+};
+
+static inline struct net *genl_info_net(struct genl_info *info)
+{
+ return read_pnet(&info->_net);
+}
+
+static inline void genl_info_net_set(struct genl_info *info, struct net *net)
+{
+ write_pnet(&info->_net, net);
+}
+
+#define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg)
+
+/* Report that a root attribute is missing */
+#define GENL_REQ_ATTR_CHECK(info, attr) ({ \
+ struct genl_info *__info = (info); \
+ \
+ NL_REQ_ATTR_CHECK(__info->extack, NULL, __info->attrs, (attr)); \
+})
+
+enum genl_validate_flags {
+ GENL_DONT_VALIDATE_STRICT = BIT(0),
+ GENL_DONT_VALIDATE_DUMP = BIT(1),
+ GENL_DONT_VALIDATE_DUMP_STRICT = BIT(2),
+};
+
+/**
+ * struct genl_small_ops - generic netlink operations (small version)
+ * @cmd: command identifier
+ * @internal_flags: flags used by the family
+ * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
+ * @validate: validation flags from enum genl_validate_flags
+ * @doit: standard command callback
+ * @dumpit: callback for dumpers
+ *
+ * This is a cut-down version of struct genl_ops for users who don't need
+ * most of the ancillary infra and want to save space.
+ */
+struct genl_small_ops {
+ int (*doit)(struct sk_buff *skb, struct genl_info *info);
+ int (*dumpit)(struct sk_buff *skb, struct netlink_callback *cb);
+ u8 cmd;
+ u8 internal_flags;
+ u8 flags;
+ u8 validate;
+};
+
+/**
+ * struct genl_ops - generic netlink operations
+ * @cmd: command identifier
+ * @internal_flags: flags used by the family
+ * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
+ * @maxattr: maximum number of attributes supported
+ * @policy: netlink policy (takes precedence over family policy)
+ * @validate: validation flags from enum genl_validate_flags
+ * @doit: standard command callback
+ * @start: start callback for dumps
+ * @dumpit: callback for dumpers
+ * @done: completion callback for dumps
+ */
+struct genl_ops {
+ int (*doit)(struct sk_buff *skb,
+ struct genl_info *info);
+ int (*start)(struct netlink_callback *cb);
+ int (*dumpit)(struct sk_buff *skb,
+ struct netlink_callback *cb);
+ int (*done)(struct netlink_callback *cb);
+ const struct nla_policy *policy;
+ unsigned int maxattr;
+ u8 cmd;
+ u8 internal_flags;
+ u8 flags;
+ u8 validate;
+};
+
+/**
+ * struct genl_dumpit_info - info that is available during dumpit op call
+ * @family: generic netlink family - for internal genl code usage
+ * @op: generic netlink ops - for internal genl code usage
+ * @attrs: netlink attributes
+ */
+struct genl_dumpit_info {
+ const struct genl_family *family;
+ struct genl_ops op;
+ struct nlattr **attrs;
+};
+
+static inline const struct genl_dumpit_info *
+genl_dumpit_info(struct netlink_callback *cb)
+{
+ return cb->data;
+}
+
+int genl_register_family(struct genl_family *family);
+int genl_unregister_family(const struct genl_family *family);
+void genl_notify(const struct genl_family *family, struct sk_buff *skb,
+ struct genl_info *info, u32 group, gfp_t flags);
+
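+/* A brief usage sketch (illustrative only; every "demo" name is
+ * hypothetical):
+ *
+ *	static const struct genl_ops demo_ops[] = {
+ *		{
+ *			.cmd = DEMO_CMD_GET,
+ *			.doit = demo_doit,
+ *			.flags = GENL_ADMIN_PERM,
+ *		},
+ *	};
+ *
+ *	static struct genl_family demo_family __ro_after_init = {
+ *		.name = "demo",
+ *		.version = 1,
+ *		.maxattr = DEMO_ATTR_MAX,
+ *		.policy = demo_policy,
+ *		.module = THIS_MODULE,
+ *		.ops = demo_ops,
+ *		.n_ops = ARRAY_SIZE(demo_ops),
+ *	};
+ *
+ * genl_register_family(&demo_family) is then called at module init and
+ * genl_unregister_family(&demo_family) at module exit.
+ */
+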
+void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
+ const struct genl_family *family, int flags, u8 cmd);
+
+/**
+ * genlmsg_nlhdr - Obtain netlink header from user specified header
+ * @user_hdr: user header as returned from genlmsg_put()
+ *
+ * Returns pointer to netlink header.
+ */
+static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr)
+{
+ return (struct nlmsghdr *)((char *)user_hdr -
+ GENL_HDRLEN -
+ NLMSG_HDRLEN);
+}
+
+/**
+ * genlmsg_parse_deprecated - parse attributes of a genetlink message
+ * @nlh: netlink message header
+ * @family: genetlink message family
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ */
+static inline int genlmsg_parse_deprecated(const struct nlmsghdr *nlh,
+ const struct genl_family *family,
+ struct nlattr *tb[], int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype,
+ policy, NL_VALIDATE_LIBERAL, extack);
+}
+
+/**
+ * genlmsg_parse - parse attributes of a genetlink message
+ * @nlh: netlink message header
+ * @family: genetlink message family
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ */
+static inline int genlmsg_parse(const struct nlmsghdr *nlh,
+ const struct genl_family *family,
+ struct nlattr *tb[], int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype,
+ policy, NL_VALIDATE_STRICT, extack);
+}
+
+/**
+ * genl_dump_check_consistent - check if sequence is consistent and advertise if not
+ * @cb: netlink callback structure that stores the sequence number
+ * @user_hdr: user header as returned from genlmsg_put()
+ *
+ * Cf. nl_dump_check_consistent(), this just provides a wrapper to make it
+ * simpler to use with generic netlink.
+ */
+static inline void genl_dump_check_consistent(struct netlink_callback *cb,
+ void *user_hdr)
+{
+ nl_dump_check_consistent(cb, genlmsg_nlhdr(user_hdr));
+}
+
+/**
+ * genlmsg_put_reply - Add generic netlink header to a reply message
+ * @skb: socket buffer holding the message
+ * @info: receiver info
+ * @family: generic netlink family
+ * @flags: netlink message flags
+ * @cmd: generic netlink command
+ *
+ * Returns pointer to user specific header
+ */
+static inline void *genlmsg_put_reply(struct sk_buff *skb,
+ struct genl_info *info,
+ const struct genl_family *family,
+ int flags, u8 cmd)
+{
+ return genlmsg_put(skb, info->snd_portid, info->snd_seq, family,
+ flags, cmd);
+}
+
+/**
+ * genlmsg_end - Finalize a generic netlink message
+ * @skb: socket buffer the message is stored in
+ * @hdr: user specific header
+ */
+static inline void genlmsg_end(struct sk_buff *skb, void *hdr)
+{
+ nlmsg_end(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
+}
+
+/**
+ * genlmsg_cancel - Cancel construction of a generic netlink message
+ * @skb: socket buffer the message is stored in
+ * @hdr: generic netlink message header
+ */
+static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
+{
+ if (hdr)
+ nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
+}
+
+/**
+ * genlmsg_multicast_netns - multicast a netlink message to a specific netns
+ * @family: the generic netlink family
+ * @net: the net namespace
+ * @skb: netlink message as socket buffer
+ * @portid: own netlink portid to avoid sending to yourself
+ * @group: offset of multicast group in groups array
+ * @flags: allocation flags
+ */
+static inline int genlmsg_multicast_netns(const struct genl_family *family,
+ struct net *net, struct sk_buff *skb,
+ u32 portid, unsigned int group, gfp_t flags)
+{
+ if (WARN_ON_ONCE(group >= family->n_mcgrps))
+ return -EINVAL;
+ group = family->mcgrp_offset + group;
+ return nlmsg_multicast(net->genl_sock, skb, portid, group, flags);
+}
+
+/**
+ * genlmsg_multicast - multicast a netlink message to the default netns
+ * @family: the generic netlink family
+ * @skb: netlink message as socket buffer
+ * @portid: own netlink portid to avoid sending to yourself
+ * @group: offset of multicast group in groups array
+ * @flags: allocation flags
+ */
+static inline int genlmsg_multicast(const struct genl_family *family,
+ struct sk_buff *skb, u32 portid,
+ unsigned int group, gfp_t flags)
+{
+ return genlmsg_multicast_netns(family, &init_net, skb,
+ portid, group, flags);
+}
+
+/**
+ * genlmsg_multicast_allns - multicast a netlink message to all net namespaces
+ * @family: the generic netlink family
+ * @skb: netlink message as socket buffer
+ * @portid: own netlink portid to avoid sending to yourself
+ * @group: offset of multicast group in groups array
+ * @flags: allocation flags
+ *
+ * This function must hold the RTNL or rcu_read_lock().
+ */
+int genlmsg_multicast_allns(const struct genl_family *family,
+ struct sk_buff *skb, u32 portid,
+ unsigned int group, gfp_t flags);
+
+/**
+ * genlmsg_unicast - unicast a netlink message
+ * @net: network namespace to look up @portid in
+ * @skb: netlink message as socket buffer
+ * @portid: netlink portid of the destination socket
+ */
+static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 portid)
+{
+ return nlmsg_unicast(net->genl_sock, skb, portid);
+}
+
+/**
+ * genlmsg_reply - reply to a request
+ * @skb: netlink message to be sent back
+ * @info: receiver information
+ */
+static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
+{
+ return genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
+}
+
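+/* A brief usage sketch (illustrative only): a doit handler building and
+ * sending a reply; demo_family and DEMO_CMD_GET are hypothetical.
+ *
+ *	msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ *	if (!msg)
+ *		return -ENOMEM;
+ *	hdr = genlmsg_put_reply(msg, info, &demo_family, 0, DEMO_CMD_GET);
+ *	if (!hdr) {
+ *		nlmsg_free(msg);
+ *		return -EMSGSIZE;
+ *	}
+ *	... nla_put() the reply attributes here ...
+ *	genlmsg_end(msg, hdr);
+ *	return genlmsg_reply(msg, info);
+ */
+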
+/**
+ * genlmsg_data - head of message payload
+ * @gnlh: genetlink message header
+ */
+static inline void *genlmsg_data(const struct genlmsghdr *gnlh)
+{
+ return ((unsigned char *) gnlh + GENL_HDRLEN);
+}
+
+/**
+ * genlmsg_len - length of message payload
+ * @gnlh: genetlink message header
+ */
+static inline int genlmsg_len(const struct genlmsghdr *gnlh)
+{
+ struct nlmsghdr *nlh = (struct nlmsghdr *)((unsigned char *)gnlh -
+ NLMSG_HDRLEN);
+ return (nlh->nlmsg_len - GENL_HDRLEN - NLMSG_HDRLEN);
+}
+
+/**
+ * genlmsg_msg_size - length of genetlink message not including padding
+ * @payload: length of message payload
+ */
+static inline int genlmsg_msg_size(int payload)
+{
+ return GENL_HDRLEN + payload;
+}
+
+/**
+ * genlmsg_total_size - length of genetlink message including padding
+ * @payload: length of message payload
+ */
+static inline int genlmsg_total_size(int payload)
+{
+ return NLMSG_ALIGN(genlmsg_msg_size(payload));
+}
+
+/**
+ * genlmsg_new - Allocate a new generic netlink message
+ * @payload: size of the message payload
+ * @flags: the type of memory to allocate.
+ */
+static inline struct sk_buff *genlmsg_new(size_t payload, gfp_t flags)
+{
+ return nlmsg_new(genlmsg_total_size(payload), flags);
+}
+
+/**
+ * genl_set_err - report error to genetlink broadcast listeners
+ * @family: the generic netlink family
+ * @net: the network namespace to report the error to
+ * @portid: the PORTID of a process that we want to skip (if any)
+ * @group: the broadcast group that will notice the error
+ * (this is the offset of the multicast group in the groups array)
+ * @code: error code, must be negative (as usual in kernelspace)
+ *
+ * This function returns the number of broadcast listeners that have set the
+ * NETLINK_RECV_NO_ENOBUFS socket option.
+ */
+static inline int genl_set_err(const struct genl_family *family,
+ struct net *net, u32 portid,
+ u32 group, int code)
+{
+ if (WARN_ON_ONCE(group >= family->n_mcgrps))
+ return -EINVAL;
+ group = family->mcgrp_offset + group;
+ return netlink_set_err(net->genl_sock, portid, group, code);
+}
+
+static inline int genl_has_listeners(const struct genl_family *family,
+ struct net *net, unsigned int group)
+{
+ if (WARN_ON_ONCE(group >= family->n_mcgrps))
+ return -EINVAL;
+ group = family->mcgrp_offset + group;
+ return netlink_has_listeners(net->genl_sock, group);
+}
+#endif /* __NET_GENERIC_NETLINK_H */
diff --git a/include/net/geneve.h b/include/net/geneve.h
new file mode 100644
index 000000000..bced0b1d9
--- /dev/null
+++ b/include/net/geneve.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_GENEVE_H
+#define __NET_GENEVE_H 1
+
+#include <net/udp_tunnel.h>
+
+#define GENEVE_UDP_PORT 6081
+
+/* Geneve Header:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Ver| Opt Len |O|C| Rsvd. | Protocol Type |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Virtual Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Variable Length Options |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Option Header:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Option Class | Type |R|R|R| Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Variable Option Data |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+struct geneve_opt {
+ __be16 opt_class;
+ u8 type;
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ u8 length:5;
+ u8 r3:1;
+ u8 r2:1;
+ u8 r1:1;
+#else
+ u8 r1:1;
+ u8 r2:1;
+ u8 r3:1;
+ u8 length:5;
+#endif
+ u8 opt_data[];
+};
+
+#define GENEVE_CRIT_OPT_TYPE (1 << 7)
+
+struct genevehdr {
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ u8 opt_len:6;
+ u8 ver:2;
+ u8 rsvd1:6;
+ u8 critical:1;
+ u8 oam:1;
+#else
+ u8 ver:2;
+ u8 opt_len:6;
+ u8 oam:1;
+ u8 critical:1;
+ u8 rsvd1:6;
+#endif
+ __be16 proto_type;
+ u8 vni[3];
+ u8 rsvd2;
+ struct geneve_opt options[];
+};
+
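+/* A brief usage sketch (illustrative only): the 24-bit VNI is carried as
+ * three bytes, so a receiver recovers it as:
+ *
+ *	u32 vni = (gnvh->vni[0] << 16) | (gnvh->vni[1] << 8) | gnvh->vni[2];
+ */
+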
+static inline bool netif_is_geneve(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "geneve");
+}
+
+#ifdef CONFIG_INET
+struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
+ u8 name_assign_type, u16 dst_port);
+#endif /* CONFIG_INET */
+
+#endif /* __NET_GENEVE_H */
diff --git a/include/net/gre.h b/include/net/gre.h
new file mode 100644
index 000000000..4e209708b
--- /dev/null
+++ b/include/net/gre.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GRE_H
+#define __LINUX_GRE_H
+
+#include <linux/skbuff.h>
+#include <net/ip_tunnels.h>
+
+struct gre_base_hdr {
+ __be16 flags;
+ __be16 protocol;
+} __packed;
+
+struct gre_full_hdr {
+ struct gre_base_hdr fixed_header;
+ __be16 csum;
+ __be16 reserved1;
+ __be32 key;
+ __be32 seq;
+} __packed;
+#define GRE_HEADER_SECTION 4
+
+#define GREPROTO_CISCO 0
+#define GREPROTO_PPTP 1
+#define GREPROTO_MAX 2
+#define GRE_IP_PROTO_MAX 2
+
+struct gre_protocol {
+ int (*handler)(struct sk_buff *skb);
+ void (*err_handler)(struct sk_buff *skb, u32 info);
+};
+
+int gre_add_protocol(const struct gre_protocol *proto, u8 version);
+int gre_del_protocol(const struct gre_protocol *proto, u8 version);
+
+struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
+ u8 name_assign_type);
+int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+ bool *csum_err, __be16 proto, int nhs);
+
+static inline bool netif_is_gretap(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "gretap");
+}
+
+static inline bool netif_is_ip6gretap(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "ip6gretap");
+}
+
+static inline int gre_calc_hlen(__be16 o_flags)
+{
+ int addend = 4;
+
+ if (o_flags & TUNNEL_CSUM)
+ addend += 4;
+ if (o_flags & TUNNEL_KEY)
+ addend += 4;
+ if (o_flags & TUNNEL_SEQ)
+ addend += 4;
+ return addend;
+}
+
+static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
+{
+ __be16 tflags = 0;
+
+ if (flags & GRE_CSUM)
+ tflags |= TUNNEL_CSUM;
+ if (flags & GRE_ROUTING)
+ tflags |= TUNNEL_ROUTING;
+ if (flags & GRE_KEY)
+ tflags |= TUNNEL_KEY;
+ if (flags & GRE_SEQ)
+ tflags |= TUNNEL_SEQ;
+ if (flags & GRE_STRICT)
+ tflags |= TUNNEL_STRICT;
+ if (flags & GRE_REC)
+ tflags |= TUNNEL_REC;
+ if (flags & GRE_VERSION)
+ tflags |= TUNNEL_VERSION;
+
+ return tflags;
+}
+
+static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags)
+{
+ __be16 flags = 0;
+
+ if (tflags & TUNNEL_CSUM)
+ flags |= GRE_CSUM;
+ if (tflags & TUNNEL_ROUTING)
+ flags |= GRE_ROUTING;
+ if (tflags & TUNNEL_KEY)
+ flags |= GRE_KEY;
+ if (tflags & TUNNEL_SEQ)
+ flags |= GRE_SEQ;
+ if (tflags & TUNNEL_STRICT)
+ flags |= GRE_STRICT;
+ if (tflags & TUNNEL_REC)
+ flags |= GRE_REC;
+ if (tflags & TUNNEL_VERSION)
+ flags |= GRE_VERSION;
+
+ return flags;
+}
+
+static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
+ __be16 flags, __be16 proto,
+ __be32 key, __be32 seq)
+{
+ struct gre_base_hdr *greh;
+
+ skb_push(skb, hdr_len);
+
+ skb_set_inner_protocol(skb, proto);
+ skb_reset_transport_header(skb);
+ greh = (struct gre_base_hdr *)skb->data;
+ greh->flags = gre_tnl_flags_to_gre_flags(flags);
+ greh->protocol = proto;
+
+ if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
+ __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+
+ if (flags & TUNNEL_SEQ) {
+ *ptr = seq;
+ ptr--;
+ }
+ if (flags & TUNNEL_KEY) {
+ *ptr = key;
+ ptr--;
+ }
+ if (flags & TUNNEL_CSUM &&
+ !(skb_shinfo(skb)->gso_type &
+ (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
+ *ptr = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ *(__sum16 *)ptr = csum_fold(lco_csum(skb));
+ } else {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = sizeof(*greh);
+ }
+ }
+ }
+}
+
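+/* A brief usage sketch (illustrative only): a tunnel transmit path pairs
+ * gre_calc_hlen() with gre_build_header(), with TUNNEL_* flags, key and
+ * sequence number taken from tunnel state. Since gre_build_header() pushes
+ * the header, the caller must already have hlen bytes of headroom:
+ *
+ *	int hlen = gre_calc_hlen(flags);
+ *
+ *	gre_build_header(skb, hlen, flags, proto, key, seq);
+ */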
+#endif
diff --git a/include/net/gro.h b/include/net/gro.h
new file mode 100644
index 000000000..a4fab7062
--- /dev/null
+++ b/include/net/gro.h
@@ -0,0 +1,450 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _NET_IPV6_GRO_H
+#define _NET_IPV6_GRO_H
+
+#include <linux/indirect_call_wrapper.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <linux/skbuff.h>
+#include <net/udp.h>
+
+struct napi_gro_cb {
+ /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
+ void *frag0;
+
+ /* Length of frag0. */
+ unsigned int frag0_len;
+
+ /* This indicates where we are processing relative to skb->data. */
+ int data_offset;
+
+ /* This is non-zero if the packet cannot be merged with the new skb. */
+ u16 flush;
+
+ /* Save the IP ID here and check when we get to the transport layer */
+ u16 flush_id;
+
+ /* Number of segments aggregated. */
+ u16 count;
+
+ /* Used in ipv6_gro_receive() and foo-over-udp */
+ u16 proto;
+
+ /* jiffies when first packet was created/queued */
+ unsigned long age;
+
+/* Used in napi_gro_cb::free */
+#define NAPI_GRO_FREE 1
+#define NAPI_GRO_FREE_STOLEN_HEAD 2
+ /* portion of the cb set to zero at every gro iteration */
+ struct_group(zeroed,
+
+ /* Start offset for remote checksum offload */
+ u16 gro_remcsum_start;
+
+ /* This is non-zero if the packet may be of the same flow. */
+ u8 same_flow:1;
+
+ /* Used in tunnel GRO receive */
+ u8 encap_mark:1;
+
+ /* GRO checksum is valid */
+ u8 csum_valid:1;
+
+ /* Number of checksums via CHECKSUM_UNNECESSARY */
+ u8 csum_cnt:3;
+
+ /* Free the skb? */
+ u8 free:2;
+
+ /* Used in foo-over-udp, set in udp[46]_gro_receive */
+ u8 is_ipv6:1;
+
+ /* Used in GRE, set in fou/gue_gro_receive */
+ u8 is_fou:1;
+
+ /* Used to determine if flush_id can be ignored */
+ u8 is_atomic:1;
+
+ /* Number of gro_receive callbacks this packet already went through */
+ u8 recursion_counter:4;
+
+ /* GRO is done by frag_list pointer chaining. */
+ u8 is_flist:1;
+ );
+
+ /* used to support CHECKSUM_COMPLETE for tunneling protocols */
+ __wsum csum;
+
+ /* used in skb_gro_receive() slow path */
+ struct sk_buff *last;
+};
+
+#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+ return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
+static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
+ struct list_head *head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(head, skb);
+}
+
+typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
+ struct sk_buff *);
+static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
+ struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(sk, head, skb);
+}
+
+static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
+{
+ return NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline unsigned int skb_gro_len(const struct sk_buff *skb)
+{
+ return skb->len - NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
+{
+ NAPI_GRO_CB(skb)->data_offset += len;
+}
+
+static inline void *skb_gro_header_fast(struct sk_buff *skb,
+ unsigned int offset)
+{
+ return NAPI_GRO_CB(skb)->frag0 + offset;
+}
+
+static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
+{
+ return NAPI_GRO_CB(skb)->frag0_len < hlen;
+}
+
+static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
+{
+ NAPI_GRO_CB(skb)->frag0 = NULL;
+ NAPI_GRO_CB(skb)->frag0_len = 0;
+}
+
+static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
+ unsigned int offset)
+{
+ if (!pskb_may_pull(skb, hlen))
+ return NULL;
+
+ skb_gro_frag0_invalidate(skb);
+ return skb->data + offset;
+}
+
+static inline void *skb_gro_header(struct sk_buff *skb,
+ unsigned int hlen, unsigned int offset)
+{
+ void *ptr;
+
+ ptr = skb_gro_header_fast(skb, offset);
+ if (skb_gro_header_hard(skb, hlen))
+ ptr = skb_gro_header_slow(skb, hlen, offset);
+ return ptr;
+}
+
+static inline void *skb_gro_network_header(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
+ skb_network_offset(skb);
+}
+
+static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ const struct iphdr *iph = skb_gro_network_header(skb);
+
+ return csum_tcpudp_nofold(iph->saddr, iph->daddr,
+ skb_gro_len(skb), proto, 0);
+}
+
+static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
+ const void *start, unsigned int len)
+{
+ if (NAPI_GRO_CB(skb)->csum_valid)
+ NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len,
+ wsum_negate(NAPI_GRO_CB(skb)->csum)));
+}
+
+/* GRO checksum functions. These are logical equivalents of the normal
+ * checksum functions (in skbuff.h) except that they operate on the GRO
+ * offsets and fields in sk_buff.
+ */
+
+__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
+
+static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
+}
+
+static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
+ bool zero_okay,
+ __sum16 check)
+{
+ return ((skb->ip_summed != CHECKSUM_PARTIAL ||
+ skb_checksum_start_offset(skb) <
+ skb_gro_offset(skb)) &&
+ !skb_at_gro_remcsum_start(skb) &&
+ NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ (!zero_okay || check));
+}
+
+static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
+ __wsum psum)
+{
+ if (NAPI_GRO_CB(skb)->csum_valid &&
+ !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
+ return 0;
+
+ NAPI_GRO_CB(skb)->csum = psum;
+
+ return __skb_gro_checksum_complete(skb);
+}
+
+static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
+{
+ if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
+ /* Consume a checksum from CHECKSUM_UNNECESSARY */
+ NAPI_GRO_CB(skb)->csum_cnt--;
+ } else {
+		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
+		 * have verified a new top-level checksum or an encapsulated
+		 * one during GRO. This saves work if we fall back to the
+		 * normal path.
+ */
+ __skb_incr_checksum_unnecessary(skb);
+ }
+}
+
+#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
+ compute_pseudo) \
+({ \
+ __sum16 __ret = 0; \
+ if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
+ __ret = __skb_gro_checksum_validate_complete(skb, \
+ compute_pseudo(skb, proto)); \
+ if (!__ret) \
+ skb_gro_incr_csum_unnecessary(skb); \
+ __ret; \
+})
+
+#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
+ __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
+
+#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
+ compute_pseudo) \
+ __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
+
+#define skb_gro_checksum_simple_validate(skb) \
+ __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
+
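+/* A brief usage sketch (illustrative only): a transport gro_receive handler
+ * validates the checksum before attempting aggregation, e.g. for TCP/IPv4:
+ *
+ *	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
+ *				      inet_gro_compute_pseudo)) {
+ *		NAPI_GRO_CB(skb)->flush = 1;
+ *		return NULL;
+ *	}
+ */
+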
+static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ !NAPI_GRO_CB(skb)->csum_valid);
+}
+
+static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
+ __wsum pseudo)
+{
+ NAPI_GRO_CB(skb)->csum = ~pseudo;
+ NAPI_GRO_CB(skb)->csum_valid = 1;
+}
+
+#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
+do { \
+ if (__skb_gro_checksum_convert_check(skb)) \
+ __skb_gro_checksum_convert(skb, \
+ compute_pseudo(skb, proto)); \
+} while (0)
+
+struct gro_remcsum {
+ int offset;
+ __wsum delta;
+};
+
+static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
+{
+ grc->offset = 0;
+ grc->delta = 0;
+}
+
+static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+ unsigned int off, size_t hdrlen,
+ int start, int offset,
+ struct gro_remcsum *grc,
+ bool nopartial)
+{
+ __wsum delta;
+ size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
+
+ BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
+
+ if (!nopartial) {
+ NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
+ return ptr;
+ }
+
+ ptr = skb_gro_header(skb, off + plen, off);
+ if (!ptr)
+ return NULL;
+
+ delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
+ start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+
+ grc->offset = off + hdrlen + offset;
+ grc->delta = delta;
+
+ return ptr;
+}
+
+static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
+ struct gro_remcsum *grc)
+{
+ void *ptr;
+ size_t plen = grc->offset + sizeof(u16);
+
+ if (!grc->delta)
+ return;
+
+ ptr = skb_gro_header(skb, plen, grc->offset);
+ if (!ptr)
+ return;
+
+ remcsum_unadjust((__sum16 *)ptr, grc->delta);
+}
+
+#ifdef CONFIG_XFRM_OFFLOAD
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
+{
+ if (PTR_ERR(pp) != -EINPROGRESS)
+ NAPI_GRO_CB(skb)->flush |= flush;
+}
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+ struct sk_buff *pp,
+ int flush,
+ struct gro_remcsum *grc)
+{
+ if (PTR_ERR(pp) != -EINPROGRESS) {
+ NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_remcsum_cleanup(skb, grc);
+ skb->remcsum_offload = 0;
+ }
+}
+#else
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
+{
+ NAPI_GRO_CB(skb)->flush |= flush;
+}
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+ struct sk_buff *pp,
+ int flush,
+ struct gro_remcsum *grc)
+{
+ NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_remcsum_cleanup(skb, grc);
+ skb->remcsum_offload = 0;
+}
+#endif
+
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
+
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
+
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
+
+#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb) \
+({ \
+ unlikely(gro_recursion_inc_test(skb)) ? \
+ NAPI_GRO_CB(skb)->flush |= 1, NULL : \
+ INDIRECT_CALL_INET(cb, f2, f1, head, skb); \
+})
+
+struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ struct udphdr *uh, struct sock *sk);
+int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
+
+static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
+{
+ struct udphdr *uh;
+ unsigned int hlen, off;
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*uh);
+ uh = skb_gro_header(skb, hlen, off);
+
+ return uh;
+}
+
+static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ const struct ipv6hdr *iph = skb_gro_network_header(skb);
+
+ return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
+ skb_gro_len(skb), proto, 0));
+}
+
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
+static inline void gro_normal_list(struct napi_struct *napi)
+{
+ if (!napi->rx_count)
+ return;
+ netif_receive_skb_list_internal(&napi->rx_list);
+ INIT_LIST_HEAD(&napi->rx_list);
+ napi->rx_count = 0;
+}
+
+/* Queue one GRO_NORMAL SKB up for list processing. If the batch size is exceeded,
+ * pass the whole batch up to the stack.
+ */
+static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
+{
+ list_add_tail(&skb->list, &napi->rx_list);
+ napi->rx_count += segs;
+ if (napi->rx_count >= READ_ONCE(gro_normal_batch))
+ gro_normal_list(napi);
+}
+
+
+#endif /* _NET_IPV6_GRO_H */
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
new file mode 100644
index 000000000..596688b67
--- /dev/null
+++ b/include/net/gro_cells.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_GRO_CELLS_H
+#define _NET_GRO_CELLS_H
+
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+
+struct gro_cell;
+
+struct gro_cells {
+ struct gro_cell __percpu *cells;
+};
+
+int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb);
+int gro_cells_init(struct gro_cells *gcells, struct net_device *dev);
+void gro_cells_destroy(struct gro_cells *gcells);
+
+#endif
diff --git a/include/net/gtp.h b/include/net/gtp.h
new file mode 100644
index 000000000..2a503f035
--- /dev/null
+++ b/include/net/gtp.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _GTP_H_
+#define _GTP_H_
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <net/rtnetlink.h>
+
+/* General GTP protocol related definitions. */
+
+#define GTP0_PORT 3386
+#define GTP1U_PORT 2152
+
+/* GTP message types */
+#define GTP_ECHO_REQ 1 /* Echo Request */
+#define GTP_ECHO_RSP 2 /* Echo Response */
+#define GTP_TPDU 255
+
+#define GTPIE_RECOVERY 14
+
+struct gtp0_header { /* According to GSM TS 09.60. */
+ __u8 flags;
+ __u8 type;
+ __be16 length;
+ __be16 seq;
+ __be16 flow;
+ __u8 number;
+ __u8 spare[3];
+ __be64 tid;
+} __attribute__ ((packed));
+
+struct gtp1_header { /* According to 3GPP TS 29.060. */
+ __u8 flags;
+ __u8 type;
+ __be16 length;
+ __be32 tid;
+} __attribute__ ((packed));
+
+struct gtp1_header_long { /* According to 3GPP TS 29.060. */
+ __u8 flags;
+ __u8 type;
+ __be16 length;
+ __be32 tid;
+ __be16 seq;
+ __u8 npdu;
+ __u8 next;
+} __packed;
+
+/* GTP Information Element */
+struct gtp_ie {
+ __u8 tag;
+ __u8 val;
+} __packed;
+
+struct gtp0_packet {
+ struct gtp0_header gtp0_h;
+ struct gtp_ie ie;
+} __packed;
+
+struct gtp1u_packet {
+ struct gtp1_header_long gtp1u_h;
+ struct gtp_ie ie;
+} __packed;
+
+struct gtp_pdu_session_info { /* According to 3GPP TS 38.415. */
+ u8 pdu_type;
+ u8 qfi;
+};
+
+static inline bool netif_is_gtp(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "gtp");
+}
+
+#define GTP1_F_NPDU 0x01
+#define GTP1_F_SEQ 0x02
+#define GTP1_F_EXTHDR 0x04
+#define GTP1_F_MASK 0x07
+
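+/* A brief usage sketch (illustrative only): any of the GTP1_F_MASK bits in
+ * a received GTPv1 header forces the long header layout, so a receive path
+ * checks:
+ *
+ *	if (gtp1->flags & GTP1_F_MASK)
+ *		hdrlen += 4;
+ */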
+#endif
diff --git a/include/net/gue.h b/include/net/gue.h
new file mode 100644
index 000000000..dfca298be
--- /dev/null
+++ b/include/net/gue.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_GUE_H
+#define __NET_GUE_H
+
+/* Definitions for the GUE header, standard and private flags, lengths
+ * of optional fields are below.
+ *
+ * Diagram of GUE header:
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Ver|C| Hlen | Proto/ctype | Standard flags |P|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * ~ Fields (optional) ~
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Private flags (optional, P bit is set) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * ~ Private fields (optional) ~
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * C bit indicates control message when set, data message when unset.
+ * For a control message, proto/ctype is interpreted as a type of
+ * control message. For data messages, proto/ctype is the IP protocol
+ * of the next header.
+ *
+ * P bit indicates the private flags field is present. The private flags
+ * may refer to options placed after this field.
+ */
+
+#include <asm/byteorder.h>
+#include <linux/types.h>
+
+struct guehdr {
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 hlen:5,
+ control:1,
+ version:2;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u8 version:2,
+ control:1,
+ hlen:5;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 proto_ctype;
+ __be16 flags;
+ };
+ __be32 word;
+ };
+};
+
+/* Standard flags in GUE header */
+
+#define GUE_FLAG_PRIV htons(1<<0) /* Private flags are in options */
+#define GUE_LEN_PRIV 4
+
+#define GUE_FLAGS_ALL (GUE_FLAG_PRIV)
+
+/* Private flags in the private option extension */
+
+#define GUE_PFLAG_REMCSUM htonl(1U << 31)
+#define GUE_PLEN_REMCSUM 4
+
+#define GUE_PFLAGS_ALL (GUE_PFLAG_REMCSUM)
+
+/* Functions to compute options length corresponding to flags.
+ * If we ever have a lot of flags this can potentially be
+ * converted to a more optimized algorithm (table lookup
+ * for instance).
+ */
+static inline size_t guehdr_flags_len(__be16 flags)
+{
+ return ((flags & GUE_FLAG_PRIV) ? GUE_LEN_PRIV : 0);
+}
+
+static inline size_t guehdr_priv_flags_len(__be32 flags)
+{
+ return 0;
+}
+
+/* Validate standard and private flags. Returns non-zero (meaning invalid)
+ * if there are unknown standard or private flags, or if the options length
+ * for the flags exceeds the options length specified in hlen of the GUE
+ * header.
+ */
+static inline int validate_gue_flags(struct guehdr *guehdr, size_t optlen)
+{
+ __be16 flags = guehdr->flags;
+ size_t len;
+
+ if (flags & ~GUE_FLAGS_ALL)
+ return 1;
+
+ len = guehdr_flags_len(flags);
+ if (len > optlen)
+ return 1;
+
+ if (flags & GUE_FLAG_PRIV) {
+		/* Private flags are the last four bytes accounted for in
+ * guehdr_flags_len
+ */
+ __be32 pflags = *(__be32 *)((void *)&guehdr[1] +
+ len - GUE_LEN_PRIV);
+
+ if (pflags & ~GUE_PFLAGS_ALL)
+ return 1;
+
+ len += guehdr_priv_flags_len(pflags);
+ if (len > optlen)
+ return 1;
+ }
+
+ return 0;
+}
+
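+/* A brief usage sketch (illustrative only): a receive path checks the header
+ * before trusting its options; hlen counts 32-bit words:
+ *
+ *	size_t optlen = guehdr->hlen << 2;
+ *
+ *	if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
+ *		goto drop;
+ */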
+#endif
diff --git a/include/net/hwbm.h b/include/net/hwbm.h
new file mode 100644
index 000000000..aa495dece
--- /dev/null
+++ b/include/net/hwbm.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _HWBM_H
+#define _HWBM_H
+
+#include <linux/mutex.h>
+
+struct hwbm_pool {
+ /* Capacity of the pool */
+ int size;
+ /* Size of the buffers managed */
+ int frag_size;
+ /* Number of buffers currently used by this pool */
+ int buf_num;
+	/* constructor called during allocation */
+	int (*construct)(struct hwbm_pool *bm_pool, void *buf);
+	/* protect access to the buffer counter */
+ struct mutex buf_lock;
+ /* private data */
+ void *priv;
+};
+#ifdef CONFIG_HWBM
+void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf);
+int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp);
+int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num);
+#else
+static inline void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
+
+static inline int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp)
+{ return 0; }
+
+static inline int hwbm_pool_add(struct hwbm_pool *bm_pool,
+ unsigned int buf_num)
+{ return 0; }
+#endif /* CONFIG_HWBM */
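+
+/* A brief usage sketch (illustrative only): a driver fills its pool at probe
+ * time; my_construct stands for a caller-supplied constructor:
+ *
+ *	mutex_init(&pool->buf_lock);
+ *	pool->size = num_bufs;
+ *	pool->frag_size = frag_size;
+ *	pool->construct = my_construct;
+ *	added = hwbm_pool_add(pool, pool->size);
+ */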
+#endif /* _HWBM_H */
diff --git a/include/net/icmp.h b/include/net/icmp.h
new file mode 100644
index 000000000..caddf4a59
--- /dev/null
+++ b/include/net/icmp.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the ICMP module.
+ *
+ * Version: @(#)icmp.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ */
+#ifndef _ICMP_H
+#define _ICMP_H
+
+#include <linux/icmp.h>
+
+#include <net/inet_sock.h>
+#include <net/snmp.h>
+#include <net/ip.h>
+
+struct icmp_err {
+ int errno;
+ unsigned int fatal:1;
+};
+
+extern const struct icmp_err icmp_err_convert[];
+#define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field)
+#define __ICMP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.icmp_statistics, field)
+#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256)
+#define ICMPMSGIN_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
+
+struct dst_entry;
+struct net_proto_family;
+struct sk_buff;
+struct net;
+
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ const struct ip_options *opt);
+static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+ __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
+}
+
+#if IS_ENABLED(CONFIG_NF_NAT)
+void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+#else
+static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+ struct ip_options opts = { 0 };
+ __icmp_send(skb_in, type, code, info, &opts);
+}
+#endif
+
+int icmp_rcv(struct sk_buff *skb);
+int icmp_err(struct sk_buff *skb, u32 info);
+int icmp_init(void);
+void icmp_out_count(struct net *net, unsigned char type);
+bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr);
+
+#endif /* _ICMP_H */
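
As a usage sketch, this is the shape of the classic path-MTU error a forwarding path emits (compare ip_forward()); skb, mtu and the drop label are assumed to come from the caller:

/* Refuse to forward an oversized DF packet and tell the sender. */
if (skb->len > mtu && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
	goto drop;
}
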
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
new file mode 100644
index 000000000..598f53d2a
--- /dev/null
+++ b/include/net/ieee80211_radiotap.h
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018-2019, 2021 Intel Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __RADIOTAP_H
+#define __RADIOTAP_H
+
+#include <linux/kernel.h>
+#include <asm/unaligned.h>
+
+/**
+ * struct ieee80211_radiotap_header - base radiotap header
+ */
+struct ieee80211_radiotap_header {
+ /**
+ * @it_version: radiotap version, always 0
+ */
+ uint8_t it_version;
+
+ /**
+ * @it_pad: padding (or alignment)
+ */
+ uint8_t it_pad;
+
+ /**
+ * @it_len: overall radiotap header length
+ */
+ __le16 it_len;
+
+ /**
+ * @it_present: (first) present word
+ */
+ __le32 it_present;
+
+ /**
+ * @it_optional: all remaining presence bitmaps
+ */
+ __le32 it_optional[];
+} __packed;
+
+/* version is always 0 */
+#define PKTHDR_RADIOTAP_VERSION 0
+
+/* see the radiotap website for the descriptions */
+enum ieee80211_radiotap_presence {
+ IEEE80211_RADIOTAP_TSFT = 0,
+ IEEE80211_RADIOTAP_FLAGS = 1,
+ IEEE80211_RADIOTAP_RATE = 2,
+ IEEE80211_RADIOTAP_CHANNEL = 3,
+ IEEE80211_RADIOTAP_FHSS = 4,
+ IEEE80211_RADIOTAP_DBM_ANTSIGNAL = 5,
+ IEEE80211_RADIOTAP_DBM_ANTNOISE = 6,
+ IEEE80211_RADIOTAP_LOCK_QUALITY = 7,
+ IEEE80211_RADIOTAP_TX_ATTENUATION = 8,
+ IEEE80211_RADIOTAP_DB_TX_ATTENUATION = 9,
+ IEEE80211_RADIOTAP_DBM_TX_POWER = 10,
+ IEEE80211_RADIOTAP_ANTENNA = 11,
+ IEEE80211_RADIOTAP_DB_ANTSIGNAL = 12,
+ IEEE80211_RADIOTAP_DB_ANTNOISE = 13,
+ IEEE80211_RADIOTAP_RX_FLAGS = 14,
+ IEEE80211_RADIOTAP_TX_FLAGS = 15,
+ IEEE80211_RADIOTAP_RTS_RETRIES = 16,
+ IEEE80211_RADIOTAP_DATA_RETRIES = 17,
+ /* 18 is XChannel, but it's not defined yet */
+ IEEE80211_RADIOTAP_MCS = 19,
+ IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
+ IEEE80211_RADIOTAP_VHT = 21,
+ IEEE80211_RADIOTAP_TIMESTAMP = 22,
+ IEEE80211_RADIOTAP_HE = 23,
+ IEEE80211_RADIOTAP_HE_MU = 24,
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU = 26,
+ IEEE80211_RADIOTAP_LSIG = 27,
+
+ /* valid in every it_present bitmap, even vendor namespaces */
+ IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
+ IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30,
+ IEEE80211_RADIOTAP_EXT = 31
+};
+
+/* for IEEE80211_RADIOTAP_FLAGS */
+enum ieee80211_radiotap_flags {
+ IEEE80211_RADIOTAP_F_CFP = 0x01,
+ IEEE80211_RADIOTAP_F_SHORTPRE = 0x02,
+ IEEE80211_RADIOTAP_F_WEP = 0x04,
+ IEEE80211_RADIOTAP_F_FRAG = 0x08,
+ IEEE80211_RADIOTAP_F_FCS = 0x10,
+ IEEE80211_RADIOTAP_F_DATAPAD = 0x20,
+ IEEE80211_RADIOTAP_F_BADFCS = 0x40,
+};
+
+/* for IEEE80211_RADIOTAP_CHANNEL */
+enum ieee80211_radiotap_channel_flags {
+ IEEE80211_CHAN_CCK = 0x0020,
+ IEEE80211_CHAN_OFDM = 0x0040,
+ IEEE80211_CHAN_2GHZ = 0x0080,
+ IEEE80211_CHAN_5GHZ = 0x0100,
+ IEEE80211_CHAN_DYN = 0x0400,
+ IEEE80211_CHAN_HALF = 0x4000,
+ IEEE80211_CHAN_QUARTER = 0x8000,
+};
+
+/* for IEEE80211_RADIOTAP_RX_FLAGS */
+enum ieee80211_radiotap_rx_flags {
+ IEEE80211_RADIOTAP_F_RX_BADPLCP = 0x0002,
+};
+
+/* for IEEE80211_RADIOTAP_TX_FLAGS */
+enum ieee80211_radiotap_tx_flags {
+ IEEE80211_RADIOTAP_F_TX_FAIL = 0x0001,
+ IEEE80211_RADIOTAP_F_TX_CTS = 0x0002,
+ IEEE80211_RADIOTAP_F_TX_RTS = 0x0004,
+ IEEE80211_RADIOTAP_F_TX_NOACK = 0x0008,
+ IEEE80211_RADIOTAP_F_TX_NOSEQNO = 0x0010,
+ IEEE80211_RADIOTAP_F_TX_ORDER = 0x0020,
+};
+
+/* for IEEE80211_RADIOTAP_MCS "have" flags */
+enum ieee80211_radiotap_mcs_have {
+ IEEE80211_RADIOTAP_MCS_HAVE_BW = 0x01,
+ IEEE80211_RADIOTAP_MCS_HAVE_MCS = 0x02,
+ IEEE80211_RADIOTAP_MCS_HAVE_GI = 0x04,
+ IEEE80211_RADIOTAP_MCS_HAVE_FMT = 0x08,
+ IEEE80211_RADIOTAP_MCS_HAVE_FEC = 0x10,
+ IEEE80211_RADIOTAP_MCS_HAVE_STBC = 0x20,
+};
+
+enum ieee80211_radiotap_mcs_flags {
+ IEEE80211_RADIOTAP_MCS_BW_MASK = 0x03,
+ IEEE80211_RADIOTAP_MCS_BW_20 = 0,
+ IEEE80211_RADIOTAP_MCS_BW_40 = 1,
+ IEEE80211_RADIOTAP_MCS_BW_20L = 2,
+ IEEE80211_RADIOTAP_MCS_BW_20U = 3,
+
+ IEEE80211_RADIOTAP_MCS_SGI = 0x04,
+ IEEE80211_RADIOTAP_MCS_FMT_GF = 0x08,
+ IEEE80211_RADIOTAP_MCS_FEC_LDPC = 0x10,
+ IEEE80211_RADIOTAP_MCS_STBC_MASK = 0x60,
+ IEEE80211_RADIOTAP_MCS_STBC_1 = 1,
+ IEEE80211_RADIOTAP_MCS_STBC_2 = 2,
+ IEEE80211_RADIOTAP_MCS_STBC_3 = 3,
+ IEEE80211_RADIOTAP_MCS_STBC_SHIFT = 5,
+};
+
+/* for IEEE80211_RADIOTAP_AMPDU_STATUS */
+enum ieee80211_radiotap_ampdu_flags {
+ IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN = 0x0001,
+ IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN = 0x0002,
+ IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN = 0x0004,
+ IEEE80211_RADIOTAP_AMPDU_IS_LAST = 0x0008,
+ IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR = 0x0010,
+ IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN = 0x0020,
+ IEEE80211_RADIOTAP_AMPDU_EOF = 0x0040,
+ IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN = 0x0080,
+};
+
+/* for IEEE80211_RADIOTAP_VHT */
+enum ieee80211_radiotap_vht_known {
+ IEEE80211_RADIOTAP_VHT_KNOWN_STBC = 0x0001,
+ IEEE80211_RADIOTAP_VHT_KNOWN_TXOP_PS_NA = 0x0002,
+ IEEE80211_RADIOTAP_VHT_KNOWN_GI = 0x0004,
+ IEEE80211_RADIOTAP_VHT_KNOWN_SGI_NSYM_DIS = 0x0008,
+ IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM = 0x0010,
+ IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED = 0x0020,
+ IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH = 0x0040,
+ IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID = 0x0080,
+ IEEE80211_RADIOTAP_VHT_KNOWN_PARTIAL_AID = 0x0100,
+};
+
+enum ieee80211_radiotap_vht_flags {
+ IEEE80211_RADIOTAP_VHT_FLAG_STBC = 0x01,
+ IEEE80211_RADIOTAP_VHT_FLAG_TXOP_PS_NA = 0x02,
+ IEEE80211_RADIOTAP_VHT_FLAG_SGI = 0x04,
+ IEEE80211_RADIOTAP_VHT_FLAG_SGI_NSYM_M10_9 = 0x08,
+ IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM = 0x10,
+ IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED = 0x20,
+};
+
+enum ieee80211_radiotap_vht_coding {
+ IEEE80211_RADIOTAP_CODING_LDPC_USER0 = 0x01,
+ IEEE80211_RADIOTAP_CODING_LDPC_USER1 = 0x02,
+ IEEE80211_RADIOTAP_CODING_LDPC_USER2 = 0x04,
+ IEEE80211_RADIOTAP_CODING_LDPC_USER3 = 0x08,
+};
+
+/* for IEEE80211_RADIOTAP_TIMESTAMP */
+enum ieee80211_radiotap_timestamp_unit_spos {
+ IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK = 0x000F,
+ IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MS = 0x0000,
+ IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US = 0x0001,
+ IEEE80211_RADIOTAP_TIMESTAMP_UNIT_NS = 0x0003,
+ IEEE80211_RADIOTAP_TIMESTAMP_SPOS_MASK = 0x00F0,
+ IEEE80211_RADIOTAP_TIMESTAMP_SPOS_BEGIN_MDPU = 0x0000,
+ IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ = 0x0010,
+ IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_PPDU = 0x0020,
+ IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU = 0x0030,
+ IEEE80211_RADIOTAP_TIMESTAMP_SPOS_UNKNOWN = 0x00F0,
+};
+
+enum ieee80211_radiotap_timestamp_flags {
+ IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT = 0x00,
+ IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT = 0x01,
+ IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY = 0x02,
+};
+
+struct ieee80211_radiotap_he {
+ __le16 data1, data2, data3, data4, data5, data6;
+};
+
+enum ieee80211_radiotap_he_bits {
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MASK = 3,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU = 0,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_EXT_SU = 1,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU = 2,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG = 3,
+
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN = 0x0004,
+ IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN = 0x0008,
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN = 0x0010,
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN = 0x0020,
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN = 0x0040,
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN = 0x0080,
+ IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN = 0x0100,
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN = 0x0200,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN = 0x0400,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN = 0x0800,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN = 0x1000,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN = 0x2000,
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN = 0x0001,
+ IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN = 0x0002,
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN = 0x0004,
+ IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN = 0x0008,
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN = 0x0010,
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN = 0x0020,
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN = 0x0040,
+ IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN = 0x0080,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET = 0x3f00,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR = 0x003f,
+ IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE = 0x0040,
+ IEEE80211_RADIOTAP_HE_DATA3_UL_DL = 0x0080,
+ IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS = 0x0f00,
+ IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM = 0x1000,
+ IEEE80211_RADIOTAP_HE_DATA3_CODING = 0x2000,
+ IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA3_STBC = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID = 0x7ff0,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1 = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2 = 0x00f0,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3 = 0x0f00,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4 = 0xf000,
+
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ = 0,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ = 1,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ = 2,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ = 3,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_26T = 4,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_52T = 5,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_106T = 6,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_242T = 7,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_484T = 8,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_996T = 9,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_2x996T = 10,
+
+ IEEE80211_RADIOTAP_HE_DATA5_GI = 0x0030,
+ IEEE80211_RADIOTAP_HE_DATA5_GI_0_8 = 0,
+ IEEE80211_RADIOTAP_HE_DATA5_GI_1_6 = 1,
+ IEEE80211_RADIOTAP_HE_DATA5_GI_3_2 = 2,
+
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE = 0x00c0,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN = 0,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X = 1,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X = 2,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X = 3,
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS = 0x0700,
+ IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD = 0x3000,
+ IEEE80211_RADIOTAP_HE_DATA5_TXBF = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA6_NSTS = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA6_DOPPLER = 0x0010,
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN = 0x0020,
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW = 0x00c0,
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_20MHZ = 0,
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_40MHZ = 1,
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_80MHZ = 2,
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_160MHZ = 3,
+ IEEE80211_RADIOTAP_HE_DATA6_TXOP = 0x7f00,
+ IEEE80211_RADIOTAP_HE_DATA6_MIDAMBLE_PDCTY = 0x8000,
+};
+
+struct ieee80211_radiotap_he_mu {
+ __le16 flags1, flags2;
+ u8 ru_ch1[4];
+ u8 ru_ch2[4];
+};
+
+enum ieee80211_radiotap_he_mu_bits {
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS = 0x000f,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN = 0x0010,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM = 0x0020,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN = 0x0040,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN = 0x0080,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN = 0x0100,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN = 0x0200,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN = 0x1000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU = 0x2000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN = 0x4000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW = 0x0003,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_20MHZ = 0x0000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_40MHZ = 0x0001,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_80MHZ = 0x0002,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_160MHZ = 0x0003,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN = 0x0004,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP = 0x0008,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS = 0x00f0,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW = 0x0300,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN= 0x0400,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 0x0800,
+};
+
+enum ieee80211_radiotap_lsig_data1 {
+ IEEE80211_RADIOTAP_LSIG_DATA1_RATE_KNOWN = 0x0001,
+ IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN = 0x0002,
+};
+
+enum ieee80211_radiotap_lsig_data2 {
+ IEEE80211_RADIOTAP_LSIG_DATA2_RATE = 0x000f,
+ IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH = 0xfff0,
+};
+
+struct ieee80211_radiotap_lsig {
+ __le16 data1, data2;
+};
+
+enum ieee80211_radiotap_zero_len_psdu_type {
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING = 0,
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED = 1,
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR = 0xff,
+};
+
+/**
+ * ieee80211_get_radiotap_len - get radiotap header length
+ * @data: pointer to the start of the radiotap header
+ */
+static inline u16 ieee80211_get_radiotap_len(const char *data)
+{
+ const struct ieee80211_radiotap_header *hdr = (const void *)data;
+
+ return get_unaligned_le16(&hdr->it_len);
+}
+
+#endif /* __RADIOTAP_H */
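
A kernel-context sketch of the sanity checks a consumer typically performs before skipping the radiotap header on a monitor-mode frame; skip_radiotap() and its buf/buflen arguments are made up for illustration:

static const void *skip_radiotap(const char *buf, size_t buflen)
{
	const struct ieee80211_radiotap_header *hdr = (const void *)buf;
	u16 len;

	if (buflen < sizeof(*hdr) || hdr->it_version != PKTHDR_RADIOTAP_VERSION)
		return NULL;

	len = ieee80211_get_radiotap_len(buf);
	if (len < sizeof(*hdr) || len > buflen)
		return NULL;

	return buf + len;	/* start of the 802.11 frame proper */
}
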
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
new file mode 100644
index 000000000..03b64bf87
--- /dev/null
+++ b/include/net/ieee802154_netdev.h
@@ -0,0 +1,384 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * An interface between IEEE802.15.4 device and rest of the kernel.
+ *
+ * Copyright (C) 2007-2012 Siemens AG
+ *
+ * Written by:
+ * Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Maxim Osipov <maxim.osipov@siemens.com>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#ifndef IEEE802154_NETDEVICE_H
+#define IEEE802154_NETDEVICE_H
+
+#define IEEE802154_REQUIRED_SIZE(struct_type, member) \
+ (offsetof(typeof(struct_type), member) + \
+ sizeof(((typeof(struct_type) *)(NULL))->member))
+
+#define IEEE802154_ADDR_OFFSET \
+ offsetof(typeof(struct sockaddr_ieee802154), addr)
+
+#define IEEE802154_MIN_NAMELEN (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, addr_type))
+
+#define IEEE802154_NAMELEN_SHORT (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, short_addr))
+
+#define IEEE802154_NAMELEN_LONG (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, hwaddr))
+
+#include <net/af_ieee802154.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ieee802154.h>
+
+#include <net/cfg802154.h>
+
+struct ieee802154_sechdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 level:3,
+ key_id_mode:2,
+ reserved:3;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved:3,
+ key_id_mode:2,
+ level:3;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ u8 key_id;
+ __le32 frame_counter;
+ union {
+ __le32 short_src;
+ __le64 extended_src;
+ };
+};
+
+struct ieee802154_hdr_fc {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u16 type:3,
+ security_enabled:1,
+ frame_pending:1,
+ ack_request:1,
+ intra_pan:1,
+ reserved:3,
+ dest_addr_mode:2,
+ version:2,
+ source_addr_mode:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u16 reserved:1,
+ intra_pan:1,
+ ack_request:1,
+ frame_pending:1,
+ security_enabled:1,
+ type:3,
+ source_addr_mode:2,
+ version:2,
+ dest_addr_mode:2,
+ reserved2:2;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+};
+
+struct ieee802154_hdr {
+ struct ieee802154_hdr_fc fc;
+ u8 seq;
+ struct ieee802154_addr source;
+ struct ieee802154_addr dest;
+ struct ieee802154_sechdr sec;
+};
+
+/* Pushes hdr onto the skb. Fields of hdr->fc that can be calculated from
+ * the contents of hdr will be computed, and the actual values of those bits
+ * in hdr->fc will be ignored. This includes the INTRA_PAN bit and the frame
+ * version, if SECEN is set.
+ */
+int ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr);
+
+/* Pulls the entire 802.15.4 header off the skb, including the security
+ * header, and performs pan id decompression
+ */
+int ieee802154_hdr_pull(struct sk_buff *skb, struct ieee802154_hdr *hdr);
+
+/* Parses the frame control, sequence number and address fields in a given skb
+ * and stores them into hdr, performing pan id decompression and length checks
+ * to be suitable for use in header_ops.parse
+ */
+int ieee802154_hdr_peek_addrs(const struct sk_buff *skb,
+ struct ieee802154_hdr *hdr);
+
+/* Parses the full 802.15.4 header of a given skb and stores it into hdr,
+ * performing pan id decompression and length checks to be suitable for use in
+ * header_ops.parse
+ */
+int ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr);
+
+int ieee802154_max_payload(const struct ieee802154_hdr *hdr);
+
+static inline int
+ieee802154_sechdr_authtag_len(const struct ieee802154_sechdr *sec)
+{
+ switch (sec->level) {
+ case IEEE802154_SCF_SECLEVEL_MIC32:
+ case IEEE802154_SCF_SECLEVEL_ENC_MIC32:
+ return 4;
+ case IEEE802154_SCF_SECLEVEL_MIC64:
+ case IEEE802154_SCF_SECLEVEL_ENC_MIC64:
+ return 8;
+ case IEEE802154_SCF_SECLEVEL_MIC128:
+ case IEEE802154_SCF_SECLEVEL_ENC_MIC128:
+ return 16;
+ case IEEE802154_SCF_SECLEVEL_NONE:
+ case IEEE802154_SCF_SECLEVEL_ENC:
+ default:
+ return 0;
+ }
+}
+
+static inline int ieee802154_hdr_length(struct sk_buff *skb)
+{
+ struct ieee802154_hdr hdr;
+ int len = ieee802154_hdr_pull(skb, &hdr);
+
+ if (len > 0)
+ skb_push(skb, len);
+
+ return len;
+}
+
+static inline bool ieee802154_addr_equal(const struct ieee802154_addr *a1,
+ const struct ieee802154_addr *a2)
+{
+ if (a1->pan_id != a2->pan_id || a1->mode != a2->mode)
+ return false;
+
+ if ((a1->mode == IEEE802154_ADDR_LONG &&
+ a1->extended_addr != a2->extended_addr) ||
+ (a1->mode == IEEE802154_ADDR_SHORT &&
+ a1->short_addr != a2->short_addr))
+ return false;
+
+ return true;
+}
+
+static inline __le64 ieee802154_devaddr_from_raw(const void *raw)
+{
+ u64 temp;
+
+ memcpy(&temp, raw, IEEE802154_ADDR_LEN);
+ return (__force __le64)swab64(temp);
+}
+
+static inline void ieee802154_devaddr_to_raw(void *raw, __le64 addr)
+{
+ u64 temp = swab64((__force u64)addr);
+
+ memcpy(raw, &temp, IEEE802154_ADDR_LEN);
+}
+
+static inline int
+ieee802154_sockaddr_check_size(struct sockaddr_ieee802154 *daddr, int len)
+{
+ struct ieee802154_addr_sa *sa;
+ int ret = 0;
+
+ sa = &daddr->addr;
+ if (len < IEEE802154_MIN_NAMELEN)
+ return -EINVAL;
+ switch (sa->addr_type) {
+ case IEEE802154_ADDR_NONE:
+ break;
+ case IEEE802154_ADDR_SHORT:
+ if (len < IEEE802154_NAMELEN_SHORT)
+ ret = -EINVAL;
+ break;
+ case IEEE802154_ADDR_LONG:
+ if (len < IEEE802154_NAMELEN_LONG)
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static inline void ieee802154_addr_from_sa(struct ieee802154_addr *a,
+ const struct ieee802154_addr_sa *sa)
+{
+ a->mode = sa->addr_type;
+ a->pan_id = cpu_to_le16(sa->pan_id);
+
+ switch (a->mode) {
+ case IEEE802154_ADDR_SHORT:
+ a->short_addr = cpu_to_le16(sa->short_addr);
+ break;
+ case IEEE802154_ADDR_LONG:
+ a->extended_addr = ieee802154_devaddr_from_raw(sa->hwaddr);
+ break;
+ }
+}
+
+static inline void ieee802154_addr_to_sa(struct ieee802154_addr_sa *sa,
+ const struct ieee802154_addr *a)
+{
+ sa->addr_type = a->mode;
+ sa->pan_id = le16_to_cpu(a->pan_id);
+
+ switch (a->mode) {
+ case IEEE802154_ADDR_SHORT:
+ sa->short_addr = le16_to_cpu(a->short_addr);
+ break;
+ case IEEE802154_ADDR_LONG:
+ ieee802154_devaddr_to_raw(sa->hwaddr, a->extended_addr);
+ break;
+ }
+}
+
+/*
+ * The control block of an skb passed between the ARPHRD_IEEE802154 device
+ * and other parts of the stack.
+ */
+struct ieee802154_mac_cb {
+ u8 lqi;
+ u8 type;
+ bool ackreq;
+ bool secen;
+ bool secen_override;
+ u8 seclevel;
+ bool seclevel_override;
+ struct ieee802154_addr source;
+ struct ieee802154_addr dest;
+};
+
+static inline struct ieee802154_mac_cb *mac_cb(struct sk_buff *skb)
+{
+ return (struct ieee802154_mac_cb *)skb->cb;
+}
+
+static inline struct ieee802154_mac_cb *mac_cb_init(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb));
+
+ memset(skb->cb, 0, sizeof(struct ieee802154_mac_cb));
+ return mac_cb(skb);
+}
+
+enum {
+ IEEE802154_LLSEC_DEVKEY_IGNORE,
+ IEEE802154_LLSEC_DEVKEY_RESTRICT,
+ IEEE802154_LLSEC_DEVKEY_RECORD,
+
+ __IEEE802154_LLSEC_DEVKEY_MAX,
+};
+
+#define IEEE802154_MAC_SCAN_ED 0
+#define IEEE802154_MAC_SCAN_ACTIVE 1
+#define IEEE802154_MAC_SCAN_PASSIVE 2
+#define IEEE802154_MAC_SCAN_ORPHAN 3
+
+struct ieee802154_mac_params {
+ s8 transmit_power;
+ u8 min_be;
+ u8 max_be;
+ u8 csma_retries;
+ s8 frame_retries;
+
+ bool lbt;
+ struct wpan_phy_cca cca;
+ s32 cca_ed_level;
+};
+
+struct wpan_phy;
+
+enum {
+ IEEE802154_LLSEC_PARAM_ENABLED = BIT(0),
+ IEEE802154_LLSEC_PARAM_FRAME_COUNTER = BIT(1),
+ IEEE802154_LLSEC_PARAM_OUT_LEVEL = BIT(2),
+ IEEE802154_LLSEC_PARAM_OUT_KEY = BIT(3),
+ IEEE802154_LLSEC_PARAM_KEY_SOURCE = BIT(4),
+ IEEE802154_LLSEC_PARAM_PAN_ID = BIT(5),
+ IEEE802154_LLSEC_PARAM_HWADDR = BIT(6),
+ IEEE802154_LLSEC_PARAM_COORD_HWADDR = BIT(7),
+ IEEE802154_LLSEC_PARAM_COORD_SHORTADDR = BIT(8),
+};
+
+struct ieee802154_llsec_ops {
+ int (*get_params)(struct net_device *dev,
+ struct ieee802154_llsec_params *params);
+ int (*set_params)(struct net_device *dev,
+ const struct ieee802154_llsec_params *params,
+ int changed);
+
+ int (*add_key)(struct net_device *dev,
+ const struct ieee802154_llsec_key_id *id,
+ const struct ieee802154_llsec_key *key);
+ int (*del_key)(struct net_device *dev,
+ const struct ieee802154_llsec_key_id *id);
+
+ int (*add_dev)(struct net_device *dev,
+ const struct ieee802154_llsec_device *llsec_dev);
+ int (*del_dev)(struct net_device *dev, __le64 dev_addr);
+
+ int (*add_devkey)(struct net_device *dev,
+ __le64 device_addr,
+ const struct ieee802154_llsec_device_key *key);
+ int (*del_devkey)(struct net_device *dev,
+ __le64 device_addr,
+ const struct ieee802154_llsec_device_key *key);
+
+ int (*add_seclevel)(struct net_device *dev,
+ const struct ieee802154_llsec_seclevel *sl);
+ int (*del_seclevel)(struct net_device *dev,
+ const struct ieee802154_llsec_seclevel *sl);
+
+ void (*lock_table)(struct net_device *dev);
+ void (*get_table)(struct net_device *dev,
+ struct ieee802154_llsec_table **t);
+ void (*unlock_table)(struct net_device *dev);
+};
+/*
+ * This should be located at net_device->ml_priv
+ *
+ * get_phy should increment the reference count on the returned phy.
+ * Use wpan_phy_put to put that reference.
+ */
+struct ieee802154_mlme_ops {
+ /* The following fields are optional (can be NULL). */
+
+ int (*assoc_req)(struct net_device *dev,
+ struct ieee802154_addr *addr,
+ u8 channel, u8 page, u8 cap);
+ int (*assoc_resp)(struct net_device *dev,
+ struct ieee802154_addr *addr,
+ __le16 short_addr, u8 status);
+ int (*disassoc_req)(struct net_device *dev,
+ struct ieee802154_addr *addr,
+ u8 reason);
+ int (*start_req)(struct net_device *dev,
+ struct ieee802154_addr *addr,
+ u8 channel, u8 page, u8 bcn_ord, u8 sf_ord,
+ u8 pan_coord, u8 blx, u8 coord_realign);
+ int (*scan_req)(struct net_device *dev,
+ u8 type, u32 channels, u8 page, u8 duration);
+
+ int (*set_mac_params)(struct net_device *dev,
+ const struct ieee802154_mac_params *params);
+ void (*get_mac_params)(struct net_device *dev,
+ struct ieee802154_mac_params *params);
+
+ const struct ieee802154_llsec_ops *llsec;
+};
+
+static inline struct ieee802154_mlme_ops *
+ieee802154_mlme_ops(const struct net_device *dev)
+{
+ return dev->ml_priv;
+}
+
+#endif
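
A kernel-context sketch of how a sendmsg() handler typically combines the size check and address conversion above (compare the AF_IEEE802154 dgram code); msg and the surrounding error handling are assumed from the caller:

struct sockaddr_ieee802154 *daddr = (struct sockaddr_ieee802154 *)msg->msg_name;
struct ieee802154_addr dst;
int err;

err = ieee802154_sockaddr_check_size(daddr, msg->msg_namelen);
if (err)
	return err;
ieee802154_addr_from_sa(&dst, &daddr->addr);
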
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
new file mode 100644
index 000000000..31bf475ec
--- /dev/null
+++ b/include/net/if_inet6.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * inet6 interface/address list definitions
+ * Linux INET6 implementation
+ *
+ * Authors:
+ * Pedro Roque <roque@di.fc.ul.pt>
+ */
+
+#ifndef _NET_IF_INET6_H
+#define _NET_IF_INET6_H
+
+#include <net/snmp.h>
+#include <linux/ipv6.h>
+#include <linux/refcount.h>
+
+/* inet6_dev.if_flags */
+
+#define IF_RA_OTHERCONF 0x80
+#define IF_RA_MANAGED 0x40
+#define IF_RA_RCVD 0x20
+#define IF_RS_SENT 0x10
+#define IF_READY 0x80000000
+
+enum {
+ INET6_IFADDR_STATE_PREDAD,
+ INET6_IFADDR_STATE_DAD,
+ INET6_IFADDR_STATE_POSTDAD,
+ INET6_IFADDR_STATE_ERRDAD,
+ INET6_IFADDR_STATE_DEAD,
+};
+
+struct inet6_ifaddr {
+ struct in6_addr addr;
+ __u32 prefix_len;
+ __u32 rt_priority;
+
+ /* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. */
+ __u32 valid_lft;
+ __u32 prefered_lft;
+ refcount_t refcnt;
+ spinlock_t lock;
+
+ int state;
+
+ __u32 flags;
+ __u8 dad_probes;
+ __u8 stable_privacy_retry;
+
+ __u16 scope;
+ __u64 dad_nonce;
+
+ unsigned long cstamp; /* created timestamp */
+ unsigned long tstamp; /* updated timestamp */
+
+ struct delayed_work dad_work;
+
+ struct inet6_dev *idev;
+ struct fib6_info *rt;
+
+ struct hlist_node addr_lst;
+ struct list_head if_list;
+ /*
+ * Used to safely traverse idev->addr_list in process context
+ * if the idev->lock needed to protect idev->addr_list cannot be held.
+ * In that case, add the items to this list temporarily and iterate
+ * without holding idev->lock.
+ * See addrconf_ifdown and dev_forward_change.
+ */
+ struct list_head if_list_aux;
+
+ struct list_head tmp_list;
+ struct inet6_ifaddr *ifpub;
+ int regen_count;
+
+ bool tokenized;
+
+ u8 ifa_proto;
+
+ struct rcu_head rcu;
+ struct in6_addr peer_addr;
+};
+
+struct ip6_sf_socklist {
+ unsigned int sl_max;
+ unsigned int sl_count;
+ struct rcu_head rcu;
+ struct in6_addr sl_addr[];
+};
+
+#define IP6_SFBLOCK 10 /* allocate this many at once */
+
+struct ipv6_mc_socklist {
+ struct in6_addr addr;
+ int ifindex;
+ unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */
+ struct ipv6_mc_socklist __rcu *next;
+ struct ip6_sf_socklist __rcu *sflist;
+ struct rcu_head rcu;
+};
+
+struct ip6_sf_list {
+ struct ip6_sf_list __rcu *sf_next;
+ struct in6_addr sf_addr;
+ unsigned long sf_count[2]; /* include/exclude counts */
+ unsigned char sf_gsresp; /* include in g & s response? */
+ unsigned char sf_oldin; /* change state */
+ unsigned char sf_crcount; /* retrans. left to send */
+ struct rcu_head rcu;
+};
+
+#define MAF_TIMER_RUNNING 0x01
+#define MAF_LAST_REPORTER 0x02
+#define MAF_LOADED 0x04
+#define MAF_NOREPORT 0x08
+#define MAF_GSQUERY 0x10
+
+struct ifmcaddr6 {
+ struct in6_addr mca_addr;
+ struct inet6_dev *idev;
+ struct ifmcaddr6 __rcu *next;
+ struct ip6_sf_list __rcu *mca_sources;
+ struct ip6_sf_list __rcu *mca_tomb;
+ unsigned int mca_sfmode;
+ unsigned char mca_crcount;
+ unsigned long mca_sfcount[2];
+ struct delayed_work mca_work;
+ unsigned int mca_flags;
+ int mca_users;
+ refcount_t mca_refcnt;
+ unsigned long mca_cstamp;
+ unsigned long mca_tstamp;
+ struct rcu_head rcu;
+};
+
+/* Anycast stuff */
+
+struct ipv6_ac_socklist {
+ struct in6_addr acl_addr;
+ int acl_ifindex;
+ struct ipv6_ac_socklist *acl_next;
+};
+
+struct ifacaddr6 {
+ struct in6_addr aca_addr;
+ struct fib6_info *aca_rt;
+ struct ifacaddr6 *aca_next;
+ struct hlist_node aca_addr_lst;
+ int aca_users;
+ refcount_t aca_refcnt;
+ unsigned long aca_cstamp;
+ unsigned long aca_tstamp;
+ struct rcu_head rcu;
+};
+
+#define IFA_HOST IPV6_ADDR_LOOPBACK
+#define IFA_LINK IPV6_ADDR_LINKLOCAL
+#define IFA_SITE IPV6_ADDR_SITELOCAL
+
+struct ipv6_devstat {
+ struct proc_dir_entry *proc_dir_entry;
+ DEFINE_SNMP_STAT(struct ipstats_mib, ipv6);
+ DEFINE_SNMP_STAT_ATOMIC(struct icmpv6_mib_device, icmpv6dev);
+ DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib_device, icmpv6msgdev);
+};
+
+struct inet6_dev {
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
+ struct list_head addr_list;
+
+ struct ifmcaddr6 __rcu *mc_list;
+ struct ifmcaddr6 __rcu *mc_tomb;
+
+ unsigned char mc_qrv; /* Query Robustness Variable */
+ unsigned char mc_gq_running;
+ unsigned char mc_ifc_count;
+ unsigned char mc_dad_count;
+
+ unsigned long mc_v1_seen; /* Max time we stay in MLDv1 mode */
+ unsigned long mc_qi; /* Query Interval */
+ unsigned long mc_qri; /* Query Response Interval */
+ unsigned long mc_maxdelay;
+
+ struct delayed_work mc_gq_work; /* general query work */
+ struct delayed_work mc_ifc_work; /* interface change work */
+ struct delayed_work mc_dad_work; /* dad complete mc work */
+ struct delayed_work mc_query_work; /* mld query work */
+ struct delayed_work mc_report_work; /* mld report work */
+
+ struct sk_buff_head mc_query_queue; /* mld query queue */
+ struct sk_buff_head mc_report_queue; /* mld report queue */
+
+ spinlock_t mc_query_lock; /* mld query queue lock */
+ spinlock_t mc_report_lock; /* mld report queue lock */
+ struct mutex mc_lock; /* mld global lock */
+
+ struct ifacaddr6 *ac_list;
+ rwlock_t lock;
+ refcount_t refcnt;
+ __u32 if_flags;
+ int dead;
+
+ u32 desync_factor;
+ struct list_head tempaddr_list;
+
+ struct in6_addr token;
+
+ struct neigh_parms *nd_parms;
+ struct ipv6_devconf cnf;
+ struct ipv6_devstat stats;
+
+ struct timer_list rs_timer;
+ __s32 rs_interval; /* in jiffies */
+ __u8 rs_probes;
+
+ unsigned long tstamp; /* ipv6InterfaceTable update timestamp */
+ struct rcu_head rcu;
+
+ unsigned int ra_mtu;
+};
+
+static inline void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf)
+{
+ /*
+ * +-------+-------+-------+-------+-------+-------+
+ * | 33 | 33 | DST13 | DST14 | DST15 | DST16 |
+ * +-------+-------+-------+-------+-------+-------+
+ */
+
+ buf[0] = 0x33;
+ buf[1] = 0x33;
+
+ memcpy(buf + 2, &addr->s6_addr32[3], sizeof(__u32));
+}
+
+static inline void ipv6_arcnet_mc_map(const struct in6_addr *addr, char *buf)
+{
+ buf[0] = 0x00;
+}
+
+static inline void ipv6_ib_mc_map(const struct in6_addr *addr,
+ const unsigned char *broadcast, char *buf)
+{
+ unsigned char scope = broadcast[5] & 0xF;
+
+ buf[0] = 0; /* Reserved */
+ buf[1] = 0xff; /* Multicast QPN */
+ buf[2] = 0xff;
+ buf[3] = 0xff;
+ buf[4] = 0xff;
+ buf[5] = 0x10 | scope; /* scope from broadcast address */
+ buf[6] = 0x60; /* IPv6 signature */
+ buf[7] = 0x1b;
+ buf[8] = broadcast[8]; /* P_Key */
+ buf[9] = broadcast[9];
+ memcpy(buf + 10, addr->s6_addr + 6, 10);
+}
+
+static inline int ipv6_ipgre_mc_map(const struct in6_addr *addr,
+ const unsigned char *broadcast, char *buf)
+{
+ if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0) {
+ memcpy(buf, broadcast, 4);
+ } else {
+ /* v4mapped? */
+ if ((addr->s6_addr32[0] | addr->s6_addr32[1] |
+ (addr->s6_addr32[2] ^ htonl(0x0000ffff))) != 0)
+ return -EINVAL;
+ memcpy(buf, &addr->s6_addr32[3], 4);
+ }
+ return 0;
+}
+
+#endif
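
As a small worked example of ipv6_eth_mc_map(): for the all-nodes address ff02::1, the last four bytes of the IPv6 address land behind the fixed 33:33 prefix, producing 33:33:00:00:00:01. A kernel-context sketch:

struct in6_addr all_nodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;	/* ff02::1 */
char mac[ETH_ALEN];

ipv6_eth_mc_map(&all_nodes, mac);	/* mac = 33:33:00:00:00:01 */
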
diff --git a/include/net/ife.h b/include/net/ife.h
new file mode 100644
index 000000000..7e2538d85
--- /dev/null
+++ b/include/net/ife.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_IFE_H
+#define __NET_IFE_H
+
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <uapi/linux/ife.h>
+
+#if IS_ENABLED(CONFIG_NET_IFE)
+
+void *ife_encode(struct sk_buff *skb, u16 metalen);
+void *ife_decode(struct sk_buff *skb, u16 *metalen);
+
+void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype,
+ u16 *dlen, u16 *totlen);
+int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
+ const void *dval);
+
+void *ife_tlv_meta_next(void *skbdata);
+
+#else
+
+static inline void *ife_encode(struct sk_buff *skb, u16 metalen)
+{
+ return NULL;
+}
+
+static inline void *ife_decode(struct sk_buff *skb, u16 *metalen)
+{
+ return NULL;
+}
+
+static inline void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end,
+ u16 *attrtype, u16 *dlen, u16 *totlen)
+{
+ return NULL;
+}
+
+static inline int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
+ const void *dval)
+{
+ return 0;
+}
+
+static inline void *ife_tlv_meta_next(void *skbdata)
+{
+ return NULL;
+}
+
+#endif
+
+#endif /* __NET_IFE_H */
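
A kernel-context sketch of the TLV walk these helpers support, modelled loosely on tcf_ife_decode(); all local names are made up:

u16 metalen, attrtype, dlen, totlen;
void *ifehdr = ife_decode(skb, &metalen);

if (ifehdr) {
	const void *ifehdr_end = ifehdr + metalen;
	void *cur = ifehdr;

	while (cur < ifehdr_end) {
		void *val = ife_tlv_meta_decode(cur, ifehdr_end, &attrtype,
						&dlen, &totlen);
		if (!val)
			break;			/* malformed TLV */
		/* consume (attrtype, val, dlen) here */
		cur = ife_tlv_meta_next(cur);
	}
}
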
diff --git a/include/net/ila.h b/include/net/ila.h
new file mode 100644
index 000000000..73ebe5eab
--- /dev/null
+++ b/include/net/ila.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * ILA kernel interface
+ *
+ * Copyright (c) 2015 Tom Herbert <tom@herbertland.com>
+ */
+
+#ifndef _NET_ILA_H
+#define _NET_ILA_H
+
+struct sk_buff;
+
+int ila_xlat_outgoing(struct sk_buff *skb);
+int ila_xlat_incoming(struct sk_buff *skb);
+
+#endif /* _NET_ILA_H */
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
new file mode 100644
index 000000000..025bd8d3c
--- /dev/null
+++ b/include/net/inet6_connection_sock.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * NET Generic infrastructure for INET6 connection oriented protocols.
+ *
+ * Authors: Many people, see the TCPv6 sources
+ *
+ * From code originally in TCPv6
+ */
+#ifndef _INET6_CONNECTION_SOCK_H
+#define _INET6_CONNECTION_SOCK_H
+
+#include <linux/types.h>
+
+struct flowi;
+struct flowi6;
+struct request_sock;
+struct sk_buff;
+struct sock;
+struct sockaddr;
+
+struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
+ const struct request_sock *req, u8 proto);
+
+void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
+
+int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
+
+struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
+#endif /* _INET6_CONNECTION_SOCK_H */
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
new file mode 100644
index 000000000..56f128658
--- /dev/null
+++ b/include/net/inet6_hashtables.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Authors: Lotsa people, from code originally in tcp
+ */
+
+#ifndef _INET6_HASHTABLES_H
+#define _INET6_HASHTABLES_H
+
+
+#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/in6.h>
+#include <linux/ipv6.h>
+#include <linux/types.h>
+#include <linux/jhash.h>
+
+#include <net/inet_sock.h>
+
+#include <net/ipv6.h>
+#include <net/netns/hash.h>
+
+struct inet_hashinfo;
+
+static inline unsigned int __inet6_ehashfn(const u32 lhash,
+ const u16 lport,
+ const u32 fhash,
+ const __be16 fport,
+ const u32 initval)
+{
+ const u32 ports = (((u32)lport) << 16) | (__force u32)fport;
+
+ return jhash_3words(lhash, fhash, ports, initval);
+}
+
+/*
+ * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
+ * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
+ *
+ * The sockhash lock must be held as a reader here.
+ */
+struct sock *__inet6_lookup_established(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const struct in6_addr *saddr,
+ const __be16 sport,
+ const struct in6_addr *daddr,
+ const u16 hnum, const int dif,
+ const int sdif);
+
+struct sock *inet6_lookup_listener(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb, int doff,
+ const struct in6_addr *saddr,
+ const __be16 sport,
+ const struct in6_addr *daddr,
+ const unsigned short hnum,
+ const int dif, const int sdif);
+
+static inline struct sock *__inet6_lookup(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb, int doff,
+ const struct in6_addr *saddr,
+ const __be16 sport,
+ const struct in6_addr *daddr,
+ const u16 hnum,
+ const int dif, const int sdif,
+ bool *refcounted)
+{
+ struct sock *sk = __inet6_lookup_established(net, hashinfo, saddr,
+ sport, daddr, hnum,
+ dif, sdif);
+ *refcounted = true;
+ if (sk)
+ return sk;
+ *refcounted = false;
+ return inet6_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
+ daddr, hnum, dif, sdif);
+}
+
+static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb, int doff,
+ const __be16 sport,
+ const __be16 dport,
+ int iif, int sdif,
+ bool *refcounted)
+{
+ struct sock *sk = skb_steal_sock(skb, refcounted);
+
+ if (sk)
+ return sk;
+
+ return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
+ doff, &ipv6_hdr(skb)->saddr, sport,
+ &ipv6_hdr(skb)->daddr, ntohs(dport),
+ iif, sdif, refcounted);
+}
+
+struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb, int doff,
+ const struct in6_addr *saddr, const __be16 sport,
+ const struct in6_addr *daddr, const __be16 dport,
+ const int dif);
+
+int inet6_hash(struct sock *sk);
+
+static inline bool inet6_match(struct net *net, const struct sock *sk,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ const __portpair ports,
+ const int dif, const int sdif)
+{
+ if (!net_eq(sock_net(sk), net) ||
+ sk->sk_family != AF_INET6 ||
+ sk->sk_portpair != ports ||
+ !ipv6_addr_equal(&sk->sk_v6_daddr, saddr) ||
+ !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
+ return false;
+
+ /* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */
+ return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif,
+ sdif);
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
+#endif /* _INET6_HASHTABLES_H */
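
To make the hash input concrete: __inet6_ehashfn() packs the local port into the high half and the (big-endian) foreign port into the low half of one 32-bit word before hashing. A self-contained userspace check of that packing, with an arbitrary stand-in foreign-port value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t lport = 443;		/* host order, 0x01bb */
	uint16_t fport = 0x901f;	/* stand-in for a __be16 value */
	uint32_t ports = ((uint32_t)lport << 16) | fport;

	printf("0x%08x\n", ports);	/* 0x01bb901f */
	return 0;
}
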
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
new file mode 100644
index 000000000..4673bbfd2
--- /dev/null
+++ b/include/net/inet_common.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _INET_COMMON_H
+#define _INET_COMMON_H
+
+#include <linux/indirect_call_wrapper.h>
+#include <linux/net.h>
+#include <linux/netdev_features.h>
+#include <linux/types.h>
+#include <net/sock.h>
+
+extern const struct proto_ops inet_stream_ops;
+extern const struct proto_ops inet_dgram_ops;
+
+/*
+ * INET4 prototypes used by INET6
+ */
+
+struct msghdr;
+struct net;
+struct page;
+struct sock;
+struct sockaddr;
+struct socket;
+
+int inet_release(struct socket *sock);
+int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags);
+int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags, int is_sendmsg);
+int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags);
+int inet_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern);
+int inet_send_prepare(struct sock *sk);
+int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
+void inet_splice_eof(struct socket *sock);
+ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
+ size_t size, int flags);
+int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
+int inet_shutdown(struct socket *sock, int how);
+int inet_listen(struct socket *sock, int backlog);
+void inet_sock_destruct(struct sock *sk);
+int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+/* Don't allocate port at this moment, defer to connect. */
+#define BIND_FORCE_ADDRESS_NO_PORT (1 << 0)
+/* Grab and release socket lock. */
+#define BIND_WITH_LOCK (1 << 1)
+/* Called from BPF program. */
+#define BIND_FROM_BPF (1 << 2)
+/* Skip CAP_NET_BIND_SERVICE check. */
+#define BIND_NO_CAP_NET_BIND_SERVICE (1 << 3)
+int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
+ u32 flags);
+int inet_getname(struct socket *sock, struct sockaddr *uaddr,
+ int peer);
+int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+int inet_ctl_sock_create(struct sock **sk, unsigned short family,
+ unsigned short type, unsigned char protocol,
+ struct net *net);
+int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
+ int *addr_len);
+
+struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb);
+int inet_gro_complete(struct sk_buff *skb, int nhoff);
+struct sk_buff *inet_gso_segment(struct sk_buff *skb,
+ netdev_features_t features);
+
+static inline void inet_ctl_sock_destroy(struct sock *sk)
+{
+ if (sk)
+ sock_release(sk->sk_socket);
+}
+
+#define indirect_call_gro_receive(f2, f1, cb, head, skb) \
+({ \
+ unlikely(gro_recursion_inc_test(skb)) ? \
+ NAPI_GRO_CB(skb)->flush |= 1, NULL : \
+ INDIRECT_CALL_2(cb, f2, f1, head, skb); \
+})
+
+#endif
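
For context, the indirect_call_gro_receive() macro is used like this in the IPv4 GRO path (cf. inet_gro_receive() in net/ipv4/af_inet.c), so the common TCP/UDP cases become direct calls rather than retpoline-hit indirect ones; pp, ops, head and skb are assumed from the surrounding function:

pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
			       ops->callbacks.gro_receive, head, skb);
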
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
new file mode 100644
index 000000000..080968d6e
--- /dev/null
+++ b/include/net/inet_connection_sock.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * NET Generic infrastructure for INET connection oriented protocols.
+ *
+ * Definitions for inet_connection_sock
+ *
+ * Authors: Many people, see the TCP sources
+ *
+ * From code originally in TCP
+ */
+#ifndef _INET_CONNECTION_SOCK_H
+#define _INET_CONNECTION_SOCK_H
+
+#include <linux/compiler.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/poll.h>
+#include <linux/kernel.h>
+#include <linux/sockptr.h>
+
+#include <net/inet_sock.h>
+#include <net/request_sock.h>
+
+/* Cancel timers, when they are not required. */
+#undef INET_CSK_CLEAR_TIMERS
+
+struct inet_bind_bucket;
+struct inet_bind2_bucket;
+struct tcp_congestion_ops;
+
+/*
+ * Pointers to address related TCP functions
+ * (i.e. things that depend on the address family)
+ */
+struct inet_connection_sock_af_ops {
+ int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
+ void (*send_check)(struct sock *sk, struct sk_buff *skb);
+ int (*rebuild_header)(struct sock *sk);
+ void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
+ int (*conn_request)(struct sock *sk, struct sk_buff *skb);
+ struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct dst_entry *dst,
+ struct request_sock *req_unhash,
+ bool *own_req);
+ u16 net_header_len;
+ u16 net_frag_header_len;
+ u16 sockaddr_len;
+ int (*setsockopt)(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
+ int (*getsockopt)(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+ void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
+ void (*mtu_reduced)(struct sock *sk);
+};
+
+/** struct inet_connection_sock - INET connection oriented sock
+ *
+ * @icsk_accept_queue: FIFO of established children
+ * @icsk_bind_hash: Bind node
+ * @icsk_bind2_hash: Bind node in the bhash2 table
+ * @icsk_timeout: Timeout
+ * @icsk_retransmit_timer: Resend (no ack)
+ * @icsk_rto: Retransmit timeout
+ * @icsk_pmtu_cookie: Last pmtu seen by socket
+ * @icsk_ca_ops: Pluggable congestion control hook
+ * @icsk_af_ops: Operations which are AF_INET{4,6} specific
+ * @icsk_ulp_ops: Pluggable ULP control hook
+ * @icsk_ulp_data: ULP private data
+ * @icsk_clean_acked: Clean acked data hook
+ * @icsk_ca_state: Congestion control state
+ * @icsk_retransmits: Number of unrecovered [RTO] timeouts
+ * @icsk_pending: Scheduled timer event
+ * @icsk_backoff: Backoff
+ * @icsk_syn_retries: Number of allowed SYN (or equivalent) retries
+ * @icsk_probes_out: unanswered 0 window probes
+ * @icsk_ext_hdr_len: Network protocol overhead (IP/IPv6 options)
+ * @icsk_ack: Delayed ACK control data
+ * @icsk_mtup: MTU probing control data
+ * @icsk_probes_tstamp: Probe timestamp (cleared by non-zero window ack)
+ * @icsk_user_timeout: TCP_USER_TIMEOUT value
+ */
+struct inet_connection_sock {
+ /* inet_sock has to be the first member! */
+ struct inet_sock icsk_inet;
+ struct request_sock_queue icsk_accept_queue;
+ struct inet_bind_bucket *icsk_bind_hash;
+ struct inet_bind2_bucket *icsk_bind2_hash;
+ unsigned long icsk_timeout;
+ struct timer_list icsk_retransmit_timer;
+ struct timer_list icsk_delack_timer;
+ __u32 icsk_rto;
+ __u32 icsk_rto_min;
+ __u32 icsk_delack_max;
+ __u32 icsk_pmtu_cookie;
+ const struct tcp_congestion_ops *icsk_ca_ops;
+ const struct inet_connection_sock_af_ops *icsk_af_ops;
+ const struct tcp_ulp_ops *icsk_ulp_ops;
+ void __rcu *icsk_ulp_data;
+ void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
+ unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
+ __u8 icsk_ca_state:5,
+ icsk_ca_initialized:1,
+ icsk_ca_setsockopt:1,
+ icsk_ca_dst_locked:1;
+ __u8 icsk_retransmits;
+ __u8 icsk_pending;
+ __u8 icsk_backoff;
+ __u8 icsk_syn_retries;
+ __u8 icsk_probes_out;
+ __u16 icsk_ext_hdr_len;
+ struct {
+ __u8 pending; /* ACK is pending */
+ __u8 quick; /* Scheduled number of quick acks */
+ __u8 pingpong; /* The session is interactive */
+ __u8 retry; /* Number of attempts */
+ __u32 ato; /* Predicted tick of soft clock */
+ unsigned long timeout; /* Currently scheduled timeout */
+ __u32 lrcvtime; /* timestamp of last received data packet */
+ __u16 last_seg_size; /* Size of last incoming segment */
+ __u16 rcv_mss; /* MSS used for delayed ACK decisions */
+ } icsk_ack;
+ struct {
+ /* Range of MTUs to search */
+ int search_high;
+ int search_low;
+
+ /* Information on the current probe. */
+ u32 probe_size:31,
+ /* Is the MTUP feature enabled for this connection? */
+ enabled:1;
+
+ u32 probe_timestamp;
+ } icsk_mtup;
+ u32 icsk_probes_tstamp;
+ u32 icsk_user_timeout;
+
+ u64 icsk_ca_priv[104 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE sizeof_field(struct inet_connection_sock, icsk_ca_priv)
+};
+
+#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
+#define ICSK_TIME_DACK 2 /* Delayed ack timer */
+#define ICSK_TIME_PROBE0 3 /* Zero window probe timer */
+#define ICSK_TIME_LOSS_PROBE 5 /* Tail loss probe timer */
+#define ICSK_TIME_REO_TIMEOUT 6 /* Reordering timer */
+
+static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
+{
+ return (struct inet_connection_sock *)sk;
+}
+
+static inline void *inet_csk_ca(const struct sock *sk)
+{
+ return (void *)inet_csk(sk)->icsk_ca_priv;
+}
+
+struct sock *inet_csk_clone_lock(const struct sock *sk,
+ const struct request_sock *req,
+ const gfp_t priority);
+
+enum inet_csk_ack_state_t {
+ ICSK_ACK_SCHED = 1,
+ ICSK_ACK_TIMER = 2,
+ ICSK_ACK_PUSHED = 4,
+ ICSK_ACK_PUSHED2 = 8,
+ ICSK_ACK_NOW = 16 /* Send the next ACK immediately (once) */
+};
+
+void inet_csk_init_xmit_timers(struct sock *sk,
+ void (*retransmit_handler)(struct timer_list *),
+ void (*delack_handler)(struct timer_list *),
+ void (*keepalive_handler)(struct timer_list *));
+void inet_csk_clear_xmit_timers(struct sock *sk);
+
+static inline void inet_csk_schedule_ack(struct sock *sk)
+{
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
+}
+
+static inline int inet_csk_ack_scheduled(const struct sock *sk)
+{
+ return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
+}
+
+static inline void inet_csk_delack_init(struct sock *sk)
+{
+ memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
+}
+
+void inet_csk_delete_keepalive_timer(struct sock *sk);
+void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
+
+static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
+ icsk->icsk_pending = 0;
+#ifdef INET_CSK_CLEAR_TIMERS
+ sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
+#endif
+ } else if (what == ICSK_TIME_DACK) {
+ icsk->icsk_ack.pending = 0;
+ icsk->icsk_ack.retry = 0;
+#ifdef INET_CSK_CLEAR_TIMERS
+ sk_stop_timer(sk, &icsk->icsk_delack_timer);
+#endif
+ } else {
+ pr_debug("inet_csk BUG: unknown timer value\n");
+ }
+}
+
+/*
+ * Reset the retransmission timer
+ */
+static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
+ unsigned long when,
+ const unsigned long max_when)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (when > max_when) {
+ pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
+ sk, what, when, (void *)_THIS_IP_);
+ when = max_when;
+ }
+
+ if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
+ what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) {
+ icsk->icsk_pending = what;
+ icsk->icsk_timeout = jiffies + when;
+ sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
+ } else if (what == ICSK_TIME_DACK) {
+ icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
+ icsk->icsk_ack.timeout = jiffies + when;
+ sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
+ } else {
+ pr_debug("inet_csk BUG: unknown timer value\n");
+ }
+}
+
+static inline unsigned long
+inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
+ unsigned long max_when)
+{
+ u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;
+
+ return (unsigned long)min_t(u64, when, max_when);
+}
+
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);
+
+int inet_csk_get_port(struct sock *sk, unsigned short snum);
+
+struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
+ const struct request_sock *req);
+struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
+ struct sock *newsk,
+ const struct request_sock *req);
+
+struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
+ struct request_sock *req,
+ struct sock *child);
+void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+ unsigned long timeout);
+struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+ struct request_sock *req,
+ bool own_req);
+
+static inline void inet_csk_reqsk_queue_added(struct sock *sk)
+{
+ reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
+}
+
+static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
+{
+ return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
+}
+
+static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
+{
+ return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
+}
+
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
+void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
+
+static inline unsigned long
+reqsk_timeout(struct request_sock *req, unsigned long max_timeout)
+{
+ u64 timeout = (u64)req->timeout << req->num_timeout;
+
+ return (unsigned long)min_t(u64, timeout, max_timeout);
+}
+
+static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
+{
+ /* The below has to be done to allow calling inet_csk_destroy_sock */
+ sock_set_flag(sk, SOCK_DEAD);
+ this_cpu_inc(*sk->sk_prot->orphan_count);
+}
+
+void inet_csk_destroy_sock(struct sock *sk);
+void inet_csk_prepare_forced_close(struct sock *sk);
+
+/*
+ * LISTEN is a special case for poll.
+ */
+static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
+{
+ return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
+ (EPOLLIN | EPOLLRDNORM) : 0;
+}
+
+int inet_csk_listen_start(struct sock *sk);
+void inet_csk_listen_stop(struct sock *sk);
+
+void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
+
+/* update the fast reuse flag when adding a socket */
+void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
+ struct sock *sk);
+
+struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
+
+#define TCP_PINGPONG_THRESH 1
+
+static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
+{
+ inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH;
+}
+
+static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
+{
+ inet_csk(sk)->icsk_ack.pingpong = 0;
+}
+
+static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
+{
+ return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
+}
+
+static inline bool inet_csk_has_ulp(struct sock *sk)
+{
+ return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
+}
+
+static inline void inet_init_csk_locks(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ spin_lock_init(&icsk->icsk_accept_queue.rskq_lock);
+ spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock);
+}
+
+#endif /* _INET_CONNECTION_SOCK_H */
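
A self-contained userspace illustration of the inet_csk_rto_backoff() arithmetic: the timeout doubles per backoff step (a left shift done in a u64 to avoid overflow) and is then clamped to max_when:

#include <stdint.h>
#include <stdio.h>

static unsigned long rto_backoff(unsigned long rto, unsigned int backoff,
				 unsigned long max_when)
{
	uint64_t when = (uint64_t)rto << backoff;

	return when > max_when ? max_when : (unsigned long)when;
}

int main(void)
{
	/* rto = 200 ticks, backoff = 4 -> 3200, clamped to 3000 */
	printf("%lu\n", rto_backoff(200, 4, 3000));
	return 0;
}
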
diff --git a/include/net/inet_dscp.h b/include/net/inet_dscp.h
new file mode 100644
index 000000000..72f250dff
--- /dev/null
+++ b/include/net/inet_dscp.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * inet_dscp.h: helpers for handling differentiated services codepoints (DSCP)
+ *
+ * DSCP is defined in RFC 2474:
+ *
+ * 0 1 2 3 4 5 6 7
+ * +---+---+---+---+---+---+---+---+
+ * | DSCP | CU |
+ * +---+---+---+---+---+---+---+---+
+ *
+ * DSCP: differentiated services codepoint
+ * CU: currently unused
+ *
+ * The whole DSCP + CU bits form the DS field.
+ * The DS field is also commonly called TOS or Traffic Class (for IPv6).
+ *
+ * Note: the CU bits are now used for Explicit Congestion Notification
+ * (RFC 3168).
+ */
+
+#ifndef _INET_DSCP_H
+#define _INET_DSCP_H
+
+#include <linux/types.h>
+
+/* Special type for storing DSCP values.
+ *
+ * A dscp_t variable stores a DS field with the CU (ECN) bits cleared.
+ * Using dscp_t makes it possible to strictly separate DSCP and ECN bits, thus avoiding
+ * bugs where ECN bits are erroneously taken into account during FIB lookups
+ * or policy routing.
+ *
+ * Note: to get the real DSCP value contained in a dscp_t variable one would
+ * have to do a bit shift after calling inet_dscp_to_dsfield(). We could have
+ * a helper for that, but there are currently no users.
+ */
+typedef u8 __bitwise dscp_t;
+
+#define INET_DSCP_MASK 0xfc
+
+static inline dscp_t inet_dsfield_to_dscp(__u8 dsfield)
+{
+ return (__force dscp_t)(dsfield & INET_DSCP_MASK);
+}
+
+static inline __u8 inet_dscp_to_dsfield(dscp_t dscp)
+{
+ return (__force __u8)dscp;
+}
+
+static inline bool inet_validate_dscp(__u8 val)
+{
+ return !(val & ~INET_DSCP_MASK);
+}
+
+#endif /* _INET_DSCP_H */
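
A self-contained userspace check of what the dscp_t helpers guarantee: masking drops the two ECN bits, so a DS field of 0xb9 (DSCP 46/EF with ECT(1) set) reduces to the same value as a clean 0xb8, and fails inet_validate_dscp():

#include <stdint.h>
#include <stdio.h>

#define DSCP_MASK 0xfc	/* mirrors INET_DSCP_MASK */

int main(void)
{
	uint8_t dsfield = 0xb9;			/* EF (46) plus ECT(1) */
	uint8_t dscp = dsfield & DSCP_MASK;	/* inet_dsfield_to_dscp() */

	printf("0x%02x\n", dscp);		/* 0xb8: ECN bits cleared */
	printf("%d\n", !(dsfield & ~DSCP_MASK));/* inet_validate_dscp(): 0 */
	return 0;
}
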
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
new file mode 100644
index 000000000..ea3239346
--- /dev/null
+++ b/include/net/inet_ecn.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _INET_ECN_H_
+#define _INET_ECN_H_
+
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+
+#include <net/inet_sock.h>
+#include <net/dsfield.h>
+#include <net/checksum.h>
+
+enum {
+ INET_ECN_NOT_ECT = 0,
+ INET_ECN_ECT_1 = 1,
+ INET_ECN_ECT_0 = 2,
+ INET_ECN_CE = 3,
+ INET_ECN_MASK = 3,
+};
+
+extern int sysctl_tunnel_ecn_log;
+
+static inline int INET_ECN_is_ce(__u8 dsfield)
+{
+ return (dsfield & INET_ECN_MASK) == INET_ECN_CE;
+}
+
+static inline int INET_ECN_is_not_ect(__u8 dsfield)
+{
+ return (dsfield & INET_ECN_MASK) == INET_ECN_NOT_ECT;
+}
+
+static inline int INET_ECN_is_capable(__u8 dsfield)
+{
+ return dsfield & INET_ECN_ECT_0;
+}
+
+/*
+ * RFC 3168 9.1.1
+ * The full-functionality option for ECN encapsulation is to copy the
+ * ECN codepoint of the inside header to the outside header on
+ * encapsulation if the inside header is not-ECT or ECT, and to set the
+ * ECN codepoint of the outside header to ECT(0) if the ECN codepoint of
+ * the inside header is CE.
+ */
+static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
+{
+ outer &= ~INET_ECN_MASK;
+ outer |= !INET_ECN_is_ce(inner) ? (inner & INET_ECN_MASK) :
+ INET_ECN_ECT_0;
+ return outer;
+}
+
+static inline void INET_ECN_xmit(struct sock *sk)
+{
+ inet_sk(sk)->tos |= INET_ECN_ECT_0;
+ if (inet6_sk(sk) != NULL)
+ inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
+}
+
+static inline void INET_ECN_dontxmit(struct sock *sk)
+{
+ inet_sk(sk)->tos &= ~INET_ECN_MASK;
+ if (inet6_sk(sk) != NULL)
+ inet6_sk(sk)->tclass &= ~INET_ECN_MASK;
+}
+
+#define IP6_ECN_flow_init(label) do { \
+ (label) &= ~htonl(INET_ECN_MASK << 20); \
+ } while (0)
+
+#define IP6_ECN_flow_xmit(sk, label) do { \
+ if (INET_ECN_is_capable(inet6_sk(sk)->tclass)) \
+ (label) |= htonl(INET_ECN_ECT_0 << 20); \
+ } while (0)
+
+static inline int IP_ECN_set_ce(struct iphdr *iph)
+{
+ u32 ecn = (iph->tos + 1) & INET_ECN_MASK;
+ __be16 check_add;
+
+ /*
+ * After the last operation we have (in binary):
+ * INET_ECN_NOT_ECT => 01
+ * INET_ECN_ECT_1 => 10
+ * INET_ECN_ECT_0 => 11
+ * INET_ECN_CE => 00
+ */
+ if (!(ecn & 2))
+ return !ecn;
+
+ /*
+ * The following gives us:
+ * INET_ECN_ECT_1 => check += htons(0xFFFD)
+ * INET_ECN_ECT_0 => check += htons(0xFFFE)
+ */
+ check_add = (__force __be16)((__force u16)htons(0xFFFB) +
+ (__force u16)htons(ecn));
+
+ iph->check = csum16_add(iph->check, check_add);
+ iph->tos |= INET_ECN_CE;
+ return 1;
+}
+
+static inline int IP_ECN_set_ect1(struct iphdr *iph)
+{
+ if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
+ return 0;
+
+ iph->check = csum16_add(iph->check, htons(0x1));
+ iph->tos ^= INET_ECN_MASK;
+ return 1;
+}
+
+static inline void IP_ECN_clear(struct iphdr *iph)
+{
+ iph->tos &= ~INET_ECN_MASK;
+}
+
+static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner)
+{
+ dscp &= ~INET_ECN_MASK;
+ ipv4_change_dsfield(inner, INET_ECN_MASK, dscp);
+}
+
+struct ipv6hdr;
+
+/* Note:
+ * IP_ECN_set_ce() has to tweak the IPv4 checksum when setting CE,
+ * so the two changes cancel out and skb->csum is unaffected if/when
+ * CHECKSUM_COMPLETE is in use. In the IPv6 case, no checksum compensates
+ * for the change in the IPv6 header, so we have to update skb->csum.
+ */
+static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
+{
+ __be32 from, to;
+
+ if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
+ return 0;
+
+ from = *(__be32 *)iph;
+ to = from | htonl(INET_ECN_CE << 20);
+ *(__be32 *)iph = to;
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
+ (__force __wsum)to);
+ return 1;
+}
+
+static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph)
+{
+ __be32 from, to;
+
+ if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0)
+ return 0;
+
+ from = *(__be32 *)iph;
+ to = from ^ htonl(INET_ECN_MASK << 20);
+ *(__be32 *)iph = to;
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
+ (__force __wsum)to);
+ return 1;
+}
+
+static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
+{
+ dscp &= ~INET_ECN_MASK;
+ ipv6_change_dsfield(inner, INET_ECN_MASK, dscp);
+}
+
+static inline int INET_ECN_set_ce(struct sk_buff *skb)
+{
+ switch (skb_protocol(skb, true)) {
+ case cpu_to_be16(ETH_P_IP):
+ if (skb_network_header(skb) + sizeof(struct iphdr) <=
+ skb_tail_pointer(skb))
+ return IP_ECN_set_ce(ip_hdr(skb));
+ break;
+
+ case cpu_to_be16(ETH_P_IPV6):
+ if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
+ skb_tail_pointer(skb))
+ return IP6_ECN_set_ce(skb, ipv6_hdr(skb));
+ break;
+ }
+
+ return 0;
+}
+
+static inline int skb_get_dsfield(struct sk_buff *skb)
+{
+ switch (skb_protocol(skb, true)) {
+ case cpu_to_be16(ETH_P_IP):
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ break;
+ return ipv4_get_dsfield(ip_hdr(skb));
+
+ case cpu_to_be16(ETH_P_IPV6):
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ break;
+ return ipv6_get_dsfield(ipv6_hdr(skb));
+ }
+
+ return -1;
+}
+
+static inline int INET_ECN_set_ect1(struct sk_buff *skb)
+{
+ switch (skb_protocol(skb, true)) {
+ case cpu_to_be16(ETH_P_IP):
+ if (skb_network_header(skb) + sizeof(struct iphdr) <=
+ skb_tail_pointer(skb))
+ return IP_ECN_set_ect1(ip_hdr(skb));
+ break;
+
+ case cpu_to_be16(ETH_P_IPV6):
+ if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
+ skb_tail_pointer(skb))
+ return IP6_ECN_set_ect1(skb, ipv6_hdr(skb));
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * RFC 6040 4.2
+ * To decapsulate the inner header at the tunnel egress, a compliant
+ * tunnel egress MUST set the outgoing ECN field to the codepoint at the
+ * intersection of the appropriate arriving inner header (row) and outer
+ * header (column) in Figure 4
+ *
+ * +---------+------------------------------------------------+
+ * |Arriving | Arriving Outer Header |
+ * | Inner +---------+------------+------------+------------+
+ * | Header | Not-ECT | ECT(0) | ECT(1) | CE |
+ * +---------+---------+------------+------------+------------+
+ * | Not-ECT | Not-ECT |Not-ECT(!!!)|Not-ECT(!!!)| <drop>(!!!)|
+ * | ECT(0) | ECT(0) | ECT(0) | ECT(1) | CE |
+ * | ECT(1) | ECT(1) | ECT(1) (!) | ECT(1) | CE |
+ * | CE | CE | CE | CE(!!!)| CE |
+ * +---------+---------+------------+------------+------------+
+ *
+ * Figure 4: New IP in IP Decapsulation Behaviour
+ *
+ * returns 0 on success
+ * 1 if something is broken and should be logged (!!! above)
+ * 2 if packet should be dropped
+ */
+static inline int __INET_ECN_decapsulate(__u8 outer, __u8 inner, bool *set_ce)
+{
+ if (INET_ECN_is_not_ect(inner)) {
+ switch (outer & INET_ECN_MASK) {
+ case INET_ECN_NOT_ECT:
+ return 0;
+ case INET_ECN_ECT_0:
+ case INET_ECN_ECT_1:
+ return 1;
+ case INET_ECN_CE:
+ return 2;
+ }
+ }
+
+ *set_ce = INET_ECN_is_ce(outer);
+ return 0;
+}
+
+static inline int INET_ECN_decapsulate(struct sk_buff *skb,
+ __u8 outer, __u8 inner)
+{
+ bool set_ce = false;
+ int rc;
+
+ rc = __INET_ECN_decapsulate(outer, inner, &set_ce);
+ if (!rc) {
+ if (set_ce)
+ INET_ECN_set_ce(skb);
+ else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1)
+ INET_ECN_set_ect1(skb);
+ }
+
+ return rc;
+}
+
+static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
+ struct sk_buff *skb)
+{
+ __u8 inner;
+
+ switch (skb_protocol(skb, true)) {
+ case htons(ETH_P_IP):
+ inner = ip_hdr(skb)->tos;
+ break;
+ case htons(ETH_P_IPV6):
+ inner = ipv6_get_dsfield(ipv6_hdr(skb));
+ break;
+ default:
+ return 0;
+ }
+
+ return INET_ECN_decapsulate(skb, oiph->tos, inner);
+}
+
+static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h,
+ struct sk_buff *skb)
+{
+ __u8 inner;
+
+ switch (skb_protocol(skb, true)) {
+ case htons(ETH_P_IP):
+ inner = ip_hdr(skb)->tos;
+ break;
+ case htons(ETH_P_IPV6):
+ inner = ipv6_get_dsfield(ipv6_hdr(skb));
+ break;
+ default:
+ return 0;
+ }
+
+ return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
+}
+#endif
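The arithmetic in IP_ECN_set_ce() is compact enough to deserve a worked check. Here is a standalone sketch (host C, not kernel code) that reproduces the (tos + 1) & 3 remapping and the two checksum deltas named in the comments:

#include <stdio.h>

#define INET_ECN_MASK 3

int main(void)
{
	static const char *name[4] = { "Not-ECT", "ECT(1)", "ECT(0)", "CE" };
	unsigned tos;

	for (tos = 0; tos < 4; tos++) {
		unsigned ecn = (tos + 1) & INET_ECN_MASK;

		if (!(ecn & 2)) {
			/* ecn == 1 is Not-ECT: IP_ECN_set_ce() returns 0 (cannot mark);
			 * ecn == 0 is CE: already marked, returns 1 with no change */
			printf("%-7s: %s\n", name[tos],
			       ecn ? "not ECN-capable, no remark" : "already CE");
			continue;
		}
		/* ECT(1) -> 0xFFFD, ECT(0) -> 0xFFFE, matching the comment */
		printf("%-7s: check += htons(0x%04X)\n", name[tos], 0xFFFB + ecn);
	}
	return 0;
}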
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
new file mode 100644
index 000000000..0b0876610
--- /dev/null
+++ b/include/net/inet_frag.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_FRAG_H__
+#define __NET_FRAG_H__
+
+#include <linux/rhashtable-types.h>
+#include <linux/completion.h>
+#include <linux/in6.h>
+#include <linux/rbtree_types.h>
+#include <linux/refcount.h>
+
+/* Per netns frag queues directory */
+struct fqdir {
+ /* sysctls */
+ long high_thresh;
+ long low_thresh;
+ int timeout;
+ int max_dist;
+ struct inet_frags *f;
+ struct net *net;
+ bool dead;
+
+ struct rhashtable rhashtable ____cacheline_aligned_in_smp;
+
+ /* Keep atomic mem on separate cachelines in structs that include it */
+ atomic_long_t mem ____cacheline_aligned_in_smp;
+ struct work_struct destroy_work;
+ struct llist_node free_list;
+};
+
+/**
+ * fragment queue flags
+ *
+ * @INET_FRAG_FIRST_IN: first fragment has arrived
+ * @INET_FRAG_LAST_IN: final fragment has arrived
+ * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
+ * @INET_FRAG_HASH_DEAD: inet_frag_kill() has not removed fq from rhashtable
+ */
+enum {
+ INET_FRAG_FIRST_IN = BIT(0),
+ INET_FRAG_LAST_IN = BIT(1),
+ INET_FRAG_COMPLETE = BIT(2),
+ INET_FRAG_HASH_DEAD = BIT(3),
+};
+
+struct frag_v4_compare_key {
+ __be32 saddr;
+ __be32 daddr;
+ u32 user;
+ u32 vif;
+ __be16 id;
+ u16 protocol;
+};
+
+struct frag_v6_compare_key {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ u32 user;
+ __be32 id;
+ u32 iif;
+};
+
+/**
+ * struct inet_frag_queue - fragment queue
+ *
+ * @node: rhash node
+ * @key: keys identifying this frag.
+ * @timer: queue expiration timer
+ * @lock: spinlock protecting this frag
+ * @refcnt: reference count of the queue
+ * @rb_fragments: received fragments rb-tree root
+ * @fragments_tail: received fragments tail
+ * @last_run_head: the head of the last "run". see ip_fragment.c
+ * @stamp: timestamp of the last received fragment
+ * @len: total length of the original datagram
+ * @meat: length of received fragments so far
+ * @mono_delivery_time: stamp has a mono delivery time (EDT)
+ * @flags: fragment queue flags
+ * @max_size: maximum received fragment size
+ * @fqdir: pointer to struct fqdir
+ * @rcu: rcu head for deferred freeing
+ */
+struct inet_frag_queue {
+ struct rhash_head node;
+ union {
+ struct frag_v4_compare_key v4;
+ struct frag_v6_compare_key v6;
+ } key;
+ struct timer_list timer;
+ spinlock_t lock;
+ refcount_t refcnt;
+ struct rb_root rb_fragments;
+ struct sk_buff *fragments_tail;
+ struct sk_buff *last_run_head;
+ ktime_t stamp;
+ int len;
+ int meat;
+ u8 mono_delivery_time;
+ __u8 flags;
+ u16 max_size;
+ struct fqdir *fqdir;
+ struct rcu_head rcu;
+};
+
+struct inet_frags {
+ unsigned int qsize;
+
+ void (*constructor)(struct inet_frag_queue *q,
+ const void *arg);
+ void (*destructor)(struct inet_frag_queue *);
+ void (*frag_expire)(struct timer_list *t);
+ struct kmem_cache *frags_cachep;
+ const char *frags_cache_name;
+ struct rhashtable_params rhash_params;
+ refcount_t refcnt;
+ struct completion completion;
+};
+
+int inet_frags_init(struct inet_frags *);
+void inet_frags_fini(struct inet_frags *);
+
+int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net);
+
+static inline void fqdir_pre_exit(struct fqdir *fqdir)
+{
+ /* Prevent creation of new frags.
+ * Pairs with READ_ONCE() in inet_frag_find().
+ */
+ WRITE_ONCE(fqdir->high_thresh, 0);
+
+ /* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire()
+ * and ip6frag_expire_frag_queue().
+ */
+ WRITE_ONCE(fqdir->dead, true);
+}
+void fqdir_exit(struct fqdir *fqdir);
+
+void inet_frag_kill(struct inet_frag_queue *q);
+void inet_frag_destroy(struct inet_frag_queue *q);
+struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key);
+
+/* Free all skbs in the queue; return the sum of their truesizes. */
+unsigned int inet_frag_rbtree_purge(struct rb_root *root);
+
+static inline void inet_frag_put(struct inet_frag_queue *q)
+{
+ if (refcount_dec_and_test(&q->refcnt))
+ inet_frag_destroy(q);
+}
+
+/* Memory Tracking Functions. */
+
+static inline long frag_mem_limit(const struct fqdir *fqdir)
+{
+ return atomic_long_read(&fqdir->mem);
+}
+
+static inline void sub_frag_mem_limit(struct fqdir *fqdir, long val)
+{
+ atomic_long_sub(val, &fqdir->mem);
+}
+
+static inline void add_frag_mem_limit(struct fqdir *fqdir, long val)
+{
+ atomic_long_add(val, &fqdir->mem);
+}
+
+/* RFC 3168 support:
+ * We want to check the ECN values of all fragments, to detect invalid
+ * combinations. In ipq->ecn, we store the OR of each ip4_frag_ecn()
+ * fragment value.
+ */
+#define IPFRAG_ECN_NOT_ECT 0x01 /* one frag had ECN_NOT_ECT */
+#define IPFRAG_ECN_ECT_1 0x02 /* one frag had ECN_ECT_1 */
+#define IPFRAG_ECN_ECT_0 0x04 /* one frag had ECN_ECT_0 */
+#define IPFRAG_ECN_CE 0x08 /* one frag had ECN_CE */
+
+extern const u8 ip_frag_ecn_table[16];
+
+/* Return values of inet_frag_queue_insert() */
+#define IPFRAG_OK 0
+#define IPFRAG_DUP 1
+#define IPFRAG_OVERLAP 2
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+ int offset, int end);
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ struct sk_buff *parent);
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+ void *reasm_data, bool try_coalesce);
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
+
+#endif
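A sketch of the memory-accounting pattern the reassembly paths build on these helpers, using C11 atomics in place of atomic_long_t; struct fqdir_demo and frag_would_exceed() are hypothetical stand-ins, not kernel API:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for struct fqdir's mem counter and high_thresh sysctl. */
struct fqdir_demo {
	atomic_long mem;     /* bytes currently used by this netns */
	long high_thresh;    /* refuse new fragments above this */
};

static int frag_would_exceed(struct fqdir_demo *fqdir, long truesize)
{
	/* Mirrors the check done before queueing a fragment: read the
	 * counter and compare against the tunable threshold. */
	return atomic_load(&fqdir->mem) + truesize > fqdir->high_thresh;
}

int main(void)
{
	struct fqdir_demo fqdir = { .high_thresh = 4 * 1024 * 1024 };

	atomic_init(&fqdir.mem, 0);

	if (!frag_would_exceed(&fqdir, 1500))
		atomic_fetch_add(&fqdir.mem, 1500);  /* add_frag_mem_limit() */

	atomic_fetch_sub(&fqdir.mem, 1500);          /* sub_frag_mem_limit() */
	printf("mem now %ld\n", atomic_load(&fqdir.mem));
	return 0;
}

Note how fqdir_pre_exit() above exploits the same comparison: writing 0 to high_thresh makes every new fragment fail the threshold check, which is how teardown stops new queues from being created.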
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
new file mode 100644
index 000000000..99bd823e9
--- /dev/null
+++ b/include/net/inet_hashtables.h
@@ -0,0 +1,479 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Authors: Lotsa people, from code originally in tcp
+ */
+
+#ifndef _INET_HASHTABLES_H
+#define _INET_HASHTABLES_H
+
+
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <net/inet_connection_sock.h>
+#include <net/inet_sock.h>
+#include <net/ip.h>
+#include <net/sock.h>
+#include <net/route.h>
+#include <net/tcp_states.h>
+#include <net/netns/hash.h>
+
+#include <linux/refcount.h>
+#include <asm/byteorder.h>
+
+/* This is for all connections with a full identity, no wildcards.
+ * The 'e' prefix stands for Established, but we really put all sockets
+ * except LISTEN ones here.
+ */
+struct inet_ehash_bucket {
+ struct hlist_nulls_head chain;
+};
+
+/* There are a few simple rules, which allow for local port reuse by
+ * an application. In essence:
+ *
+ * 1) Sockets bound to different interfaces may share a local port.
+ * Failing that, goto test 2.
+ * 2) If all sockets have sk->sk_reuse set, and none of them are in
+ * TCP_LISTEN state, the port may be shared.
+ * Failing that, goto test 3.
+ * 3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
+ * address, and none of them are the same, the port may be
+ * shared.
+ * Failing this, the port cannot be shared.
+ *
+ * The interesting point is test #2. This is what an FTP server does
+ * all day. To optimize this case we use a specific flag bit defined
+ * below. As we add sockets to a bind bucket list, we perform a
+ * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
+ * As long as all sockets added to a bind bucket pass this test,
+ * the flag bit will be set.
+ * The resulting situation is that tcp_v[46]_verify_bind() can just check
+ * for this flag bit, if it is set and the socket trying to bind has
+ * sk->sk_reuse set, we don't even have to walk the owners list at all,
+ * we return that it is ok to bind this socket to the requested local port.
+ *
+ * Sounds like a lot of work, but it is worth it. In a more naive
+ * implementation (i.e. current FreeBSD etc.) the entire list of ports
+ * must be walked for each data port opened by an ftp server. Needless
+ * to say, this does not scale at all. With a couple thousand FTP
+ * users logged onto your box, isn't it nice to know that new data
+ * ports are created in O(1) time? I thought so. ;-) -DaveM
+ */
+#define FASTREUSEPORT_ANY 1
+#define FASTREUSEPORT_STRICT 2
+
+struct inet_bind_bucket {
+ possible_net_t ib_net;
+ int l3mdev;
+ unsigned short port;
+ signed char fastreuse;
+ signed char fastreuseport;
+ kuid_t fastuid;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr fast_v6_rcv_saddr;
+#endif
+ __be32 fast_rcv_saddr;
+ unsigned short fast_sk_family;
+ bool fast_ipv6_only;
+ struct hlist_node node;
+ struct hlist_head owners;
+};
+
+struct inet_bind2_bucket {
+ possible_net_t ib_net;
+ int l3mdev;
+ unsigned short port;
+#if IS_ENABLED(CONFIG_IPV6)
+ unsigned short family;
+#endif
+ union {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr v6_rcv_saddr;
+#endif
+ __be32 rcv_saddr;
+ };
+ /* Node in the bhash2 inet_bind_hashbucket chain */
+ struct hlist_node node;
+ /* List of sockets hashed to this bucket */
+ struct hlist_head owners;
+ /* bhash keeps twsk in owners, but bhash2 keeps twsk in
+ * deathrow so as not to add a member to struct sock_common.
+ */
+ struct hlist_head deathrow;
+};
+
+static inline struct net *ib_net(const struct inet_bind_bucket *ib)
+{
+ return read_pnet(&ib->ib_net);
+}
+
+static inline struct net *ib2_net(const struct inet_bind2_bucket *ib)
+{
+ return read_pnet(&ib->ib_net);
+}
+
+#define inet_bind_bucket_for_each(tb, head) \
+ hlist_for_each_entry(tb, head, node)
+
+struct inet_bind_hashbucket {
+ spinlock_t lock;
+ struct hlist_head chain;
+};
+
+/* Sockets can be hashed in the established or listening table.
+ * We must use different 'nulls' end-of-chain values for all hash buckets:
+ * a socket might transition from ESTABLISHED to LISTEN state without an
+ * RCU grace period. A lookup in the ehash table needs to handle this case.
+ */
+#define LISTENING_NULLS_BASE (1U << 29)
+struct inet_listen_hashbucket {
+ spinlock_t lock;
+ struct hlist_nulls_head nulls_head;
+};
+
+/* This is for listening sockets, thus all sockets which possess wildcards. */
+#define INET_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
+
+struct inet_hashinfo {
+ /* This is for sockets with full identity only. Sockets here will
+ * always be without wildcards and will have the following invariant:
+ *
+ * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
+ *
+ */
+ struct inet_ehash_bucket *ehash;
+ spinlock_t *ehash_locks;
+ unsigned int ehash_mask;
+ unsigned int ehash_locks_mask;
+
+ /* Ok, let's try this, I give up, we do need a local binding
+ * TCP hash as well as the others for fast bind/connect.
+ */
+ struct kmem_cache *bind_bucket_cachep;
+ /* This bind table is hashed by local port */
+ struct inet_bind_hashbucket *bhash;
+ struct kmem_cache *bind2_bucket_cachep;
+ /* This bind table is hashed by local port and sk->sk_rcv_saddr (ipv4)
+ * or sk->sk_v6_rcv_saddr (ipv6). This 2nd bind table is used
+ * primarily for expediting bind conflict resolution.
+ */
+ struct inet_bind_hashbucket *bhash2;
+ unsigned int bhash_size;
+
+ /* The 2nd listener table hashed by local port and address */
+ unsigned int lhash2_mask;
+ struct inet_listen_hashbucket *lhash2;
+
+ bool pernet;
+};
+
+static inline struct inet_hashinfo *tcp_or_dccp_get_hashinfo(const struct sock *sk)
+{
+#if IS_ENABLED(CONFIG_IP_DCCP)
+ return sk->sk_prot->h.hashinfo ? :
+ sock_net(sk)->ipv4.tcp_death_row.hashinfo;
+#else
+ return sock_net(sk)->ipv4.tcp_death_row.hashinfo;
+#endif
+}
+
+static inline struct inet_listen_hashbucket *
+inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
+{
+ return &h->lhash2[hash & h->lhash2_mask];
+}
+
+static inline struct inet_ehash_bucket *inet_ehash_bucket(
+ struct inet_hashinfo *hashinfo,
+ unsigned int hash)
+{
+ return &hashinfo->ehash[hash & hashinfo->ehash_mask];
+}
+
+static inline spinlock_t *inet_ehash_lockp(
+ struct inet_hashinfo *hashinfo,
+ unsigned int hash)
+{
+ return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
+}
+
+int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
+
+static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h)
+{
+ kfree(h->lhash2);
+ h->lhash2 = NULL;
+}
+
+static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
+{
+ kvfree(hashinfo->ehash_locks);
+ hashinfo->ehash_locks = NULL;
+}
+
+struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
+ unsigned int ehash_entries);
+void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo);
+
+struct inet_bind_bucket *
+inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
+ struct inet_bind_hashbucket *head,
+ const unsigned short snum, int l3mdev);
+void inet_bind_bucket_destroy(struct kmem_cache *cachep,
+ struct inet_bind_bucket *tb);
+
+bool inet_bind_bucket_match(const struct inet_bind_bucket *tb,
+ const struct net *net, unsigned short port,
+ int l3mdev);
+
+struct inet_bind2_bucket *
+inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net,
+ struct inet_bind_hashbucket *head,
+ unsigned short port, int l3mdev,
+ const struct sock *sk);
+
+void inet_bind2_bucket_destroy(struct kmem_cache *cachep,
+ struct inet_bind2_bucket *tb);
+
+struct inet_bind2_bucket *
+inet_bind2_bucket_find(const struct inet_bind_hashbucket *head,
+ const struct net *net,
+ unsigned short port, int l3mdev,
+ const struct sock *sk);
+
+bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb,
+ const struct net *net, unsigned short port,
+ int l3mdev, const struct sock *sk);
+
+static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
+ const u32 bhash_size)
+{
+ return (lport + net_hash_mix(net)) & (bhash_size - 1);
+}
+
+static inline struct inet_bind_hashbucket *
+inet_bhashfn_portaddr(const struct inet_hashinfo *hinfo, const struct sock *sk,
+ const struct net *net, unsigned short port)
+{
+ u32 hash;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ hash = ipv6_portaddr_hash(net, &sk->sk_v6_rcv_saddr, port);
+ else
+#endif
+ hash = ipv4_portaddr_hash(net, sk->sk_rcv_saddr, port);
+ return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
+}
+
+struct inet_bind_hashbucket *
+inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port);
+
+/* This should be called whenever a socket's sk_rcv_saddr (ipv4) or
+ * sk_v6_rcv_saddr (ipv6) changes after it has been bound. The socket's
+ * rcv_saddr field should already have been updated when this is called.
+ */
+int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family);
+void inet_bhash2_reset_saddr(struct sock *sk);
+
+void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+ struct inet_bind2_bucket *tb2, unsigned short port);
+
+/* Caller must disable local BH processing. */
+int __inet_inherit_port(const struct sock *sk, struct sock *child);
+
+void inet_put_port(struct sock *sk);
+
+void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
+ unsigned long numentries, int scale,
+ unsigned long low_limit,
+ unsigned long high_limit);
+int inet_hashinfo2_init_mod(struct inet_hashinfo *h);
+
+bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
+bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
+ bool *found_dup_sk);
+int __inet_hash(struct sock *sk, struct sock *osk);
+int inet_hash(struct sock *sk);
+void inet_unhash(struct sock *sk);
+
+struct sock *__inet_lookup_listener(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb, int doff,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr,
+ const unsigned short hnum,
+ const int dif, const int sdif);
+
+static inline struct sock *inet_lookup_listener(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb, int doff,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, __be16 dport, int dif, int sdif)
+{
+ return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
+ daddr, ntohs(dport), dif, sdif);
+}
+
+/* Socket demux engine toys. */
+/* What happens here is ugly; there's a pair of adjacent fields in
+ struct inet_sock; __be16 dport followed by __u16 num. We want to
+ search by pair, so we combine the keys into a single 32bit value
+ and compare with 32bit value read from &...->dport. Let's at least
+ make sure that it's not mixed with anything else...
+ On 64bit targets we combine comparisons with a pair of adjacent __be32
+ fields in the same way.
+*/
+#ifdef __BIG_ENDIAN
+#define INET_COMBINED_PORTS(__sport, __dport) \
+ ((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
+#else /* __LITTLE_ENDIAN */
+#define INET_COMBINED_PORTS(__sport, __dport) \
+ ((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
+#endif
+
+#ifdef __BIG_ENDIAN
+#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
+ const __addrpair __name = (__force __addrpair) ( \
+ (((__force __u64)(__be32)(__saddr)) << 32) | \
+ ((__force __u64)(__be32)(__daddr)))
+#else /* __LITTLE_ENDIAN */
+#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
+ const __addrpair __name = (__force __addrpair) ( \
+ (((__force __u64)(__be32)(__daddr)) << 32) | \
+ ((__force __u64)(__be32)(__saddr)))
+#endif /* __BIG_ENDIAN */
+
+static inline bool inet_match(struct net *net, const struct sock *sk,
+ const __addrpair cookie, const __portpair ports,
+ int dif, int sdif)
+{
+ if (!net_eq(sock_net(sk), net) ||
+ sk->sk_portpair != ports ||
+ sk->sk_addrpair != cookie)
+ return false;
+
+ /* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */
+ return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif,
+ sdif);
+}
+
+/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
+ * not check it for lookups anymore, thanks Alexey. -DaveM
+ */
+struct sock *__inet_lookup_established(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const u16 hnum,
+ const int dif, const int sdif);
+
+static inline struct sock *
+ inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const __be16 dport,
+ const int dif)
+{
+ return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
+ ntohs(dport), dif, 0);
+}
+
+static inline struct sock *__inet_lookup(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb, int doff,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const __be16 dport,
+ const int dif, const int sdif,
+ bool *refcounted)
+{
+ u16 hnum = ntohs(dport);
+ struct sock *sk;
+
+ sk = __inet_lookup_established(net, hashinfo, saddr, sport,
+ daddr, hnum, dif, sdif);
+ *refcounted = true;
+ if (sk)
+ return sk;
+ *refcounted = false;
+ return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
+ sport, daddr, hnum, dif, sdif);
+}
+
+static inline struct sock *inet_lookup(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb, int doff,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const __be16 dport,
+ const int dif)
+{
+ struct sock *sk;
+ bool refcounted;
+
+ sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
+ dport, dif, 0, &refcounted);
+
+ if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
+ sk = NULL;
+ return sk;
+}
+
+static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb,
+ int doff,
+ const __be16 sport,
+ const __be16 dport,
+ const int sdif,
+ bool *refcounted)
+{
+ struct sock *sk = skb_steal_sock(skb, refcounted);
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (sk)
+ return sk;
+
+ return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
+ doff, iph->saddr, sport,
+ iph->daddr, dport, inet_iif(skb), sdif,
+ refcounted);
+}
+
+u32 inet6_ehashfn(const struct net *net,
+ const struct in6_addr *laddr, const u16 lport,
+ const struct in6_addr *faddr, const __be16 fport);
+
+static inline void sk_daddr_set(struct sock *sk, __be32 addr)
+{
+ sk->sk_daddr = addr; /* alias of inet_daddr */
+#if IS_ENABLED(CONFIG_IPV6)
+ ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
+#endif
+}
+
+static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
+{
+ sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
+#if IS_ENABLED(CONFIG_IPV6)
+ ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
+#endif
+}
+
+int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ struct sock *sk, u64 port_offset,
+ int (*check_established)(struct inet_timewait_death_row *,
+ struct sock *, __u16,
+ struct inet_timewait_sock **));
+
+int inet_hash_connect(struct inet_timewait_death_row *death_row,
+ struct sock *sk);
+#endif /* _INET_HASHTABLES_H */
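To see why the port-pair cookie works, here is a little-endian userspace replica of INET_COMBINED_PORTS (names are illustrative, not kernel API). The single 32-bit compare in inet_match() stands in for two 16-bit compares against the adjacent dport/num fields:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* htons */

/* Little-endian variant of INET_COMBINED_PORTS: one 32-bit compare
 * replaces two 16-bit compares against adjacent struct fields. */
static uint32_t combined_ports_le(uint16_t sport_be, uint16_t dport_h)
{
	return ((uint32_t)dport_h << 16) | sport_be;
}

int main(void)
{
	uint16_t sport = htons(443);  /* network order, as read off the wire */
	uint16_t dport = 40000;       /* host-order local port (hnum) */

	uint32_t cookie = combined_ports_le(sport, dport);
	printf("port cookie: 0x%08x\n", cookie);

	/* inet_match() compares this cookie against sk->sk_portpair, which
	 * aliases the adjacent __be16 dport / __u16 num fields in memory. */
	return 0;
}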
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
new file mode 100644
index 000000000..c98890e21
--- /dev/null
+++ b/include/net/inet_sock.h
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for inet_sock
+ *
+ * Authors: Many, reorganised here by
+ * Arnaldo Carvalho de Melo <acme@mandriva.com>
+ */
+#ifndef _INET_SOCK_H
+#define _INET_SOCK_H
+
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/jhash.h>
+#include <linux/netdevice.h>
+
+#include <net/flow.h>
+#include <net/sock.h>
+#include <net/request_sock.h>
+#include <net/netns/hash.h>
+#include <net/tcp_states.h>
+#include <net/l3mdev.h>
+
+/** struct ip_options - IP Options
+ *
+ * @faddr - Saved first hop address
+ * @nexthop - Saved nexthop address in LSRR and SSRR
+ * @is_strictroute - Strict source route
+ * @srr_is_hit - Packet destination addr was ours
+ * @is_changed - IP checksum no longer valid
+ * @rr_needaddr - Need to record addr of outgoing dev
+ * @ts_needtime - Need to record timestamp
+ * @ts_needaddr - Need to record addr of outgoing dev
+ */
+struct ip_options {
+ __be32 faddr;
+ __be32 nexthop;
+ unsigned char optlen;
+ unsigned char srr;
+ unsigned char rr;
+ unsigned char ts;
+ unsigned char is_strictroute:1,
+ srr_is_hit:1,
+ is_changed:1,
+ rr_needaddr:1,
+ ts_needtime:1,
+ ts_needaddr:1;
+ unsigned char router_alert;
+ unsigned char cipso;
+ unsigned char __pad2;
+ unsigned char __data[];
+};
+
+struct ip_options_rcu {
+ struct rcu_head rcu;
+ struct ip_options opt;
+};
+
+struct ip_options_data {
+ struct ip_options_rcu opt;
+ char data[40];
+};
+
+struct inet_request_sock {
+ struct request_sock req;
+#define ir_loc_addr req.__req_common.skc_rcv_saddr
+#define ir_rmt_addr req.__req_common.skc_daddr
+#define ir_num req.__req_common.skc_num
+#define ir_rmt_port req.__req_common.skc_dport
+#define ir_v6_rmt_addr req.__req_common.skc_v6_daddr
+#define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr
+#define ir_iif req.__req_common.skc_bound_dev_if
+#define ir_cookie req.__req_common.skc_cookie
+#define ireq_net req.__req_common.skc_net
+#define ireq_state req.__req_common.skc_state
+#define ireq_family req.__req_common.skc_family
+
+ u16 snd_wscale : 4,
+ rcv_wscale : 4,
+ tstamp_ok : 1,
+ sack_ok : 1,
+ wscale_ok : 1,
+ ecn_ok : 1,
+ acked : 1,
+ no_srccheck: 1,
+ smc_ok : 1;
+ u32 ir_mark;
+ union {
+ struct ip_options_rcu __rcu *ireq_opt;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct {
+ struct ipv6_txoptions *ipv6_opt;
+ struct sk_buff *pktopts;
+ };
+#endif
+ };
+};
+
+static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
+{
+ return (struct inet_request_sock *)sk;
+}
+
+static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
+{
+ u32 mark = READ_ONCE(sk->sk_mark);
+
+ if (!mark && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept))
+ return skb->mark;
+
+ return mark;
+}
+
+static inline int inet_request_bound_dev_if(const struct sock *sk,
+ struct sk_buff *skb)
+{
+ int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ struct net *net = sock_net(sk);
+
+ if (!bound_dev_if && READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept))
+ return l3mdev_master_ifindex_by_index(net, skb->skb_iif);
+#endif
+
+ return bound_dev_if;
+}
+
+static inline int inet_sk_bound_l3mdev(const struct sock *sk)
+{
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ struct net *net = sock_net(sk);
+
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept))
+ return l3mdev_master_ifindex_by_index(net,
+ sk->sk_bound_dev_if);
+#endif
+
+ return 0;
+}
+
+static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
+ int dif, int sdif)
+{
+ if (!bound_dev_if)
+ return !sdif || l3mdev_accept;
+ return bound_dev_if == dif || bound_dev_if == sdif;
+}
+
+static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+ int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept),
+ bound_dev_if, dif, sdif);
+#else
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
+struct inet_cork {
+ unsigned int flags;
+ __be32 addr;
+ struct ip_options *opt;
+ unsigned int fragsize;
+ int length; /* Total length of all frames */
+ struct dst_entry *dst;
+ u8 tx_flags;
+ __u8 ttl;
+ __s16 tos;
+ char priority;
+ __u16 gso_size;
+ u64 transmit_time;
+ u32 mark;
+};
+
+struct inet_cork_full {
+ struct inet_cork base;
+ struct flowi fl;
+};
+
+struct ip_mc_socklist;
+struct ipv6_pinfo;
+struct rtable;
+
+/** struct inet_sock - representation of INET sockets
+ *
+ * @sk - ancestor class
+ * @pinet6 - pointer to IPv6 control block
+ * @inet_daddr - Foreign IPv4 addr
+ * @inet_rcv_saddr - Bound local IPv4 addr
+ * @inet_dport - Destination port
+ * @inet_num - Local port
+ * @inet_saddr - Sending source
+ * @uc_ttl - Unicast TTL
+ * @inet_sport - Source port
+ * @inet_id - ID counter for DF pkts
+ * @tos - TOS
+ * @mc_ttl - Multicasting TTL
+ * @is_icsk - is this an inet_connection_sock?
+ * @uc_index - Unicast outgoing device index
+ * @mc_index - Multicast device index
+ * @mc_list - Group array
+ * @cork - info to build ip hdr on each ip frag while socket is corked
+ */
+struct inet_sock {
+ /* sk and pinet6 have to be the first two members of inet_sock */
+ struct sock sk;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6_pinfo *pinet6;
+#endif
+ /* Socket demultiplex comparisons on incoming packets. */
+#define inet_daddr sk.__sk_common.skc_daddr
+#define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr
+#define inet_dport sk.__sk_common.skc_dport
+#define inet_num sk.__sk_common.skc_num
+
+ __be32 inet_saddr;
+ __s16 uc_ttl;
+ __u16 cmsg_flags;
+ struct ip_options_rcu __rcu *inet_opt;
+ atomic_t inet_id;
+ __be16 inet_sport;
+
+ __u8 tos;
+ __u8 min_ttl;
+ __u8 mc_ttl;
+ __u8 pmtudisc;
+ __u8 recverr:1,
+ is_icsk:1,
+ freebind:1,
+ hdrincl:1,
+ mc_loop:1,
+ transparent:1,
+ mc_all:1,
+ nodefrag:1;
+ __u8 bind_address_no_port:1,
+ recverr_rfc4884:1,
+ defer_connect:1; /* Indicates that fastopen_connect is set
+ * and cookie exists so we defer connect
+ * until first data frame is written
+ */
+ __u8 rcv_tos;
+ __u8 convert_csum;
+ int uc_index;
+ int mc_index;
+ __be32 mc_addr;
+ struct ip_mc_socklist __rcu *mc_list;
+ struct inet_cork_full cork;
+ struct {
+ __u16 lo;
+ __u16 hi;
+ } local_port_range;
+};
+
+#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
+#define IPCORK_ALLFRAG 2 /* always fragment (for ipv6 for now) */
+
+/* cmsg flags for inet */
+#define IP_CMSG_PKTINFO BIT(0)
+#define IP_CMSG_TTL BIT(1)
+#define IP_CMSG_TOS BIT(2)
+#define IP_CMSG_RECVOPTS BIT(3)
+#define IP_CMSG_RETOPTS BIT(4)
+#define IP_CMSG_PASSSEC BIT(5)
+#define IP_CMSG_ORIGDSTADDR BIT(6)
+#define IP_CMSG_CHECKSUM BIT(7)
+#define IP_CMSG_RECVFRAGSIZE BIT(8)
+
+/**
+ * sk_to_full_sk - Access to a full socket
+ * @sk: pointer to a socket
+ *
+ * SYNACK messages might be attached to request sockets.
+ * Some places want to reach the listener in this case.
+ */
+static inline struct sock *sk_to_full_sk(struct sock *sk)
+{
+#ifdef CONFIG_INET
+ if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
+ sk = inet_reqsk(sk)->rsk_listener;
+#endif
+ return sk;
+}
+
+/* sk_to_full_sk() variant with a const argument */
+static inline const struct sock *sk_const_to_full_sk(const struct sock *sk)
+{
+#ifdef CONFIG_INET
+ if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
+ sk = ((const struct request_sock *)sk)->rsk_listener;
+#endif
+ return sk;
+}
+
+static inline struct sock *skb_to_full_sk(const struct sk_buff *skb)
+{
+ return sk_to_full_sk(skb->sk);
+}
+
+static inline struct inet_sock *inet_sk(const struct sock *sk)
+{
+ return (struct inet_sock *)sk;
+}
+
+static inline void __inet_sk_copy_descendant(struct sock *sk_to,
+ const struct sock *sk_from,
+ const int ancestor_size)
+{
+ memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
+ sk_from->sk_prot->obj_size - ancestor_size);
+}
+
+int inet_sk_rebuild_header(struct sock *sk);
+
+/**
+ * inet_sk_state_load - read sk->sk_state for lockless contexts
+ * @sk: socket pointer
+ *
+ * Paired with inet_sk_state_store(). Used in places we don't hold socket lock:
+ * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
+ */
+static inline int inet_sk_state_load(const struct sock *sk)
+{
+ /* state change might impact lockless readers. */
+ return smp_load_acquire(&sk->sk_state);
+}
+
+/**
+ * inet_sk_state_store - update sk->sk_state
+ * @sk: socket pointer
+ * @newstate: new state
+ *
+ * Paired with inet_sk_state_load(). Should be used in contexts where
+ * state change might impact lockless readers.
+ */
+void inet_sk_state_store(struct sock *sk, int newstate);
+
+void inet_sk_set_state(struct sock *sk, int state);
+
+static inline unsigned int __inet_ehashfn(const __be32 laddr,
+ const __u16 lport,
+ const __be32 faddr,
+ const __be16 fport,
+ u32 initval)
+{
+ return jhash_3words((__force __u32) laddr,
+ (__force __u32) faddr,
+ ((__u32) lport) << 16 | (__force __u32)fport,
+ initval);
+}
+
+struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+ struct sock *sk_listener,
+ bool attach_listener);
+
+static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
+{
+ __u8 flags = 0;
+
+ if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl)
+ flags |= FLOWI_FLAG_ANYSRC;
+ return flags;
+}
+
+static inline void inet_inc_convert_csum(struct sock *sk)
+{
+ inet_sk(sk)->convert_csum++;
+}
+
+static inline void inet_dec_convert_csum(struct sock *sk)
+{
+ if (inet_sk(sk)->convert_csum > 0)
+ inet_sk(sk)->convert_csum--;
+}
+
+static inline bool inet_get_convert_csum(struct sock *sk)
+{
+ return !!inet_sk(sk)->convert_csum;
+}
+
+
+static inline bool inet_can_nonlocal_bind(struct net *net,
+ struct inet_sock *inet)
+{
+ return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) ||
+ inet->freebind || inet->transparent;
+}
+
+static inline bool inet_addr_valid_or_nonlocal(struct net *net,
+ struct inet_sock *inet,
+ __be32 addr,
+ int addr_type)
+{
+ return inet_can_nonlocal_bind(net, inet) ||
+ addr == htonl(INADDR_ANY) ||
+ addr_type == RTN_LOCAL ||
+ addr_type == RTN_MULTICAST ||
+ addr_type == RTN_BROADCAST;
+}
+
+#endif /* _INET_SOCK_H */
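__inet_ehashfn() feeds exactly three 32-bit words to jhash_3words(), with a random initval mixed in per netns. A hedged sketch of just the packing step (the hash itself is omitted; addresses and ports are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t laddr = inet_addr("192.0.2.1");    /* __be32, as stored */
	uint32_t faddr = inet_addr("198.51.100.7");
	uint16_t lport = 8080;                      /* host order */
	uint16_t fport = htons(443);                /* network order */

	/* local port in the high half, foreign port in the low half,
	 * exactly as in __inet_ehashfn()'s third jhash word */
	uint32_t portword = ((uint32_t)lport << 16) | fport;

	printf("words to hash: %08x %08x %08x\n",
	       (unsigned)laddr, (unsigned)faddr, (unsigned)portword);
	return 0;
}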
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
new file mode 100644
index 000000000..4a8e57840
--- /dev/null
+++ b/include/net/inet_timewait_sock.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for a generic INET TIMEWAIT sock
+ *
+ * From code originally in net/tcp.h
+ */
+#ifndef _INET_TIMEWAIT_SOCK_
+#define _INET_TIMEWAIT_SOCK_
+
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <net/inet_sock.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
+#include <net/timewait_sock.h>
+
+#include <linux/atomic.h>
+
+struct inet_bind_bucket;
+
+/*
+ * This is a TIME_WAIT sock. It works around the memory consumption
+ * problems of sockets in such a state on heavily loaded servers, but
+ * without violating the protocol specification.
+ */
+struct inet_timewait_sock {
+ /*
+ * Now struct sock also uses sock_common, so please just
+ * don't add anything before this first member (__tw_common) --acme
+ */
+ struct sock_common __tw_common;
+#define tw_family __tw_common.skc_family
+#define tw_state __tw_common.skc_state
+#define tw_reuse __tw_common.skc_reuse
+#define tw_reuseport __tw_common.skc_reuseport
+#define tw_ipv6only __tw_common.skc_ipv6only
+#define tw_bound_dev_if __tw_common.skc_bound_dev_if
+#define tw_node __tw_common.skc_nulls_node
+#define tw_bind_node __tw_common.skc_bind_node
+#define tw_refcnt __tw_common.skc_refcnt
+#define tw_hash __tw_common.skc_hash
+#define tw_prot __tw_common.skc_prot
+#define tw_net __tw_common.skc_net
+#define tw_daddr __tw_common.skc_daddr
+#define tw_v6_daddr __tw_common.skc_v6_daddr
+#define tw_rcv_saddr __tw_common.skc_rcv_saddr
+#define tw_v6_rcv_saddr __tw_common.skc_v6_rcv_saddr
+#define tw_dport __tw_common.skc_dport
+#define tw_num __tw_common.skc_num
+#define tw_cookie __tw_common.skc_cookie
+#define tw_dr __tw_common.skc_tw_dr
+
+ __u32 tw_mark;
+ volatile unsigned char tw_substate;
+ unsigned char tw_rcv_wscale;
+
+ /* Socket demultiplex comparisons on incoming packets. */
+ /* these three are in inet_sock */
+ __be16 tw_sport;
+ /* And these are ours. */
+ unsigned int tw_transparent : 1,
+ tw_flowlabel : 20,
+ tw_pad : 3, /* 3 bits hole */
+ tw_tos : 8;
+ u32 tw_txhash;
+ u32 tw_priority;
+ struct timer_list tw_timer;
+ struct inet_bind_bucket *tw_tb;
+ struct inet_bind2_bucket *tw_tb2;
+ struct hlist_node tw_bind2_node;
+};
+#define tw_tclass tw_tos
+
+#define twsk_for_each_bound_bhash2(__tw, list) \
+ hlist_for_each_entry(__tw, list, tw_bind2_node)
+
+static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
+{
+ return (struct inet_timewait_sock *)sk;
+}
+
+void inet_twsk_free(struct inet_timewait_sock *tw);
+void inet_twsk_put(struct inet_timewait_sock *tw);
+
+void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+ struct inet_hashinfo *hashinfo);
+
+struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+ struct inet_timewait_death_row *dr,
+ const int state);
+
+void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+ struct inet_hashinfo *hashinfo);
+
+void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
+ bool rearm);
+
+static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
+{
+ __inet_twsk_schedule(tw, timeo, false);
+}
+
+static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
+{
+ __inet_twsk_schedule(tw, timeo, true);
+}
+
+void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
+
+void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family);
+
+static inline
+struct net *twsk_net(const struct inet_timewait_sock *twsk)
+{
+ return read_pnet(&twsk->tw_net);
+}
+
+static inline
+void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net)
+{
+ write_pnet(&twsk->tw_net, net);
+}
+#endif /* _INET_TIMEWAIT_SOCK_ */
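The tw_transparent/tw_flowlabel/tw_pad/tw_tos bitfields are sized to share a single 32-bit unsigned int, matching the "3 bits hole" note, and the 20 bits of tw_flowlabel are exactly the width of an IPv6 flow label. A trivial standalone check of that packing:

#include <stdio.h>

int main(void)
{
	_Static_assert(1 + 20 + 3 + 8 == 32,
		       "timewait bitfields fill exactly one 32-bit word");
	printf("tw_transparent + tw_flowlabel + tw_pad + tw_tos = 32 bits\n");
	return 0;
}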
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
new file mode 100644
index 000000000..74ff68856
--- /dev/null
+++ b/include/net/inetpeer.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * INETPEER - A storage for permanent information about peers
+ *
+ * Authors: Andrey V. Savochkin <saw@msu.ru>
+ */
+
+#ifndef _NET_INETPEER_H
+#define _NET_INETPEER_H
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
+#include <net/ipv6.h>
+#include <linux/atomic.h>
+
+/* IPv4 address key for cache lookups */
+struct ipv4_addr_key {
+ __be32 addr;
+ int vif;
+};
+
+#define INETPEER_MAXKEYSZ (sizeof(struct in6_addr) / sizeof(u32))
+
+struct inetpeer_addr {
+ union {
+ struct ipv4_addr_key a4;
+ struct in6_addr a6;
+ u32 key[INETPEER_MAXKEYSZ];
+ };
+ __u16 family;
+};
+
+struct inet_peer {
+ struct rb_node rb_node;
+ struct inetpeer_addr daddr;
+
+ u32 metrics[RTAX_MAX];
+ u32 rate_tokens; /* rate limiting for ICMP */
+ u32 n_redirects;
+ unsigned long rate_last;
+ /*
+ * Once inet_peer is queued for deletion (refcnt == 0), the following
+ * field is no longer available: rid.
+ * We can share memory with rcu_head to help keep inet_peer small.
+ */
+ union {
+ struct {
+ atomic_t rid; /* Frag reception counter */
+ };
+ struct rcu_head rcu;
+ };
+
+ /* following fields might be frequently dirtied */
+ __u32 dtime; /* the time of last use of not referenced entries */
+ refcount_t refcnt;
+};
+
+struct inet_peer_base {
+ struct rb_root rb_root;
+ seqlock_t lock;
+ int total;
+};
+
+void inet_peer_base_init(struct inet_peer_base *);
+
+void inet_initpeers(void) __init;
+
+#define INETPEER_METRICS_NEW (~(u32) 0)
+
+static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
+{
+ iaddr->a4.addr = ip;
+ iaddr->a4.vif = 0;
+ iaddr->family = AF_INET;
+}
+
+static inline __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr)
+{
+ return iaddr->a4.addr;
+}
+
+static inline void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr,
+ struct in6_addr *in6)
+{
+ iaddr->a6 = *in6;
+ iaddr->family = AF_INET6;
+}
+
+static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
+{
+ return &iaddr->a6;
+}
+
+/* can be called with or without local BH being disabled */
+struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+ const struct inetpeer_addr *daddr,
+ int create);
+
+static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
+ __be32 v4daddr,
+ int vif, int create)
+{
+ struct inetpeer_addr daddr;
+
+ daddr.a4.addr = v4daddr;
+ daddr.a4.vif = vif;
+ daddr.family = AF_INET;
+ return inet_getpeer(base, &daddr, create);
+}
+
+static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
+ const struct in6_addr *v6daddr,
+ int create)
+{
+ struct inetpeer_addr daddr;
+
+ daddr.a6 = *v6daddr;
+ daddr.family = AF_INET6;
+ return inet_getpeer(base, &daddr, create);
+}
+
+static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
+ const struct inetpeer_addr *b)
+{
+ int i, n;
+
+ if (a->family == AF_INET)
+ n = sizeof(a->a4) / sizeof(u32);
+ else
+ n = sizeof(a->a6) / sizeof(u32);
+
+ for (i = 0; i < n; i++) {
+ if (a->key[i] == b->key[i])
+ continue;
+ if (a->key[i] < b->key[i])
+ return -1;
+ return 1;
+ }
+
+ return 0;
+}
+
+/* can be called from BH context or outside */
+void inet_putpeer(struct inet_peer *p);
+bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
+
+void inetpeer_invalidate_tree(struct inet_peer_base *);
+
+#endif /* _NET_INETPEER_H */
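inetpeer_addr_cmp() defines the total order used to walk the rb-tree: word-by-word comparison over 1 key word for IPv4 (the vif is deliberately excluded) or 4 for IPv6. A userspace replica of that ordering, with illustrative names:

#include <stdio.h>
#include <stdint.h>

#define NWORDS 4   /* INETPEER_MAXKEYSZ for an IPv6 key */

static int key_cmp(const uint32_t *a, const uint32_t *b, int n)
{
	for (int i = 0; i < n; i++) {
		if (a[i] == b[i])
			continue;
		return a[i] < b[i] ? -1 : 1;
	}
	return 0;
}

int main(void)
{
	uint32_t k1[NWORDS] = { 1, 2, 3, 4 };
	uint32_t k2[NWORDS] = { 1, 2, 9, 0 };

	/* first differing word decides; later words are never consulted */
	printf("cmp = %d\n", key_cmp(k1, k2, NWORDS));  /* -1: k1 sorts first */
	return 0;
}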
diff --git a/include/net/ioam6.h b/include/net/ioam6.h
new file mode 100644
index 000000000..781d2d8b2
--- /dev/null
+++ b/include/net/ioam6.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM implementation
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+
+#ifndef _NET_IOAM6_H
+#define _NET_IOAM6_H
+
+#include <linux/net.h>
+#include <linux/ipv6.h>
+#include <linux/ioam6.h>
+#include <linux/rhashtable-types.h>
+
+struct ioam6_namespace {
+ struct rhash_head head;
+ struct rcu_head rcu;
+
+ struct ioam6_schema __rcu *schema;
+
+ __be16 id;
+ __be32 data;
+ __be64 data_wide;
+};
+
+struct ioam6_schema {
+ struct rhash_head head;
+ struct rcu_head rcu;
+
+ struct ioam6_namespace __rcu *ns;
+
+ u32 id;
+ int len;
+ __be32 hdr;
+
+ u8 data[];
+};
+
+struct ioam6_pernet_data {
+ struct mutex lock;
+ struct rhashtable namespaces;
+ struct rhashtable schemas;
+};
+
+static inline struct ioam6_pernet_data *ioam6_pernet(struct net *net)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ return net->ipv6.ioam6_data;
+#else
+ return NULL;
+#endif
+}
+
+struct ioam6_namespace *ioam6_namespace(struct net *net, __be16 id);
+void ioam6_fill_trace_data(struct sk_buff *skb,
+ struct ioam6_namespace *ns,
+ struct ioam6_trace_hdr *trace,
+ bool is_input);
+
+int ioam6_init(void);
+void ioam6_exit(void);
+
+int ioam6_iptunnel_init(void);
+void ioam6_iptunnel_exit(void);
+
+#endif /* _NET_IOAM6_H */
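A toy model (not kernel code) of the mutual namespace<->schema linkage that the two __rcu pointers above express; presumably each side points back at the other so that either can find its peer and the pair can be torn down consistently:

#include <stdio.h>

struct schema;
struct nspace { struct schema *schema; unsigned short id; };
struct schema { struct nspace *ns;     unsigned int   id; };

int main(void)
{
	struct nspace ns = { .id = 1 };
	struct schema sc = { .id = 0xbeef };

	ns.schema = &sc;
	sc.ns = &ns;   /* both directions set: the schema is "attached" */

	printf("attached: %d\n", ns.schema && ns.schema->ns == &ns);
	return 0;
}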
diff --git a/include/net/ip.h b/include/net/ip.h
new file mode 100644
index 000000000..c83c09c65
--- /dev/null
+++ b/include/net/ip.h
@@ -0,0 +1,806 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP module.
+ *
+ * Version: @(#)ip.h 1.0.2 05/07/93
+ *
+ * Authors: Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * Changes:
+ * Mike McLagan : Routing by source
+ */
+#ifndef _IP_H
+#define _IP_H
+
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+#include <linux/jhash.h>
+#include <linux/sockptr.h>
+#include <linux/static_key.h>
+
+#include <net/inet_sock.h>
+#include <net/route.h>
+#include <net/snmp.h>
+#include <net/flow.h>
+#include <net/flow_dissector.h>
+#include <net/netns/hash.h>
+#include <net/lwtunnel.h>
+
+#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
+#define IPV4_MIN_MTU 68 /* RFC 791 */
+
+extern unsigned int sysctl_fib_sync_mem;
+extern unsigned int sysctl_fib_sync_mem_min;
+extern unsigned int sysctl_fib_sync_mem_max;
+
+struct sock;
+
+struct inet_skb_parm {
+ int iif;
+ struct ip_options opt; /* Compiled IP options */
+ u16 flags;
+
+#define IPSKB_FORWARDED BIT(0)
+#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
+#define IPSKB_XFRM_TRANSFORMED BIT(2)
+#define IPSKB_FRAG_COMPLETE BIT(3)
+#define IPSKB_REROUTED BIT(4)
+#define IPSKB_DOREDIRECT BIT(5)
+#define IPSKB_FRAG_PMTU BIT(6)
+#define IPSKB_L3SLAVE BIT(7)
+#define IPSKB_NOPOLICY BIT(8)
+#define IPSKB_MULTIPATH BIT(9)
+
+ u16 frag_max_size;
+};
+
+static inline bool ipv4_l3mdev_skb(u16 flags)
+{
+ return !!(flags & IPSKB_L3SLAVE);
+}
+
+static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
+{
+ return ip_hdr(skb)->ihl * 4;
+}
+
+struct ipcm_cookie {
+ struct sockcm_cookie sockc;
+ __be32 addr;
+ int oif;
+ struct ip_options_rcu *opt;
+ __u8 protocol;
+ __u8 ttl;
+ __s16 tos;
+ char priority;
+ __u16 gso_size;
+};
+
+static inline void ipcm_init(struct ipcm_cookie *ipcm)
+{
+ *ipcm = (struct ipcm_cookie) { .tos = -1 };
+}
+
+static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
+ const struct inet_sock *inet)
+{
+ ipcm_init(ipcm);
+
+ ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark);
+ ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags);
+ ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
+ ipcm->addr = inet->inet_saddr;
+ ipcm->protocol = inet->inet_num;
+}
+
+#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
+#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
+
+/* return enslaved device index if relevant */
+static inline int inet_sdif(const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
+ return IPCB(skb)->iif;
+#endif
+ return 0;
+}
+
+/* Special input handler for packets caught by the router alert option.
+ They are selected only by the protocol field, and then processed like
+ local ones; but only if someone wants them! Otherwise, a router
+ not running rsvpd will kill RSVP.
+
+ What user level does with them is its problem.
+ I have no idea how it will masquerade or NAT them (it is a joke, joke :-)),
+ but the receiver should be clever enough e.g. to forward mtrace requests
+ sent to a multicast group so they reach the destination's designated router.
+ */
+
+struct ip_ra_chain {
+ struct ip_ra_chain __rcu *next;
+ struct sock *sk;
+ union {
+ void (*destructor)(struct sock *);
+ struct sock *saved_sk;
+ };
+ struct rcu_head rcu;
+};
+
+/* IP flags. */
+#define IP_CE 0x8000 /* Flag: "Congestion" */
+#define IP_DF 0x4000 /* Flag: "Don't Fragment" */
+#define IP_MF 0x2000 /* Flag: "More Fragments" */
+#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
+
+#define IP_FRAG_TIME (30 * HZ) /* fragment lifetime */
+
+struct msghdr;
+struct net_device;
+struct packet_type;
+struct rtable;
+struct sockaddr;
+
+int igmp_mc_init(void);
+
+/*
+ * Functions provided by ip.c
+ */
+
+int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
+ __be32 saddr, __be32 daddr,
+ struct ip_options_rcu *opt, u8 tos);
+int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+ struct net_device *orig_dev);
+void ip_list_rcv(struct list_head *head, struct packet_type *pt,
+ struct net_device *orig_dev);
+int ip_local_deliver(struct sk_buff *skb);
+void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
+int ip_mr_input(struct sk_buff *skb);
+int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct net *, struct sock *, struct sk_buff *));
+
+struct ip_fraglist_iter {
+ struct sk_buff *frag;
+ struct iphdr *iph;
+ int offset;
+ unsigned int hlen;
+};
+
+void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
+ unsigned int hlen, struct ip_fraglist_iter *iter);
+void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);
+
+static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
+{
+ struct sk_buff *skb = iter->frag;
+
+ iter->frag = skb->next;
+ skb_mark_not_on_list(skb);
+
+ return skb;
+}
+
+struct ip_frag_state {
+ bool DF;
+ unsigned int hlen;
+ unsigned int ll_rs;
+ unsigned int mtu;
+ unsigned int left;
+ int offset;
+ int ptr;
+ __be16 not_last_frag;
+};
+
+void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
+ unsigned int mtu, bool DF, struct ip_frag_state *state);
+struct sk_buff *ip_frag_next(struct sk_buff *skb,
+ struct ip_frag_state *state);
+
+void ip_send_check(struct iphdr *ip);
+int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+
+int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
+ __u8 tos);
+void ip_init(void);
+int ip_append_data(struct sock *sk, struct flowi4 *fl4,
+ int getfrag(void *from, char *to, int offset, int len,
+ int odd, struct sk_buff *skb),
+ void *from, int len, int protolen,
+ struct ipcm_cookie *ipc,
+ struct rtable **rt,
+ unsigned int flags);
+int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
+ struct sk_buff *skb);
+ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
+ int offset, size_t size, int flags);
+struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
+ struct sk_buff_head *queue,
+ struct inet_cork *cork);
+int ip_send_skb(struct net *net, struct sk_buff *skb);
+int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
+void ip_flush_pending_frames(struct sock *sk);
+struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
+ int getfrag(void *from, char *to, int offset,
+ int len, int odd, struct sk_buff *skb),
+ void *from, int length, int transhdrlen,
+ struct ipcm_cookie *ipc, struct rtable **rtp,
+ struct inet_cork *cork, unsigned int flags);
+
+int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
+
+static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
+{
+ return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
+}
+
+static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
+{
+ return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
+}
+
+static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
+{
+ return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
+}
+
+/* datagram.c */
+int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+
+void ip4_datagram_release_cb(struct sock *sk);
+
+struct ip_reply_arg {
+ struct kvec iov[1];
+ int flags;
+ __wsum csum;
+ int csumoffset; /* u16 offset of csum in iov[0].iov_base */
+ /* -1 if not needed */
+ int bound_dev_if;
+ u8 tos;
+ kuid_t uid;
+};
+
+#define IP_REPLY_ARG_NOSRCCHECK 1
+
+static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
+{
+ return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
+}
+
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
+ const struct ip_options *sopt,
+ __be32 daddr, __be32 saddr,
+ const struct ip_reply_arg *arg,
+ unsigned int len, u64 transmit_time, u32 txhash);
+
+#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
+#define __IP_INC_STATS(net, field) __SNMP_INC_STATS64((net)->mib.ip_statistics, field)
+#define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
+#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
+#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
+#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
+#define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field)
+#define __NET_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.net_statistics, field)
+#define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
+#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
+
+static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
+{
+ return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
+}
+
+unsigned long snmp_fold_field(void __percpu *mib, int offt);
+#if BITS_PER_LONG == 32
+u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
+ size_t syncp_offset);
+u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
+#else
+static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
+ size_t syncp_offset)
+{
+ return snmp_get_cpu_field(mib, cpu, offct);
+}
+
+static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
+{
+ return snmp_fold_field(mib, offt);
+}
+#endif
+
+#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
+{ \
+ int i, c; \
+ for_each_possible_cpu(c) { \
+ for (i = 0; stats_list[i].name; i++) \
+ buff64[i] += snmp_get_cpu_field64( \
+ mib_statistic, \
+ c, stats_list[i].entry, \
+ offset); \
+ } \
+}
+
+#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
+{ \
+ int i, c; \
+ for_each_possible_cpu(c) { \
+ for (i = 0; stats_list[i].name; i++) \
+ buff[i] += snmp_get_cpu_field( \
+ mib_statistic, \
+ c, stats_list[i].entry); \
+ } \
+}
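+
+/* Usage sketch (illustrative, not part of the original header): both batch
+ * helpers expect 'stats_list' to be an array of struct snmp_mib entries
+ * terminated by a NULL .name sentinel, and the buffer to provide one
+ * accumulator slot per entry. A /proc dumper would do roughly:
+ *
+ * u64 buff64[IPSTATS_MIB_MAX] = { 0 };
+ *
+ * snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list,
+ * net->mib.ip_statistics,
+ * offsetof(struct ipstats_mib, syncp));
+ */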
+
+void inet_get_local_port_range(const struct net *net, int *low, int *high);
+void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);
+
+#ifdef CONFIG_SYSCTL
+static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
+{
+ if (!net->ipv4.sysctl_local_reserved_ports)
+ return false;
+ return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
+}
+
+static inline bool sysctl_dev_name_is_allowed(const char *name)
+{
+ return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
+}
+
+static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
+{
+ return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
+}
+
+#else
+static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
+{
+ return false;
+}
+
+static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
+{
+ return port < PROT_SOCK;
+}
+#endif
+
+__be32 inet_current_timestamp(void);
+
+/* From inetpeer.c */
+extern int inet_peer_threshold;
+extern int inet_peer_minttl;
+extern int inet_peer_maxttl;
+
+void ipfrag_init(void);
+
+void ip_static_sysctl_init(void);
+
+#define IP4_REPLY_MARK(net, mark) \
+ (READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)
+
+static inline bool ip_is_fragment(const struct iphdr *iph)
+{
+ return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
+}
+
+#ifdef CONFIG_INET
+#include <net/dst.h>
+
+/* The function in 2.2 was invalid, producing a wrong result for
+ * check=0xFEFF. It was noticed by Arthur Skawina a _year_ ago. --ANK(000625) */
+static inline
+int ip_decrease_ttl(struct iphdr *iph)
+{
+ u32 check = (__force u32)iph->check;
+ check += (__force u32)htons(0x0100);
+ iph->check = (__force __sum16)(check + (check>=0xFFFF));
+ return --iph->ttl;
+}
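+
+/* The update above is the RFC 1141/1624 incremental-checksum technique:
+ * decrementing TTL (the high byte of the TTL/protocol 16-bit word) lowers
+ * the header sum by 0x0100, so 0x0100 is added back to the stored
+ * complement, with '(check >= 0xFFFF)' supplying the end-around carry.
+ * Tracing the problematic case mentioned above: check = 0xFEFF gives
+ * 0xFEFF + 0x0100 = 0xFFFF, the carry term adds 1, and truncation to
+ * 16 bits stores 0x0000.
+ */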
+
+static inline int ip_mtu_locked(const struct dst_entry *dst)
+{
+ const struct rtable *rt = (const struct rtable *)dst;
+
+ return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
+}
+
+static inline
+int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
+{
+ u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
+
+ return pmtudisc == IP_PMTUDISC_DO ||
+ (pmtudisc == IP_PMTUDISC_WANT &&
+ !ip_mtu_locked(dst));
+}
+
+static inline bool ip_sk_accept_pmtu(const struct sock *sk)
+{
+ return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
+ inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
+}
+
+static inline bool ip_sk_use_pmtu(const struct sock *sk)
+{
+ return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
+}
+
+static inline bool ip_sk_ignore_df(const struct sock *sk)
+{
+ return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
+ inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
+}
+
+static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ bool forwarding)
+{
+ const struct rtable *rt = container_of(dst, struct rtable, dst);
+ struct net *net = dev_net(dst->dev);
+ unsigned int mtu;
+
+ if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
+ ip_mtu_locked(dst) ||
+ !forwarding) {
+ mtu = rt->rt_pmtu;
+ if (mtu && time_before(jiffies, rt->dst.expires))
+ goto out;
+ }
+
+ /* 'forwarding = true' case should always honour route mtu */
+ mtu = dst_metric_raw(dst, RTAX_MTU);
+ if (mtu)
+ goto out;
+
+ mtu = READ_ONCE(dst->dev->mtu);
+
+ if (unlikely(ip_mtu_locked(dst))) {
+ if (rt->rt_uses_gateway && mtu > 576)
+ mtu = 576;
+ }
+
+out:
+ mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
+
+ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
+}
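+
+/* The 576-byte clamp above for MTU-locked routes via a gateway falls back
+ * to the minimum datagram size every IPv4 host must be able to accept
+ * (RFC 791), a conservative choice when the path MTU cannot be probed.
+ */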
+
+static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+ const struct sk_buff *skb)
+{
+ unsigned int mtu;
+
+ if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
+ bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
+
+ return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
+ }
+
+ mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
+ return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
+}
+
+struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
+ int fc_mx_len,
+ struct netlink_ext_ack *extack);
+static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
+{
+ if (fib_metrics != &dst_default_metrics &&
+ refcount_dec_and_test(&fib_metrics->refcnt))
+ kfree(fib_metrics);
+}
+
+/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
+static inline
+void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
+{
+ dst_init_metrics(dst, fib_metrics->metrics, true);
+
+ if (fib_metrics != &dst_default_metrics) {
+ dst->_metrics |= DST_METRICS_REFCOUNTED;
+ refcount_inc(&fib_metrics->refcnt);
+ }
+}
+
+static inline
+void ip_dst_metrics_put(struct dst_entry *dst)
+{
+ struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
+
+ if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
+ kfree(p);
+}
+
+void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
+
+static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
+ struct sock *sk, int segs)
+{
+ struct iphdr *iph = ip_hdr(skb);
+
+ /* There have been many attacks based on predictable IP IDs, so use
+ * the private generator as much as we can.
+ */
+ if (sk && inet_sk(sk)->inet_daddr) {
+ int val;
+
+ /* avoid atomic operations for TCP,
+ * as we hold socket lock at this point.
+ */
+ if (sk_is_tcp(sk)) {
+ sock_owned_by_me(sk);
+ val = atomic_read(&inet_sk(sk)->inet_id);
+ atomic_set(&inet_sk(sk)->inet_id, val + segs);
+ } else {
+ val = atomic_add_return(segs, &inet_sk(sk)->inet_id);
+ }
+ iph->id = htons(val);
+ return;
+ }
+ if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
+ iph->id = 0;
+ } else {
+ /* Unfortunately we need the big hammer to get a suitable IPID */
+ __ip_select_ident(net, iph, segs);
+ }
+}
+
+static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
+ struct sock *sk)
+{
+ ip_select_ident_segs(net, skb, sk, 1);
+}
+
+static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+ skb->len, proto, 0);
+}
+
+/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
+ * Equivalent to : flow->v4addrs.src = iph->saddr;
+ * flow->v4addrs.dst = iph->daddr;
+ */
+static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
+ const struct iphdr *iph)
+{
+ BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
+ offsetof(typeof(flow->addrs), v4addrs.src) +
+ sizeof(flow->addrs.v4addrs.src));
+ memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
+ flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+}
+
+/*
+ * Map a multicast IP address onto a multicast MAC address for Ethernet.
+ */
+
+static inline void ip_eth_mc_map(__be32 naddr, char *buf)
+{
+ __u32 addr = ntohl(naddr);
+
+ buf[0] = 0x01;
+ buf[1] = 0x00;
+ buf[2] = 0x5e;
+ buf[5] = addr & 0xFF;
+ addr >>= 8;
+ buf[4] = addr & 0xFF;
+ addr >>= 8;
+ buf[3] = addr & 0x7F;
+}
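+
+/* Worked example (illustrative): 224.1.2.3 == 0xE0010203 maps to
+ * 01:00:5e:01:02:03. Only the low 23 bits of the group address are used
+ * (note the 0x7F mask on buf[3]), so 32 IPv4 multicast groups share each
+ * Ethernet multicast MAC (RFC 1112).
+ */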
+
+/*
+ * Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
+ * Leave P_Key as 0 to be filled in by driver.
+ */
+
+static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
+{
+ __u32 addr;
+ unsigned char scope = broadcast[5] & 0xF;
+
+ buf[0] = 0; /* Reserved */
+ buf[1] = 0xff; /* Multicast QPN */
+ buf[2] = 0xff;
+ buf[3] = 0xff;
+ addr = ntohl(naddr);
+ buf[4] = 0xff;
+ buf[5] = 0x10 | scope; /* scope from broadcast address */
+ buf[6] = 0x40; /* IPv4 signature */
+ buf[7] = 0x1b;
+ buf[8] = broadcast[8]; /* P_Key */
+ buf[9] = broadcast[9];
+ buf[10] = 0;
+ buf[11] = 0;
+ buf[12] = 0;
+ buf[13] = 0;
+ buf[14] = 0;
+ buf[15] = 0;
+ buf[19] = addr & 0xff;
+ addr >>= 8;
+ buf[18] = addr & 0xff;
+ addr >>= 8;
+ buf[17] = addr & 0xff;
+ addr >>= 8;
+ buf[16] = addr & 0x0f;
+}
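+
+/* Layout note (informational): the 20-byte IPoIB hardware address built
+ * above is the 4-byte multicast QPN field (reserved byte + 0xffffff)
+ * followed by the 16-byte multicast GID ff1S:401b:PPPP::0aaa:aaaa from
+ * RFC 4391, where S is the scope nibble, PPPP the P_Key and aaa:aaaa the
+ * low 28 bits of the IPv4 group address.
+ */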
+
+static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
+{
+ if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
+ memcpy(buf, broadcast, 4);
+ else
+ memcpy(buf, &naddr, sizeof(naddr));
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/ipv6.h>
+#endif
+
+static __inline__ void inet_reset_saddr(struct sock *sk)
+{
+ inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == PF_INET6) {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ memset(&np->saddr, 0, sizeof(np->saddr));
+ memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
+ }
+#endif
+}
+
+#endif
+
+static inline unsigned int ipv4_addr_hash(__be32 ip)
+{
+ return (__force unsigned int) ip;
+}
+
+static inline u32 ipv4_portaddr_hash(const struct net *net,
+ __be32 saddr,
+ unsigned int port)
+{
+ return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
+}
+
+bool ip_call_ra_chain(struct sk_buff *skb);
+
+/*
+ * Functions provided by ip_fragment.c
+ */
+
+enum ip_defrag_users {
+ IP_DEFRAG_LOCAL_DELIVER,
+ IP_DEFRAG_CALL_RA_CHAIN,
+ IP_DEFRAG_CONNTRACK_IN,
+ __IP_DEFRAG_CONNTRACK_IN_END = IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
+ IP_DEFRAG_CONNTRACK_OUT,
+ __IP_DEFRAG_CONNTRACK_OUT_END = IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
+ IP_DEFRAG_CONNTRACK_BRIDGE_IN,
+ __IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
+ IP_DEFRAG_VS_IN,
+ IP_DEFRAG_VS_OUT,
+ IP_DEFRAG_VS_FWD,
+ IP_DEFRAG_AF_PACKET,
+ IP_DEFRAG_MACVLAN,
+};
+
+/* Return true if the value of 'user' is between 'lower_bound'
+ * and 'upper_bound' inclusive.
+ */
+static inline bool ip_defrag_user_in_between(u32 user,
+ enum ip_defrag_users lower_bound,
+ enum ip_defrag_users upper_bound)
+{
+ return user >= lower_bound && user <= upper_bound;
+}
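+
+/* Illustrative use: the conntrack identifiers above are ranges (a base
+ * value plus a 16-bit zone), so membership is tested with e.g.
+ *
+ * ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
+ * __IP_DEFRAG_CONNTRACK_IN_END)
+ *
+ * to match any conntrack-input defrag user regardless of zone.
+ */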
+
+int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
+#ifdef CONFIG_INET
+struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
+#else
+static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
+{
+ return skb;
+}
+#endif
+
+/*
+ * Functions provided by ip_forward.c
+ */
+
+int ip_forward(struct sk_buff *skb);
+
+/*
+ * Functions provided by ip_options.c
+ */
+
+void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
+ __be32 daddr, struct rtable *rt);
+
+int __ip_options_echo(struct net *net, struct ip_options *dopt,
+ struct sk_buff *skb, const struct ip_options *sopt);
+static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
+ struct sk_buff *skb)
+{
+ return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
+}
+
+void ip_options_fragment(struct sk_buff *skb);
+int __ip_options_compile(struct net *net, struct ip_options *opt,
+ struct sk_buff *skb, __be32 *info);
+int ip_options_compile(struct net *net, struct ip_options *opt,
+ struct sk_buff *skb);
+int ip_options_get(struct net *net, struct ip_options_rcu **optp,
+ sockptr_t data, int optlen);
+void ip_options_undo(struct ip_options *opt);
+void ip_forward_options(struct sk_buff *skb);
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
+
+/*
+ * Functions provided by ip_sockglue.c
+ */
+
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb, int tlen, int offset);
+int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
+ struct ipcm_cookie *ipc, bool allow_ipv6);
+DECLARE_STATIC_KEY_FALSE(ip4_min_ttl);
+int do_ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+ unsigned int optlen);
+int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+ unsigned int optlen);
+int do_ip_getsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
+int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
+ int __user *optlen);
+int ip_ra_control(struct sock *sk, unsigned char on,
+ void (*destructor)(struct sock *));
+
+int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
+void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+ u32 info, u8 *payload);
+void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
+ u32 info);
+
+static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
+{
+ ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
+}
+
+bool icmp_global_allow(void);
+extern int sysctl_icmp_msgs_per_sec;
+extern int sysctl_icmp_msgs_burst;
+
+#ifdef CONFIG_PROC_FS
+int ip_misc_proc_init(void);
+#endif
+
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
+ struct netlink_ext_ack *extack);
+
+static inline bool inetdev_valid_mtu(unsigned int mtu)
+{
+ return likely(mtu >= IPV4_MIN_MTU);
+}
+
+void ip_sock_set_freebind(struct sock *sk);
+int ip_sock_set_mtu_discover(struct sock *sk, int val);
+void ip_sock_set_pktinfo(struct sock *sk);
+void ip_sock_set_recverr(struct sock *sk);
+void ip_sock_set_tos(struct sock *sk, int val);
+void __ip_sock_set_tos(struct sock *sk, int val);
+
+#endif /* _IP_H */
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
new file mode 100644
index 000000000..c8a96b888
--- /dev/null
+++ b/include/net/ip6_checksum.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Checksumming functions for IPv6
+ *
+ * Authors: Jorge Cwik, <jorge@laser.satlink.net>
+ * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ * Borrows very liberally from tcp.c and ip.c, see those
+ * files for more names.
+ */
+
+/*
+ * Fixes:
+ *
+ * Ralf Baechle : generic ipv6 checksum
+ * <ralf@waldorf-gmbh.de>
+ */
+
+#ifndef _CHECKSUM_IPV6_H
+#define _CHECKSUM_IPV6_H
+
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <net/ip.h>
+#include <asm/checksum.h>
+#include <linux/in6.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+
+#ifndef _HAVE_ARCH_IPV6_CSUM
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto, __wsum csum);
+#endif
+
+static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ return ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ skb->len, proto, 0));
+}
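+
+/* Note: csum_ipv6_magic() returns the folded, complemented 16-bit
+ * checksum; '~csum_unfold()' re-expands it into the 32-bit partial-sum
+ * form (__wsum) so the result can seed further checksum accumulation
+ * over the transport payload, e.g. when validating a received UDP or
+ * TCP checksum.
+ */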
+
+static __inline__ __sum16 tcp_v6_check(int len,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __wsum base)
+{
+ return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
+}
+
+static inline void __tcp_v6_send_check(struct sk_buff *skb,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr)
+{
+ struct tcphdr *th = tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+}
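+
+/* This is the usual CHECKSUM_PARTIAL setup: the complement of the
+ * pseudo-header sum is stored in th->check, while csum_start and
+ * csum_offset tell the NIC (or the software fallback) where to fold in
+ * the checksum over the transport header and payload.
+ */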
+
+static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct tcphdr *th = tcp_hdr(skb);
+
+ ipv6h->payload_len = 0;
+ th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
+}
+
+static inline __sum16 udp_v6_check(int len,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __wsum base)
+{
+ return csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, base);
+}
+
+void udp6_set_csum(bool nocheck, struct sk_buff *skb,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr, int len);
+
+int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto);
+#endif
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
new file mode 100644
index 000000000..fa4e6af38
--- /dev/null
+++ b/include/net/ip6_fib.h
@@ -0,0 +1,654 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Linux INET6 implementation
+ *
+ * Authors:
+ * Pedro Roque <roque@di.fc.ul.pt>
+ */
+
+#ifndef _IP6_FIB_H
+#define _IP6_FIB_H
+
+#include <linux/ipv6_route.h>
+#include <linux/rtnetlink.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <net/dst.h>
+#include <net/flow.h>
+#include <net/ip_fib.h>
+#include <net/netlink.h>
+#include <net/inetpeer.h>
+#include <net/fib_notifier.h>
+#include <linux/indirect_call_wrapper.h>
+#include <uapi/linux/bpf.h>
+
+#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+#define FIB6_TABLE_HASHSZ 256
+#else
+#define FIB6_TABLE_HASHSZ 1
+#endif
+
+#define RT6_DEBUG 2
+
+#if RT6_DEBUG >= 3
+#define RT6_TRACE(x...) pr_debug(x)
+#else
+#define RT6_TRACE(x...) do { ; } while (0)
+#endif
+
+struct rt6_info;
+struct fib6_info;
+
+struct fib6_config {
+ u32 fc_table;
+ u32 fc_metric;
+ int fc_dst_len;
+ int fc_src_len;
+ int fc_ifindex;
+ u32 fc_flags;
+ u32 fc_protocol;
+ u16 fc_type; /* only 8 bits are used */
+ u16 fc_delete_all_nh:1,
+ fc_ignore_dev_down:1,
+ __unused:14;
+ u32 fc_nh_id;
+
+ struct in6_addr fc_dst;
+ struct in6_addr fc_src;
+ struct in6_addr fc_prefsrc;
+ struct in6_addr fc_gateway;
+
+ unsigned long fc_expires;
+ struct nlattr *fc_mx;
+ int fc_mx_len;
+ int fc_mp_len;
+ struct nlattr *fc_mp;
+
+ struct nl_info fc_nlinfo;
+ struct nlattr *fc_encap;
+ u16 fc_encap_type;
+ bool fc_is_fdb;
+};
+
+struct fib6_node {
+ struct fib6_node __rcu *parent;
+ struct fib6_node __rcu *left;
+ struct fib6_node __rcu *right;
+#ifdef CONFIG_IPV6_SUBTREES
+ struct fib6_node __rcu *subtree;
+#endif
+ struct fib6_info __rcu *leaf;
+
+ __u16 fn_bit; /* bit key */
+ __u16 fn_flags;
+ int fn_sernum;
+ struct fib6_info __rcu *rr_ptr;
+ struct rcu_head rcu;
+};
+
+struct fib6_gc_args {
+ int timeout;
+ int more;
+};
+
+#ifndef CONFIG_IPV6_SUBTREES
+#define FIB6_SUBTREE(fn) NULL
+
+static inline bool fib6_routes_require_src(const struct net *net)
+{
+ return false;
+}
+
+static inline void fib6_routes_require_src_inc(struct net *net) {}
+static inline void fib6_routes_require_src_dec(struct net *net) {}
+
+#else
+
+static inline bool fib6_routes_require_src(const struct net *net)
+{
+ return net->ipv6.fib6_routes_require_src > 0;
+}
+
+static inline void fib6_routes_require_src_inc(struct net *net)
+{
+ net->ipv6.fib6_routes_require_src++;
+}
+
+static inline void fib6_routes_require_src_dec(struct net *net)
+{
+ net->ipv6.fib6_routes_require_src--;
+}
+
+#define FIB6_SUBTREE(fn) (rcu_dereference_protected((fn)->subtree, 1))
+#endif
+
+/*
+ * routing information
+ *
+ */
+
+struct rt6key {
+ struct in6_addr addr;
+ int plen;
+};
+
+struct fib6_table;
+
+struct rt6_exception_bucket {
+ struct hlist_head chain;
+ int depth;
+};
+
+struct rt6_exception {
+ struct hlist_node hlist;
+ struct rt6_info *rt6i;
+ unsigned long stamp;
+ struct rcu_head rcu;
+};
+
+#define FIB6_EXCEPTION_BUCKET_SIZE_SHIFT 10
+#define FIB6_EXCEPTION_BUCKET_SIZE (1 << FIB6_EXCEPTION_BUCKET_SIZE_SHIFT)
+#define FIB6_MAX_DEPTH 5
+
+struct fib6_nh {
+ struct fib_nh_common nh_common;
+
+#ifdef CONFIG_IPV6_ROUTER_PREF
+ unsigned long last_probe;
+#endif
+
+ struct rt6_info * __percpu *rt6i_pcpu;
+ struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
+};
+
+struct fib6_info {
+ struct fib6_table *fib6_table;
+ struct fib6_info __rcu *fib6_next;
+ struct fib6_node __rcu *fib6_node;
+
+ /* Multipath routes:
+ * siblings is a list of fib6_info that have the same metric/weight
+ * and destination, but not the same gateway. nsiblings is just a
+ * cache to speed up lookup.
+ */
+ union {
+ struct list_head fib6_siblings;
+ struct list_head nh_list;
+ };
+ unsigned int fib6_nsiblings;
+
+ refcount_t fib6_ref;
+ unsigned long expires;
+ struct dst_metrics *fib6_metrics;
+#define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1]
+
+ struct rt6key fib6_dst;
+ u32 fib6_flags;
+ struct rt6key fib6_src;
+ struct rt6key fib6_prefsrc;
+
+ u32 fib6_metric;
+ u8 fib6_protocol;
+ u8 fib6_type;
+
+ u8 offload;
+ u8 trap;
+ u8 offload_failed;
+
+ u8 should_flush:1,
+ dst_nocount:1,
+ dst_nopolicy:1,
+ fib6_destroying:1,
+ unused:4;
+
+ struct rcu_head rcu;
+ struct nexthop *nh;
+ struct fib6_nh fib6_nh[];
+};
+
+struct rt6_info {
+ struct dst_entry dst;
+ struct fib6_info __rcu *from;
+ int sernum;
+
+ struct rt6key rt6i_dst;
+ struct rt6key rt6i_src;
+ struct in6_addr rt6i_gateway;
+ struct inet6_dev *rt6i_idev;
+ u32 rt6i_flags;
+
+ struct list_head rt6i_uncached;
+ struct uncached_list *rt6i_uncached_list;
+
+ /* more non-fragment space at head required */
+ unsigned short rt6i_nfheader_len;
+};
+
+struct fib6_result {
+ struct fib6_nh *nh;
+ struct fib6_info *f6i;
+ u32 fib6_flags;
+ u8 fib6_type;
+ struct rt6_info *rt6;
+};
+
+#define for_each_fib6_node_rt_rcu(fn) \
+ for (rt = rcu_dereference((fn)->leaf); rt; \
+ rt = rcu_dereference(rt->fib6_next))
+
+#define for_each_fib6_walker_rt(w) \
+ for (rt = (w)->leaf; rt; \
+ rt = rcu_dereference_protected(rt->fib6_next, 1))
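+
+/* Usage sketch (illustrative): both iterators are deliberately
+ * non-hygienic and expect a 'struct fib6_info *rt' in the enclosing
+ * scope, e.g.:
+ *
+ * struct fib6_info *rt;
+ *
+ * rcu_read_lock();
+ * for_each_fib6_node_rt_rcu(fn) {
+ * ... inspect rt ...
+ * }
+ * rcu_read_unlock();
+ */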
+
+static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
+{
+ return ((struct rt6_info *)dst)->rt6i_idev;
+}
+
+static inline bool fib6_requires_src(const struct fib6_info *rt)
+{
+ return rt->fib6_src.plen > 0;
+}
+
+static inline void fib6_clean_expires(struct fib6_info *f6i)
+{
+ f6i->fib6_flags &= ~RTF_EXPIRES;
+ f6i->expires = 0;
+}
+
+static inline void fib6_set_expires(struct fib6_info *f6i,
+ unsigned long expires)
+{
+ f6i->expires = expires;
+ f6i->fib6_flags |= RTF_EXPIRES;
+}
+
+static inline bool fib6_check_expired(const struct fib6_info *f6i)
+{
+ if (f6i->fib6_flags & RTF_EXPIRES)
+ return time_after(jiffies, f6i->expires);
+ return false;
+}
+
+/* Safely fetch fn->fn_sernum for the passed-in fib6_info and store the
+ * result in the passed-in cookie.
+ * Returns true if the cookie could be fetched safely, false otherwise.
+ */
+static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i,
+ u32 *cookie)
+{
+ struct fib6_node *fn;
+ bool status = false;
+
+ fn = rcu_dereference(f6i->fib6_node);
+
+ if (fn) {
+ *cookie = READ_ONCE(fn->fn_sernum);
+ /* pairs with smp_wmb() in __fib6_update_sernum_upto_root() */
+ smp_rmb();
+ status = true;
+ }
+
+ return status;
+}
+
+static inline u32 rt6_get_cookie(const struct rt6_info *rt)
+{
+ struct fib6_info *from;
+ u32 cookie = 0;
+
+ if (rt->sernum)
+ return rt->sernum;
+
+ rcu_read_lock();
+
+ from = rcu_dereference(rt->from);
+ if (from)
+ fib6_get_cookie_safe(from, &cookie);
+
+ rcu_read_unlock();
+
+ return cookie;
+}
+
+static inline void ip6_rt_put(struct rt6_info *rt)
+{
+ /* dst_release() accepts a NULL parameter.
+ * We rely on dst being the first member of struct rt6_info.
+ */
+ BUILD_BUG_ON(offsetof(struct rt6_info, dst) != 0);
+ dst_release(&rt->dst);
+}
+
+struct fib6_info *fib6_info_alloc(gfp_t gfp_flags, bool with_fib6_nh);
+void fib6_info_destroy_rcu(struct rcu_head *head);
+
+static inline void fib6_info_hold(struct fib6_info *f6i)
+{
+ refcount_inc(&f6i->fib6_ref);
+}
+
+static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
+{
+ return refcount_inc_not_zero(&f6i->fib6_ref);
+}
+
+static inline void fib6_info_release(struct fib6_info *f6i)
+{
+ if (f6i && refcount_dec_and_test(&f6i->fib6_ref))
+ call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
+}
+
+enum fib6_walk_state {
+#ifdef CONFIG_IPV6_SUBTREES
+ FWS_S,
+#endif
+ FWS_L,
+ FWS_R,
+ FWS_C,
+ FWS_U
+};
+
+struct fib6_walker {
+ struct list_head lh;
+ struct fib6_node *root, *node;
+ struct fib6_info *leaf;
+ enum fib6_walk_state state;
+ unsigned int skip;
+ unsigned int count;
+ unsigned int skip_in_node;
+ int (*func)(struct fib6_walker *);
+ void *args;
+};
+
+struct rt6_statistics {
+ __u32 fib_nodes; /* all fib6 nodes */
+ __u32 fib_route_nodes; /* intermediate nodes */
+ __u32 fib_rt_entries; /* rt entries in fib table */
+ __u32 fib_rt_cache; /* cached rt entries in exception table */
+ __u32 fib_discarded_routes; /* total number of routes deleted */
+
+ /* The following stat is not protected by any lock */
+ atomic_t fib_rt_alloc; /* total number of routes alloced */
+};
+
+#define RTN_TL_ROOT 0x0001
+#define RTN_ROOT 0x0002 /* tree root node */
+#define RTN_RTINFO 0x0004 /* node with valid routing info */
+
+/*
+ * priority levels (or metrics)
+ *
+ */
+
+
+struct fib6_table {
+ struct hlist_node tb6_hlist;
+ u32 tb6_id;
+ spinlock_t tb6_lock;
+ struct fib6_node tb6_root;
+ struct inet_peer_base tb6_peers;
+ unsigned int flags;
+ unsigned int fib_seq;
+#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
+};
+
+#define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC
+#define RT6_TABLE_MAIN RT_TABLE_MAIN
+#define RT6_TABLE_DFLT RT6_TABLE_MAIN
+#define RT6_TABLE_INFO RT6_TABLE_MAIN
+#define RT6_TABLE_PREFIX RT6_TABLE_MAIN
+
+#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+#define FIB6_TABLE_MIN 1
+#define FIB6_TABLE_MAX RT_TABLE_MAX
+#define RT6_TABLE_LOCAL RT_TABLE_LOCAL
+#else
+#define FIB6_TABLE_MIN RT_TABLE_MAIN
+#define FIB6_TABLE_MAX FIB6_TABLE_MIN
+#define RT6_TABLE_LOCAL RT6_TABLE_MAIN
+#endif
+
+typedef struct rt6_info *(*pol_lookup_t)(struct net *,
+ struct fib6_table *,
+ struct flowi6 *,
+ const struct sk_buff *, int);
+
+struct fib6_entry_notifier_info {
+ struct fib_notifier_info info; /* must be first */
+ struct fib6_info *rt;
+ unsigned int nsiblings;
+};
+
+/*
+ * exported functions
+ */
+
+struct fib6_table *fib6_get_table(struct net *net, u32 id);
+struct fib6_table *fib6_new_table(struct net *net, u32 id);
+struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags, pol_lookup_t lookup);
+
+/* called with rcu lock held; can return error pointer
+ * caller needs to select path
+ */
+int fib6_lookup(struct net *net, int oif, struct flowi6 *fl6,
+ struct fib6_result *res, int flags);
+
+/* called with rcu lock held; caller needs to select path */
+int fib6_table_lookup(struct net *net, struct fib6_table *table,
+ int oif, struct flowi6 *fl6, struct fib6_result *res,
+ int strict);
+
+void fib6_select_path(const struct net *net, struct fib6_result *res,
+ struct flowi6 *fl6, int oif, bool have_oif_match,
+ const struct sk_buff *skb, int strict);
+struct fib6_node *fib6_node_lookup(struct fib6_node *root,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr);
+
+struct fib6_node *fib6_locate(struct fib6_node *root,
+ const struct in6_addr *daddr, int dst_len,
+ const struct in6_addr *saddr, int src_len,
+ bool exact_match);
+
+void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *arg),
+ void *arg);
+void fib6_clean_all_skip_notify(struct net *net,
+ int (*func)(struct fib6_info *, void *arg),
+ void *arg);
+
+int fib6_add(struct fib6_node *root, struct fib6_info *rt,
+ struct nl_info *info, struct netlink_ext_ack *extack);
+int fib6_del(struct fib6_info *rt, struct nl_info *info);
+
+static inline
+void rt6_get_prefsrc(const struct rt6_info *rt, struct in6_addr *addr)
+{
+ const struct fib6_info *from;
+
+ rcu_read_lock();
+
+ from = rcu_dereference(rt->from);
+ if (from)
+ *addr = from->fib6_prefsrc.addr;
+ else
+ *addr = in6addr_any;
+
+ rcu_read_unlock();
+}
+
+int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+ struct fib6_config *cfg, gfp_t gfp_flags,
+ struct netlink_ext_ack *extack);
+void fib6_nh_release(struct fib6_nh *fib6_nh);
+void fib6_nh_release_dsts(struct fib6_nh *fib6_nh);
+
+int call_fib6_entry_notifiers(struct net *net,
+ enum fib_event_type event_type,
+ struct fib6_info *rt,
+ struct netlink_ext_ack *extack);
+int call_fib6_multipath_entry_notifiers(struct net *net,
+ enum fib_event_type event_type,
+ struct fib6_info *rt,
+ unsigned int nsiblings,
+ struct netlink_ext_ack *extack);
+int call_fib6_entry_notifiers_replace(struct net *net, struct fib6_info *rt);
+void fib6_rt_update(struct net *net, struct fib6_info *rt,
+ struct nl_info *info);
+void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
+ unsigned int flags);
+
+void fib6_run_gc(unsigned long expires, struct net *net, bool force);
+
+void fib6_gc_cleanup(void);
+
+int fib6_init(void);
+
+struct ipv6_route_iter {
+ struct seq_net_private p;
+ struct fib6_walker w;
+ loff_t skip;
+ struct fib6_table *tbl;
+ int sernum;
+};
+
+extern const struct seq_operations ipv6_route_seq_ops;
+
+int call_fib6_notifier(struct notifier_block *nb,
+ enum fib_event_type event_type,
+ struct fib_notifier_info *info);
+int call_fib6_notifiers(struct net *net, enum fib_event_type event_type,
+ struct fib_notifier_info *info);
+
+int __net_init fib6_notifier_init(struct net *net);
+void __net_exit fib6_notifier_exit(struct net *net);
+
+unsigned int fib6_tables_seq_read(struct net *net);
+int fib6_tables_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
+
+void fib6_update_sernum(struct net *net, struct fib6_info *rt);
+void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt);
+void fib6_update_sernum_stub(struct net *net, struct fib6_info *f6i);
+
+void fib6_metric_set(struct fib6_info *f6i, int metric, u32 val);
+static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric)
+{
+ return !!(f6i->fib6_metrics->metrics[RTAX_LOCK - 1] & (1 << metric));
+}
+void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
+ bool offload, bool trap, bool offload_failed);
+
+#if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL)
+struct bpf_iter__ipv6_route {
+ __bpf_md_ptr(struct bpf_iter_meta *, meta);
+ __bpf_md_ptr(struct fib6_info *, rt);
+};
+#endif
+
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_output(struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags));
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_input(struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags));
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *__ip6_route_redirect(struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags));
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_lookup(struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags));
+static inline struct rt6_info *pol_lookup_func(pol_lookup_t lookup,
+ struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags)
+{
+ return INDIRECT_CALL_4(lookup,
+ ip6_pol_route_output,
+ ip6_pol_route_input,
+ ip6_pol_route_lookup,
+ __ip6_route_redirect,
+ net, table, fl6, skb, flags);
+}
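+
+/* INDIRECT_CALL_4() compares 'lookup' against the four resolvers declared
+ * above and turns a match into a direct call, avoiding a retpolined
+ * indirect branch on CONFIG_RETPOLINE kernels; an unmatched pointer falls
+ * back to a plain indirect call.
+ */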
+
+#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+static inline bool fib6_has_custom_rules(const struct net *net)
+{
+ return net->ipv6.fib6_has_custom_rules;
+}
+
+int fib6_rules_init(void);
+void fib6_rules_cleanup(void);
+bool fib6_rule_default(const struct fib_rule *rule);
+int fib6_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
+unsigned int fib6_rules_seq_read(struct net *net);
+
+static inline bool fib6_rules_early_flow_dissect(struct net *net,
+ struct sk_buff *skb,
+ struct flowi6 *fl6,
+ struct flow_keys *flkeys)
+{
+ unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
+
+ if (!net->ipv6.fib6_rules_require_fldissect)
+ return false;
+
+ memset(flkeys, 0, sizeof(*flkeys));
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ flkeys, NULL, 0, 0, 0, flag);
+
+ fl6->fl6_sport = flkeys->ports.src;
+ fl6->fl6_dport = flkeys->ports.dst;
+ fl6->flowi6_proto = flkeys->basic.ip_proto;
+
+ return true;
+}
+#else
+static inline bool fib6_has_custom_rules(const struct net *net)
+{
+ return false;
+}
+static inline int fib6_rules_init(void)
+{
+ return 0;
+}
+static inline void fib6_rules_cleanup(void)
+{
+}
+static inline bool fib6_rule_default(const struct fib_rule *rule)
+{
+ return true;
+}
+static inline int fib6_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+static inline unsigned int fib6_rules_seq_read(struct net *net)
+{
+ return 0;
+}
+static inline bool fib6_rules_early_flow_dissect(struct net *net,
+ struct sk_buff *skb,
+ struct flowi6 *fl6,
+ struct flow_keys *flkeys)
+{
+ return false;
+}
+#endif
+#endif
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
new file mode 100644
index 000000000..035d61d50
--- /dev/null
+++ b/include/net/ip6_route.h
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_IP6_ROUTE_H
+#define _NET_IP6_ROUTE_H
+
+#include <net/addrconf.h>
+#include <net/flow.h>
+#include <net/ip6_fib.h>
+#include <net/sock.h>
+#include <net/lwtunnel.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/route.h>
+#include <net/nexthop.h>
+
+struct route_info {
+ __u8 type;
+ __u8 length;
+ __u8 prefix_len;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ __u8 reserved_h:3,
+ route_pref:2,
+ reserved_l:3;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 reserved_l:3,
+ route_pref:2,
+ reserved_h:3;
+#endif
+ __be32 lifetime;
+ __u8 prefix[]; /* 0,8 or 16 */
+};
+
+#define RT6_LOOKUP_F_IFACE 0x00000001
+#define RT6_LOOKUP_F_REACHABLE 0x00000002
+#define RT6_LOOKUP_F_HAS_SADDR 0x00000004
+#define RT6_LOOKUP_F_SRCPREF_TMP 0x00000008
+#define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
+#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
+#define RT6_LOOKUP_F_IGNORE_LINKSTATE 0x00000040
+#define RT6_LOOKUP_F_DST_NOREF 0x00000080
+
+/* We do not (yet?) support IPv6 jumbograms (RFC 2675).
+ * Unlike IPv4's tot_len, hdr->payload_len does not include the IPv6
+ * header itself, hence the extra sizeof(struct ipv6hdr) below.
+ */
+#define IP6_MAX_MTU (0xFFFF + sizeof(struct ipv6hdr))
+
+/*
+ * rt6_srcprefs2flags() and rt6_flags2srcprefs() translate
+ * between IPV6_ADDR_PREFERENCES socket option values
+ * IPV6_PREFER_SRC_TMP = 0x1
+ * IPV6_PREFER_SRC_PUBLIC = 0x2
+ * IPV6_PREFER_SRC_COA = 0x4
+ * and above RT6_LOOKUP_F_SRCPREF_xxx flags.
+ */
+static inline int rt6_srcprefs2flags(unsigned int srcprefs)
+{
+ /* No need to bitmask because srcprefs have only 3 bits. */
+ return srcprefs << 3;
+}
+
+static inline unsigned int rt6_flags2srcprefs(int flags)
+{
+ return (flags >> 3) & 7;
+}
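+
+/* Worked example: IPV6_PREFER_SRC_TMP == 0x1, and 0x1 << 3 == 0x8 ==
+ * RT6_LOOKUP_F_SRCPREF_TMP; the reverse shift in rt6_flags2srcprefs()
+ * recovers the socket-option value.
+ */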
+
+static inline bool rt6_need_strict(const struct in6_addr *daddr)
+{
+ return ipv6_addr_type(daddr) &
+ (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
+}
+
+/* fib entries using a nexthop object cannot be coalesced into
+ * a multipath route
+ */
+static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
+{
+ /* the RTF_ADDRCONF flag filters out router advertisements (RAs) */
+ return !(f6i->fib6_flags & RTF_ADDRCONF) && !f6i->nh &&
+ f6i->fib6_nh->fib_nh_gw_family;
+}
+
+void ip6_route_input(struct sk_buff *skb);
+struct dst_entry *ip6_route_input_lookup(struct net *net,
+ struct net_device *dev,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb, int flags);
+
+struct dst_entry *ip6_route_output_flags_noref(struct net *net,
+ const struct sock *sk,
+ struct flowi6 *fl6, int flags);
+
+struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
+ struct flowi6 *fl6, int flags);
+
+static inline struct dst_entry *ip6_route_output(struct net *net,
+ const struct sock *sk,
+ struct flowi6 *fl6)
+{
+ return ip6_route_output_flags(net, sk, fl6, 0);
+}
+
+/* Only conditionally release dst if flags indicates
+ * !RT6_LOOKUP_F_DST_NOREF or dst is in uncached_list.
+ */
+static inline void ip6_rt_put_flags(struct rt6_info *rt, int flags)
+{
+ if (!(flags & RT6_LOOKUP_F_DST_NOREF) ||
+ !list_empty(&rt->rt6i_uncached))
+ ip6_rt_put(rt);
+}
+
+struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
+ const struct sk_buff *skb, int flags);
+struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
+ int ifindex, struct flowi6 *fl6,
+ const struct sk_buff *skb, int flags);
+
+void ip6_route_init_special_entries(void);
+int ip6_route_init(void);
+void ip6_route_cleanup(void);
+
+int ipv6_route_ioctl(struct net *net, unsigned int cmd,
+ struct in6_rtmsg *rtmsg);
+
+int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
+ struct netlink_ext_ack *extack);
+int ip6_ins_rt(struct net *net, struct fib6_info *f6i);
+int ip6_del_rt(struct net *net, struct fib6_info *f6i, bool skip_notify);
+
+void rt6_flush_exceptions(struct fib6_info *f6i);
+void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args,
+ unsigned long now);
+
+static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i,
+ const struct in6_addr *daddr,
+ unsigned int prefs,
+ struct in6_addr *saddr)
+{
+ int err = 0;
+
+ if (f6i && f6i->fib6_prefsrc.plen) {
+ *saddr = f6i->fib6_prefsrc.addr;
+ } else {
+ struct net_device *dev = f6i ? fib6_info_nh_dev(f6i) : NULL;
+
+ err = ipv6_dev_get_saddr(net, dev, daddr, prefs, saddr);
+ }
+
+ return err;
+}
+
+struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
+ const struct in6_addr *saddr, int oif,
+ const struct sk_buff *skb, int flags);
+u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
+ const struct sk_buff *skb, struct flow_keys *hkeys);
+
+struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6);
+
+void fib6_force_start_gc(struct net *net);
+
+struct fib6_info *addrconf_f6i_alloc(struct net *net, struct inet6_dev *idev,
+ const struct in6_addr *addr, bool anycast,
+ gfp_t gfp_flags);
+
+struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
+ int flags);
+
+/*
+ * support functions for ND (IPv6 neighbour discovery)
+ */
+struct fib6_info *rt6_get_dflt_router(struct net *net,
+ const struct in6_addr *addr,
+ struct net_device *dev);
+struct fib6_info *rt6_add_dflt_router(struct net *net,
+ const struct in6_addr *gwaddr,
+ struct net_device *dev, unsigned int pref,
+ u32 defrtr_usr_metric);
+
+void rt6_purge_dflt_routers(struct net *net);
+
+int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
+ const struct in6_addr *gwaddr);
+
+void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
+ u32 mark, kuid_t uid);
+void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
+void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
+ kuid_t uid);
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif);
+void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
+
+struct netlink_callback;
+
+struct rt6_rtnl_dump_arg {
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ struct net *net;
+ struct fib_dump_filter filter;
+};
+
+int rt6_dump_route(struct fib6_info *f6i, void *p_arg, unsigned int skip);
+void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
+void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
+void rt6_sync_up(struct net_device *dev, unsigned char nh_flags);
+void rt6_disable_ip(struct net_device *dev, unsigned long event);
+void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
+void rt6_multipath_rebalance(struct fib6_info *f6i);
+
+void rt6_uncached_list_add(struct rt6_info *rt);
+void rt6_uncached_list_del(struct rt6_info *rt);
+
+static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
+{
+ const struct dst_entry *dst = skb_dst(skb);
+ const struct rt6_info *rt6 = NULL;
+
+ if (dst)
+ rt6 = container_of(dst, struct rt6_info, dst);
+
+ return rt6;
+}
+
+/*
+ * Store a destination cache entry in a socket
+ */
+static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ np->dst_cookie = rt6_get_cookie((struct rt6_info *)dst);
+ sk_setup_caps(sk, dst);
+ np->daddr_cache = daddr;
+#ifdef CONFIG_IPV6_SUBTREES
+ np->saddr_cache = saddr;
+#endif
+}
+
+void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
+ const struct flowi6 *fl6);
+
+static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
+{
+ struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
+
+ return rt->rt6i_flags & RTF_LOCAL;
+}
+
+static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
+ const struct in6_addr *daddr)
+{
+ struct rt6_info *rt = (struct rt6_info *)dst;
+
+ return rt->rt6i_flags & RTF_ANYCAST ||
+ (rt->rt6i_dst.plen < 127 &&
+ !(rt->rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) &&
+ ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
+}
+
+int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct net *, struct sock *, struct sk_buff *));
+
+static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb)
+{
+ const struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
+ inet6_sk(skb->sk) : NULL;
+ const struct dst_entry *dst = skb_dst(skb);
+ unsigned int mtu;
+
+ if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) {
+ mtu = READ_ONCE(dst->dev->mtu);
+ mtu -= lwtunnel_headroom(dst->lwtstate, mtu);
+ } else {
+ mtu = dst_mtu(dst);
+ }
+ return mtu;
+}
+
+static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
+{
+ return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE &&
+ inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT;
+}
+
+static inline bool ip6_sk_ignore_df(const struct sock *sk)
+{
+ return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO ||
+ inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
+}
+
+static inline const struct in6_addr *rt6_nexthop(const struct rt6_info *rt,
+ const struct in6_addr *daddr)
+{
+ if (rt->rt6i_flags & RTF_GATEWAY)
+ return &rt->rt6i_gateway;
+ else if (unlikely(rt->rt6i_flags & RTF_CACHE))
+ return &rt->rt6i_dst.addr;
+ else
+ return daddr;
+}
+
+static inline bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info *b)
+{
+ struct fib6_nh *nha, *nhb;
+
+ if (a->nh || b->nh)
+ return nexthop_cmp(a->nh, b->nh);
+
+ nha = a->fib6_nh;
+ nhb = b->fib6_nh;
+ return nha->fib_nh_dev == nhb->fib_nh_dev &&
+ ipv6_addr_equal(&nha->fib_nh_gw6, &nhb->fib_nh_gw6) &&
+ !lwtunnel_cmp_encap(nha->fib_nh_lws, nhb->fib_nh_lws);
+}
+
+static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ bool forwarding)
+{
+ struct inet6_dev *idev;
+ unsigned int mtu;
+
+ if (!forwarding || dst_metric_locked(dst, RTAX_MTU)) {
+ mtu = dst_metric_raw(dst, RTAX_MTU);
+ if (mtu)
+ goto out;
+ }
+
+ mtu = IPV6_MIN_MTU;
+ rcu_read_lock();
+ idev = __in6_dev_get(dst->dev);
+ if (idev)
+ mtu = idev->cnf.mtu6;
+ rcu_read_unlock();
+
+out:
+ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
+}
+
+u32 ip6_mtu_from_fib6(const struct fib6_result *res,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr);
+
+struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
+ struct net_device *dev, struct sk_buff *skb,
+ const void *daddr);
+#endif
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
new file mode 100644
index 000000000..74b369bdd
--- /dev/null
+++ b/include/net/ip6_tunnel.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_IP6_TUNNEL_H
+#define _NET_IP6_TUNNEL_H
+
+#include <linux/ipv6.h>
+#include <linux/netdevice.h>
+#include <linux/if_tunnel.h>
+#include <linux/ip6_tunnel.h>
+#include <net/ip_tunnels.h>
+#include <net/dst_cache.h>
+
+#define IP6TUNNEL_ERR_TIMEO (30*HZ)
+
+/* capable of sending packets */
+#define IP6_TNL_F_CAP_XMIT 0x10000
+/* capable of receiving packets */
+#define IP6_TNL_F_CAP_RCV 0x20000
+/* determine capability on a per-packet basis */
+#define IP6_TNL_F_CAP_PER_PACKET 0x40000
+
+struct __ip6_tnl_parm {
+ char name[IFNAMSIZ]; /* name of tunnel device */
+ int link; /* ifindex of underlying L2 interface */
+ __u8 proto; /* tunnel protocol */
+ __u8 encap_limit; /* encapsulation limit for tunnel */
+ __u8 hop_limit; /* hop limit for tunnel */
+ bool collect_md;
+ __be32 flowinfo; /* traffic class and flowlabel for tunnel */
+ __u32 flags; /* tunnel flags */
+ struct in6_addr laddr; /* local tunnel end-point address */
+ struct in6_addr raddr; /* remote tunnel end-point address */
+
+ __be16 i_flags;
+ __be16 o_flags;
+ __be32 i_key;
+ __be32 o_key;
+
+ __u32 fwmark;
+ __u32 index; /* ERSPAN type II index */
+ __u8 erspan_ver; /* ERSPAN version */
+ __u8 dir; /* direction */
+ __u16 hwid; /* hwid */
+};
+
+/* IPv6 tunnel */
+struct ip6_tnl {
+ struct ip6_tnl __rcu *next; /* next tunnel in list */
+ struct net_device *dev; /* virtual device associated with tunnel */
+ netdevice_tracker dev_tracker;
+ struct net *net; /* netns for packet i/o */
+ struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
+ struct flowi fl; /* flowi template for xmit */
+ struct dst_cache dst_cache; /* cached dst */
+ struct gro_cells gro_cells;
+
+ int err_count;
+ unsigned long err_time;
+
+ /* These fields used only by GRE */
+ __u32 i_seqno; /* The last seen seqno */
+ atomic_t o_seqno; /* The last output seqno */
+ int hlen; /* tun_hlen + encap_hlen */
+ int tun_hlen; /* Precalculated header length */
+ int encap_hlen; /* Encap header length (FOU,GUE) */
+ struct ip_tunnel_encap encap;
+ int mlink;
+};
+
+struct ip6_tnl_encap_ops {
+ size_t (*encap_hlen)(struct ip_tunnel_encap *e);
+ int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi6 *fl6);
+ int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, __be32 info);
+};
+
+#ifdef CONFIG_INET
+
+extern const struct ip6_tnl_encap_ops __rcu *
+ ip6tun_encaps[MAX_IPTUN_ENCAP_OPS];
+
+int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
+ unsigned int num);
+int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
+ unsigned int num);
+int ip6_tnl_encap_setup(struct ip6_tnl *t,
+ struct ip_tunnel_encap *ipencap);
+
+static inline int ip6_encap_hlen(struct ip_tunnel_encap *e)
+{
+ const struct ip6_tnl_encap_ops *ops;
+ int hlen = -EINVAL;
+
+ if (e->type == TUNNEL_ENCAP_NONE)
+ return 0;
+
+ if (e->type >= MAX_IPTUN_ENCAP_OPS)
+ return -EINVAL;
+
+ rcu_read_lock();
+ ops = rcu_dereference(ip6tun_encaps[e->type]);
+ if (likely(ops && ops->encap_hlen))
+ hlen = ops->encap_hlen(e);
+ rcu_read_unlock();
+
+ return hlen;
+}
+
+static inline int ip6_tnl_encap(struct sk_buff *skb, struct ip6_tnl *t,
+ u8 *protocol, struct flowi6 *fl6)
+{
+ const struct ip6_tnl_encap_ops *ops;
+ int ret = -EINVAL;
+
+ if (t->encap.type == TUNNEL_ENCAP_NONE)
+ return 0;
+
+ if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
+ return -EINVAL;
+
+ rcu_read_lock();
+ ops = rcu_dereference(ip6tun_encaps[t->encap.type]);
+ if (likely(ops && ops->build_header))
+ ret = ops->build_header(skb, &t->encap, protocol, fl6);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/* Tunnel encapsulation limit destination sub-option */
+
+struct ipv6_tlv_tnl_enc_lim {
+ __u8 type; /* type-code for option */
+ __u8 length; /* option length */
+ __u8 encap_limit; /* tunnel encapsulation limit */
+} __packed;
+
+int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
+ const struct in6_addr *raddr);
+int ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+ const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+ bool log_ecn_error);
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
+ const struct in6_addr *raddr);
+int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ struct flowi6 *fl6, int encap_limit, __u32 *pmtu, __u8 proto);
+__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
+__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
+ const struct in6_addr *raddr);
+struct net *ip6_tnl_get_link_net(const struct net_device *dev);
+int ip6_tnl_get_iflink(const struct net_device *dev);
+int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
+
+static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int pkt_len, err;
+
+ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+ pkt_len = skb->len - skb_inner_network_offset(skb);
+ err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
+
+ if (dev) {
+ if (unlikely(net_xmit_eval(err)))
+ pkt_len = -1;
+ iptunnel_xmit_stats(dev, pkt_len);
+ }
+}
+#endif
+#endif
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
new file mode 100644
index 000000000..15de07d36
--- /dev/null
+++ b/include/net/ip_fib.h
@@ -0,0 +1,608 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Forwarding Information Base.
+ *
+ * Authors: A.N.Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ */
+
+#ifndef _NET_IP_FIB_H
+#define _NET_IP_FIB_H
+
+#include <net/flow.h>
+#include <linux/seq_file.h>
+#include <linux/rcupdate.h>
+#include <net/fib_notifier.h>
+#include <net/fib_rules.h>
+#include <net/inet_dscp.h>
+#include <net/inetpeer.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/refcount.h>
+
+struct fib_config {
+ u8 fc_dst_len;
+ dscp_t fc_dscp;
+ u8 fc_protocol;
+ u8 fc_scope;
+ u8 fc_type;
+ u8 fc_gw_family;
+ /* 2 bytes unused */
+ u32 fc_table;
+ __be32 fc_dst;
+ union {
+ __be32 fc_gw4;
+ struct in6_addr fc_gw6;
+ };
+ int fc_oif;
+ u32 fc_flags;
+ u32 fc_priority;
+ __be32 fc_prefsrc;
+ u32 fc_nh_id;
+ struct nlattr *fc_mx;
+ struct rtnexthop *fc_mp;
+ int fc_mx_len;
+ int fc_mp_len;
+ u32 fc_flow;
+ u32 fc_nlflags;
+ struct nl_info fc_nlinfo;
+ struct nlattr *fc_encap;
+ u16 fc_encap_type;
+};
+
+struct fib_info;
+struct rtable;
+
+struct fib_nh_exception {
+ struct fib_nh_exception __rcu *fnhe_next;
+ int fnhe_genid;
+ __be32 fnhe_daddr;
+ u32 fnhe_pmtu;
+ bool fnhe_mtu_locked;
+ __be32 fnhe_gw;
+ unsigned long fnhe_expires;
+ struct rtable __rcu *fnhe_rth_input;
+ struct rtable __rcu *fnhe_rth_output;
+ unsigned long fnhe_stamp;
+ struct rcu_head rcu;
+};
+
+struct fnhe_hash_bucket {
+ struct fib_nh_exception __rcu *chain;
+};
+
+#define FNHE_HASH_SHIFT 11
+#define FNHE_HASH_SIZE (1 << FNHE_HASH_SHIFT)
+#define FNHE_RECLAIM_DEPTH 5
+
+struct fib_nh_common {
+ struct net_device *nhc_dev;
+ netdevice_tracker nhc_dev_tracker;
+ int nhc_oif;
+ unsigned char nhc_scope;
+ u8 nhc_family;
+ u8 nhc_gw_family;
+ unsigned char nhc_flags;
+ struct lwtunnel_state *nhc_lwtstate;
+
+ union {
+ __be32 ipv4;
+ struct in6_addr ipv6;
+ } nhc_gw;
+
+ int nhc_weight;
+ atomic_t nhc_upper_bound;
+
+ /* v4 specific, but allows fib6_nh with v4 routes */
+ struct rtable __rcu * __percpu *nhc_pcpu_rth_output;
+ struct rtable __rcu *nhc_rth_input;
+ struct fnhe_hash_bucket __rcu *nhc_exceptions;
+};
+
+struct fib_nh {
+ struct fib_nh_common nh_common;
+ struct hlist_node nh_hash;
+ struct fib_info *nh_parent;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ __u32 nh_tclassid;
+#endif
+ __be32 nh_saddr;
+ int nh_saddr_genid;
+#define fib_nh_family nh_common.nhc_family
+#define fib_nh_dev nh_common.nhc_dev
+#define fib_nh_dev_tracker nh_common.nhc_dev_tracker
+#define fib_nh_oif nh_common.nhc_oif
+#define fib_nh_flags nh_common.nhc_flags
+#define fib_nh_lws nh_common.nhc_lwtstate
+#define fib_nh_scope nh_common.nhc_scope
+#define fib_nh_gw_family nh_common.nhc_gw_family
+#define fib_nh_gw4 nh_common.nhc_gw.ipv4
+#define fib_nh_gw6 nh_common.nhc_gw.ipv6
+#define fib_nh_weight nh_common.nhc_weight
+#define fib_nh_upper_bound nh_common.nhc_upper_bound
+};
+
+/*
+ * This structure contains data shared by many routes.
+ */
+
+struct nexthop;
+
+struct fib_info {
+ struct hlist_node fib_hash;
+ struct hlist_node fib_lhash;
+ struct list_head nh_list;
+ struct net *fib_net;
+ refcount_t fib_treeref;
+ refcount_t fib_clntref;
+ unsigned int fib_flags;
+ unsigned char fib_dead;
+ unsigned char fib_protocol;
+ unsigned char fib_scope;
+ unsigned char fib_type;
+ __be32 fib_prefsrc;
+ u32 fib_tb_id;
+ u32 fib_priority;
+ struct dst_metrics *fib_metrics;
+#define fib_mtu fib_metrics->metrics[RTAX_MTU-1]
+#define fib_window fib_metrics->metrics[RTAX_WINDOW-1]
+#define fib_rtt fib_metrics->metrics[RTAX_RTT-1]
+#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1]
+ int fib_nhs;
+ bool fib_nh_is_v6;
+ bool nh_updated;
+ bool pfsrc_removed;
+ struct nexthop *nh;
+ struct rcu_head rcu;
+ struct fib_nh fib_nh[];
+};
+
+
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+struct fib_rule;
+#endif
+
+struct fib_table;
+struct fib_result {
+ __be32 prefix;
+ unsigned char prefixlen;
+ unsigned char nh_sel;
+ unsigned char type;
+ unsigned char scope;
+ u32 tclassid;
+ struct fib_nh_common *nhc;
+ struct fib_info *fi;
+ struct fib_table *table;
+ struct hlist_head *fa_head;
+};
+
+struct fib_result_nl {
+ __be32 fl_addr; /* To be looked up */
+ u32 fl_mark;
+ unsigned char fl_tos;
+ unsigned char fl_scope;
+ unsigned char tb_id_in;
+
+ unsigned char tb_id; /* Results */
+ unsigned char prefixlen;
+ unsigned char nh_sel;
+ unsigned char type;
+ unsigned char scope;
+ int err;
+};
+
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+#define FIB_TABLE_HASHSZ 256
+#else
+#define FIB_TABLE_HASHSZ 2
+#endif
+
+__be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc,
+ unsigned char scope);
+__be32 fib_result_prefsrc(struct net *net, struct fib_result *res);
+
+#define FIB_RES_NHC(res) ((res).nhc)
+#define FIB_RES_DEV(res) (FIB_RES_NHC(res)->nhc_dev)
+#define FIB_RES_OIF(res) (FIB_RES_NHC(res)->nhc_oif)
+
+struct fib_rt_info {
+ struct fib_info *fi;
+ u32 tb_id;
+ __be32 dst;
+ int dst_len;
+ dscp_t dscp;
+ u8 type;
+ u8 offload:1,
+ trap:1,
+ offload_failed:1,
+ unused:5;
+};
+
+struct fib_entry_notifier_info {
+ struct fib_notifier_info info; /* must be first */
+ u32 dst;
+ int dst_len;
+ struct fib_info *fi;
+ dscp_t dscp;
+ u8 type;
+ u32 tb_id;
+};
+
+struct fib_nh_notifier_info {
+ struct fib_notifier_info info; /* must be first */
+ struct fib_nh *fib_nh;
+};
+
+int call_fib4_notifier(struct notifier_block *nb,
+ enum fib_event_type event_type,
+ struct fib_notifier_info *info);
+int call_fib4_notifiers(struct net *net, enum fib_event_type event_type,
+ struct fib_notifier_info *info);
+
+int __net_init fib4_notifier_init(struct net *net);
+void __net_exit fib4_notifier_exit(struct net *net);
+
+void fib_info_notify_update(struct net *net, struct nl_info *info);
+int fib_notify(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
+
+struct fib_table {
+ struct hlist_node tb_hlist;
+ u32 tb_id;
+ int tb_num_default;
+ struct rcu_head rcu;
+ unsigned long *tb_data;
+ unsigned long __data[];
+};
+
+struct fib_dump_filter {
+ u32 table_id;
+ /* filter_set is an optimization flag: true when any filter below is set */
+ bool filter_set;
+ bool dump_routes;
+ bool dump_exceptions;
+ unsigned char protocol;
+ unsigned char rt_type;
+ unsigned int flags;
+ struct net_device *dev;
+};
+
+int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
+ struct fib_result *res, int fib_flags);
+int fib_table_insert(struct net *, struct fib_table *, struct fib_config *,
+ struct netlink_ext_ack *extack);
+int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
+ struct netlink_ext_ack *extack);
+int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
+ struct netlink_callback *cb, struct fib_dump_filter *filter);
+int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
+struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
+void fib_table_flush_external(struct fib_table *table);
+void fib_free_table(struct fib_table *tb);
+
+#ifndef CONFIG_IP_MULTIPLE_TABLES
+
+#define TABLE_LOCAL_INDEX (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1))
+#define TABLE_MAIN_INDEX (RT_TABLE_MAIN & (FIB_TABLE_HASHSZ - 1))
+
+static inline struct fib_table *fib_get_table(struct net *net, u32 id)
+{
+ struct hlist_node *tb_hlist;
+ struct hlist_head *ptr;
+
+ ptr = id == RT_TABLE_LOCAL ?
+ &net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX] :
+ &net->ipv4.fib_table_hash[TABLE_MAIN_INDEX];
+
+ tb_hlist = rcu_dereference_rtnl(hlist_first_rcu(ptr));
+
+ return hlist_entry(tb_hlist, struct fib_table, tb_hlist);
+}
+
+static inline struct fib_table *fib_new_table(struct net *net, u32 id)
+{
+ return fib_get_table(net, id);
+}
+
+static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
+ struct fib_result *res, unsigned int flags)
+{
+ struct fib_table *tb;
+ int err = -ENETUNREACH;
+
+ rcu_read_lock();
+
+ tb = fib_get_table(net, RT_TABLE_MAIN);
+ if (tb)
+ err = fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF);
+
+ if (err == -EAGAIN)
+ err = -ENETUNREACH;
+
+ rcu_read_unlock();
+
+ return err;
+}
+
+static inline bool fib4_has_custom_rules(const struct net *net)
+{
+ return false;
+}
+
+static inline bool fib4_rule_default(const struct fib_rule *rule)
+{
+ return true;
+}
+
+static inline int fib4_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static inline unsigned int fib4_rules_seq_read(struct net *net)
+{
+ return 0;
+}
+
+static inline bool fib4_rules_early_flow_dissect(struct net *net,
+ struct sk_buff *skb,
+ struct flowi4 *fl4,
+ struct flow_keys *flkeys)
+{
+ return false;
+}
+#else /* CONFIG_IP_MULTIPLE_TABLES */
+int __net_init fib4_rules_init(struct net *net);
+void __net_exit fib4_rules_exit(struct net *net);
+
+struct fib_table *fib_new_table(struct net *net, u32 id);
+struct fib_table *fib_get_table(struct net *net, u32 id);
+
+int __fib_lookup(struct net *net, struct flowi4 *flp,
+ struct fib_result *res, unsigned int flags);
+
+static inline int fib_lookup(struct net *net, struct flowi4 *flp,
+ struct fib_result *res, unsigned int flags)
+{
+ struct fib_table *tb;
+ int err = -ENETUNREACH;
+
+ flags |= FIB_LOOKUP_NOREF;
+ if (net->ipv4.fib_has_custom_rules)
+ return __fib_lookup(net, flp, res, flags);
+
+ rcu_read_lock();
+
+ res->tclassid = 0;
+
+ tb = rcu_dereference_rtnl(net->ipv4.fib_main);
+ if (tb)
+ err = fib_table_lookup(tb, flp, res, flags);
+
+ if (!err)
+ goto out;
+
+ tb = rcu_dereference_rtnl(net->ipv4.fib_default);
+ if (tb)
+ err = fib_table_lookup(tb, flp, res, flags);
+
+out:
+ if (err == -EAGAIN)
+ err = -ENETUNREACH;
+
+ rcu_read_unlock();
+
+ return err;
+}
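+
+/* Usage sketch (illustrative, not part of this header): both fib_lookup()
+ * variants pass FIB_LOOKUP_NOREF, so the fib_result borrows RCU-protected
+ * state and is only safe to consume inside an RCU read-side section. The
+ * variables below are assumptions for the example.
+ *
+ *	struct fib_result res;
+ *	struct flowi4 fl4 = { .daddr = daddr };
+ *	struct net_device *dev = NULL;
+ *
+ *	rcu_read_lock();
+ *	if (fib_lookup(net, &fl4, &res, 0) == 0)
+ *		dev = FIB_RES_DEV(res);	(only valid under RCU)
+ *	rcu_read_unlock();
+ */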
+
+static inline bool fib4_has_custom_rules(const struct net *net)
+{
+ return net->ipv4.fib_has_custom_rules;
+}
+
+bool fib4_rule_default(const struct fib_rule *rule);
+int fib4_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
+unsigned int fib4_rules_seq_read(struct net *net);
+
+static inline bool fib4_rules_early_flow_dissect(struct net *net,
+ struct sk_buff *skb,
+ struct flowi4 *fl4,
+ struct flow_keys *flkeys)
+{
+ unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
+
+ if (!net->ipv4.fib_rules_require_fldissect)
+ return false;
+
+ memset(flkeys, 0, sizeof(*flkeys));
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ flkeys, NULL, 0, 0, 0, flag);
+
+ fl4->fl4_sport = flkeys->ports.src;
+ fl4->fl4_dport = flkeys->ports.dst;
+ fl4->flowi4_proto = flkeys->basic.ip_proto;
+
+ return true;
+}
+
+#endif /* CONFIG_IP_MULTIPLE_TABLES */
+
+/* Exported by fib_frontend.c */
+extern const struct nla_policy rtm_ipv4_policy[];
+void ip_fib_init(void);
+int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
+ struct netlink_ext_ack *extack);
+__be32 fib_compute_spec_dst(struct sk_buff *skb);
+bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev);
+int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
+ u8 tos, int oif, struct net_device *dev,
+ struct in_device *idev, u32 *itag);
+#ifdef CONFIG_IP_ROUTE_CLASSID
+static inline int fib_num_tclassid_users(struct net *net)
+{
+ return atomic_read(&net->ipv4.fib_num_tclassid_users);
+}
+#else
+static inline int fib_num_tclassid_users(struct net *net)
+{
+ return 0;
+}
+#endif
+int fib_unmerge(struct net *net);
+
+static inline bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc,
+					  const struct net_device *dev)
+{
+ if (nhc->nhc_dev == dev ||
+ l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex)
+ return true;
+
+ return false;
+}
+
+/* Exported by fib_semantics.c */
+int ip_fib_check_default(__be32 gw, struct net_device *dev);
+int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
+int fib_sync_down_addr(struct net_device *dev, __be32 local);
+int fib_sync_up(struct net_device *dev, unsigned char nh_flags);
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
+void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig);
+
+/* Fields used for sysctl_fib_multipath_hash_fields.
+ * Common to IPv4 and IPv6.
+ *
+ * Add new fields at the end. This is user API.
+ */
+#define FIB_MULTIPATH_HASH_FIELD_SRC_IP BIT(0)
+#define FIB_MULTIPATH_HASH_FIELD_DST_IP BIT(1)
+#define FIB_MULTIPATH_HASH_FIELD_IP_PROTO BIT(2)
+#define FIB_MULTIPATH_HASH_FIELD_FLOWLABEL BIT(3)
+#define FIB_MULTIPATH_HASH_FIELD_SRC_PORT BIT(4)
+#define FIB_MULTIPATH_HASH_FIELD_DST_PORT BIT(5)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP BIT(6)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP BIT(7)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO BIT(8)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL BIT(9)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT BIT(10)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT BIT(11)
+
+#define FIB_MULTIPATH_HASH_FIELD_OUTER_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_SRC_IP | \
+ FIB_MULTIPATH_HASH_FIELD_DST_IP | \
+ FIB_MULTIPATH_HASH_FIELD_IP_PROTO | \
+ FIB_MULTIPATH_HASH_FIELD_FLOWLABEL | \
+ FIB_MULTIPATH_HASH_FIELD_SRC_PORT | \
+ FIB_MULTIPATH_HASH_FIELD_DST_PORT)
+
+#define FIB_MULTIPATH_HASH_FIELD_INNER_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
+
+#define FIB_MULTIPATH_HASH_FIELD_ALL_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_OUTER_MASK | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
+
+#define FIB_MULTIPATH_HASH_FIELD_DEFAULT_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_SRC_IP | \
+ FIB_MULTIPATH_HASH_FIELD_DST_IP | \
+ FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
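+
+/* Illustrative sketch (not part of this header): a sysctl handler
+ * validating a user-supplied hash-fields bitmap could mask it against
+ * FIB_MULTIPATH_HASH_FIELD_ALL_MASK; "val" is an assumed variable.
+ *
+ *	if (val & ~FIB_MULTIPATH_HASH_FIELD_ALL_MASK)
+ *		return -EINVAL;	(unknown bits set)
+ *	if (!val)
+ *		val = FIB_MULTIPATH_HASH_FIELD_DEFAULT_MASK;
+ */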
+
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
+ const struct sk_buff *skb, struct flow_keys *flkeys);
+#endif
+int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope,
+ struct netlink_ext_ack *extack);
+void fib_select_multipath(struct fib_result *res, int hash);
+void fib_select_path(struct net *net, struct fib_result *res,
+ struct flowi4 *fl4, const struct sk_buff *skb);
+
+int fib_nh_init(struct net *net, struct fib_nh *fib_nh,
+ struct fib_config *cfg, int nh_weight,
+ struct netlink_ext_ack *extack);
+void fib_nh_release(struct net *net, struct fib_nh *fib_nh);
+int fib_nh_common_init(struct net *net, struct fib_nh_common *nhc,
+ struct nlattr *fc_encap, u16 fc_encap_type,
+ void *cfg, gfp_t gfp_flags,
+ struct netlink_ext_ack *extack);
+void fib_nh_common_release(struct fib_nh_common *nhc);
+
+/* Exported by fib_trie.c */
+void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri);
+void fib_trie_init(void);
+struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
+bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
+ const struct flowi4 *flp);
+
+static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
+{
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ struct fib_nh_common *nhc = res->nhc;
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+ u32 rtag;
+#endif
+ if (nhc->nhc_family == AF_INET) {
+ struct fib_nh *nh;
+
+ nh = container_of(nhc, struct fib_nh, nh_common);
+ *itag = nh->nh_tclassid << 16;
+ } else {
+ *itag = 0;
+ }
+
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+ rtag = res->tclassid;
+ if (*itag == 0)
+ *itag = (rtag<<16);
+ *itag |= (rtag>>16);
+#endif
+#endif
+}
+
+void fib_flush(struct net *net);
+void free_fib_info(struct fib_info *fi);
+
+static inline void fib_info_hold(struct fib_info *fi)
+{
+ refcount_inc(&fi->fib_clntref);
+}
+
+static inline void fib_info_put(struct fib_info *fi)
+{
+ if (refcount_dec_and_test(&fi->fib_clntref))
+ free_fib_info(fi);
+}
+
+#ifdef CONFIG_PROC_FS
+int __net_init fib_proc_init(struct net *net);
+void __net_exit fib_proc_exit(struct net *net);
+#else
+static inline int fib_proc_init(struct net *net)
+{
+ return 0;
+}
+static inline void fib_proc_exit(struct net *net)
+{
+}
+#endif
+
+u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr);
+
+int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
+ struct fib_dump_filter *filter,
+ struct netlink_callback *cb);
+
+int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
+ u8 rt_family, unsigned char *flags, bool skip_oif);
+int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
+ int nh_weight, u8 rt_family, u32 nh_tclassid);
+#endif /* _NET_FIB_H */
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
new file mode 100644
index 000000000..bca80522f
--- /dev/null
+++ b/include/net/ip_tunnels.h
@@ -0,0 +1,556 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_IP_TUNNELS_H
+#define __NET_IP_TUNNELS_H 1
+
+#include <linux/if_tunnel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/bitops.h>
+
+#include <net/dsfield.h>
+#include <net/gro_cells.h>
+#include <net/inet_ecn.h>
+#include <net/netns/generic.h>
+#include <net/rtnetlink.h>
+#include <net/lwtunnel.h>
+#include <net/dst_cache.h>
+
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_route.h>
+#endif
+
+/* Keep error state on tunnel for 30 sec */
+#define IPTUNNEL_ERR_TIMEO (30*HZ)
+
+/* Used to memset ip_tunnel padding. */
+#define IP_TUNNEL_KEY_SIZE offsetofend(struct ip_tunnel_key, tp_dst)
+
+/* Used to memset ipv4 address padding. */
+#define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst)
+#define IP_TUNNEL_KEY_IPV4_PAD_LEN \
+ (sizeof_field(struct ip_tunnel_key, u) - \
+ sizeof_field(struct ip_tunnel_key, u.ipv4))
+
+struct ip_tunnel_key {
+ __be64 tun_id;
+ union {
+ struct {
+ __be32 src;
+ __be32 dst;
+ } ipv4;
+ struct {
+ struct in6_addr src;
+ struct in6_addr dst;
+ } ipv6;
+ } u;
+ __be16 tun_flags;
+ u8 tos; /* TOS for IPv4, TC for IPv6 */
+ u8 ttl; /* TTL for IPv4, HL for IPv6 */
+ __be32 label; /* Flow Label for IPv6 */
+ __be16 tp_src;
+ __be16 tp_dst;
+ __u8 flow_flags;
+};
+
+/* Flags for ip_tunnel_info mode. */
+#define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */
+#define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */
+#define IP_TUNNEL_INFO_BRIDGE 0x04 /* represents a bridged tunnel id */
+
+/* Maximum tunnel options length. */
+#define IP_TUNNEL_OPTS_MAX \
+ GENMASK((sizeof_field(struct ip_tunnel_info, \
+ options_len) * BITS_PER_BYTE) - 1, 0)
+
+struct ip_tunnel_info {
+ struct ip_tunnel_key key;
+#ifdef CONFIG_DST_CACHE
+ struct dst_cache dst_cache;
+#endif
+ u8 options_len;
+ u8 mode;
+};
+
+/* 6rd prefix/relay information */
+#ifdef CONFIG_IPV6_SIT_6RD
+struct ip_tunnel_6rd_parm {
+ struct in6_addr prefix;
+ __be32 relay_prefix;
+ u16 prefixlen;
+ u16 relay_prefixlen;
+};
+#endif
+
+struct ip_tunnel_encap {
+ u16 type;
+ u16 flags;
+ __be16 sport;
+ __be16 dport;
+};
+
+struct ip_tunnel_prl_entry {
+ struct ip_tunnel_prl_entry __rcu *next;
+ __be32 addr;
+ u16 flags;
+ struct rcu_head rcu_head;
+};
+
+struct metadata_dst;
+
+struct ip_tunnel {
+ struct ip_tunnel __rcu *next;
+ struct hlist_node hash_node;
+
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
+ struct net *net; /* netns for packet i/o */
+
+ unsigned long err_time; /* Time when the last ICMP error
+ * arrived */
+ int err_count; /* Number of arrived ICMP errors */
+
+	/* These fields are used only by GRE */
+ u32 i_seqno; /* The last seen seqno */
+ atomic_t o_seqno; /* The last output seqno */
+ int tun_hlen; /* Precalculated header length */
+
+	/* These four fields are used only by ERSPAN */
+ u32 index; /* ERSPAN type II index */
+ u8 erspan_ver; /* ERSPAN version */
+ u8 dir; /* ERSPAN direction */
+ u16 hwid; /* ERSPAN hardware ID */
+
+ struct dst_cache dst_cache;
+
+ struct ip_tunnel_parm parms;
+
+ int mlink;
+ int encap_hlen; /* Encap header length (FOU,GUE) */
+ int hlen; /* tun_hlen + encap_hlen */
+ struct ip_tunnel_encap encap;
+
+ /* for SIT */
+#ifdef CONFIG_IPV6_SIT_6RD
+ struct ip_tunnel_6rd_parm ip6rd;
+#endif
+ struct ip_tunnel_prl_entry __rcu *prl; /* potential router list */
+ unsigned int prl_count; /* # of entries in PRL */
+ unsigned int ip_tnl_net_id;
+ struct gro_cells gro_cells;
+ __u32 fwmark;
+ bool collect_md;
+ bool ignore_df;
+};
+
+struct tnl_ptk_info {
+ __be16 flags;
+ __be16 proto;
+ __be32 key;
+ __be32 seq;
+ int hdr_len;
+};
+
+#define PACKET_RCVD 0
+#define PACKET_REJECT 1
+#define PACKET_NEXT 2
+
+#define IP_TNL_HASH_BITS 7
+#define IP_TNL_HASH_SIZE (1 << IP_TNL_HASH_BITS)
+
+struct ip_tunnel_net {
+ struct net_device *fb_tunnel_dev;
+ struct rtnl_link_ops *rtnl_link_ops;
+ struct hlist_head tunnels[IP_TNL_HASH_SIZE];
+ struct ip_tunnel __rcu *collect_md_tun;
+ int type;
+};
+
+static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
+ __be32 saddr, __be32 daddr,
+ u8 tos, u8 ttl, __be32 label,
+ __be16 tp_src, __be16 tp_dst,
+ __be64 tun_id, __be16 tun_flags)
+{
+ key->tun_id = tun_id;
+ key->u.ipv4.src = saddr;
+ key->u.ipv4.dst = daddr;
+ memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
+ 0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
+ key->tos = tos;
+ key->ttl = ttl;
+ key->label = label;
+ key->tun_flags = tun_flags;
+
+	/* For tunnel types on top of IPsec, the tp_src and tp_dst of
+	 * the upper tunnel are used.
+	 * E.g.: for GRE over IPsec, tp_src and tp_dst are zero.
+	 */
+ key->tp_src = tp_src;
+ key->tp_dst = tp_dst;
+
+ /* Clear struct padding. */
+ if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
+ memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
+ 0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
+}
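+
+/* Usage sketch (illustrative only): building a TX key for a
+ * collect-metadata tunnel. All variables are assumptions; the helper
+ * zeroes the IPv6 and struct padding, so the resulting key is safe to
+ * hash or compare with memcmp().
+ *
+ *	struct ip_tunnel_key key;
+ *
+ *	ip_tunnel_key_init(&key, saddr, daddr, tos, ttl,
+ *			   0, 0, dst_port,
+ *			   key32_to_tunnel_id(vni), TUNNEL_KEY);
+ */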
+
+static inline bool
+ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
+ const struct ip_tunnel_info *info)
+{
+ if (skb->mark)
+ return false;
+ if (!info)
+ return true;
+ if (info->key.tun_flags & TUNNEL_NOCACHE)
+ return false;
+
+ return true;
+}
+
+static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
+ *tun_info)
+{
+ return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
+}
+
+static inline __be64 key32_to_tunnel_id(__be32 key)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be64)key;
+#else
+ return (__force __be64)((__force u64)key << 32);
+#endif
+}
+
+/* Returns the least-significant 32 bits of a __be64. */
+static inline __be32 tunnel_id_to_key32(__be64 tun_id)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be32)tun_id;
+#else
+ return (__force __be32)((__force u64)tun_id >> 32);
+#endif
+}
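+
+/* These two helpers are inverses on the 32 significant bits: for any
+ * 32-bit key, tunnel_id_to_key32(key32_to_tunnel_id(key)) == key.
+ * Illustrative check (the variable is an assumption):
+ *
+ *	__be32 key = htonl(0x1234);
+ *
+ *	WARN_ON(tunnel_id_to_key32(key32_to_tunnel_id(key)) != key);
+ */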
+
+#ifdef CONFIG_INET
+
+static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
+ int proto,
+ __be32 daddr, __be32 saddr,
+ __be32 key, __u8 tos,
+ struct net *net, int oif,
+ __u32 mark, __u32 tun_inner_hash,
+ __u8 flow_flags)
+{
+ memset(fl4, 0, sizeof(*fl4));
+
+ if (oif) {
+ fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index_rcu(net, oif);
+ /* Legacy VRF/l3mdev use case */
+ fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
+ }
+
+ fl4->daddr = daddr;
+ fl4->saddr = saddr;
+ fl4->flowi4_tos = tos;
+ fl4->flowi4_proto = proto;
+ fl4->fl4_gre_key = key;
+ fl4->flowi4_mark = mark;
+ fl4->flowi4_multipath_hash = tun_inner_hash;
+ fl4->flowi4_flags = flow_flags;
+}
+
+int ip_tunnel_init(struct net_device *dev);
+void ip_tunnel_uninit(struct net_device *dev);
+void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
+struct net *ip_tunnel_get_link_net(const struct net_device *dev);
+int ip_tunnel_get_iflink(const struct net_device *dev);
+int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
+ struct rtnl_link_ops *ops, char *devname);
+
+void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
+ struct rtnl_link_ops *ops);
+
+void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ const struct iphdr *tnl_params, const u8 protocol);
+void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ const u8 proto, int tunnel_hlen);
+int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
+int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
+int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
+int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
+
+struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
+ int link, __be16 flags,
+ __be32 remote, __be32 local,
+ __be32 key);
+
+int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
+ const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+ bool log_ecn_error);
+int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct ip_tunnel_parm *p, __u32 fwmark);
+int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
+ struct ip_tunnel_parm *p, __u32 fwmark);
+void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
+
+bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
+ struct ip_tunnel_encap *encap);
+
+void ip_tunnel_netlink_parms(struct nlattr *data[],
+ struct ip_tunnel_parm *parms);
+
+extern const struct header_ops ip_tunnel_header_ops;
+__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
+
+struct ip_tunnel_encap_ops {
+ size_t (*encap_hlen)(struct ip_tunnel_encap *e);
+ int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi4 *fl4);
+ int (*err_handler)(struct sk_buff *skb, u32 info);
+};
+
+#define MAX_IPTUN_ENCAP_OPS 8
+
+extern const struct ip_tunnel_encap_ops __rcu *
+ iptun_encaps[MAX_IPTUN_ENCAP_OPS];
+
+int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
+ unsigned int num);
+int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
+ unsigned int num);
+
+int ip_tunnel_encap_setup(struct ip_tunnel *t,
+ struct ip_tunnel_encap *ipencap);
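+
+/* Registration sketch (illustrative, not part of this header): an
+ * encapsulation module fills an ip_tunnel_encap_ops and registers it
+ * under its TUNNEL_ENCAP_* type. The ops instance and callbacks below
+ * are assumptions, modeled on FOU/GUE-style users.
+ *
+ *	static const struct ip_tunnel_encap_ops my_encap_ops = {
+ *		.encap_hlen   = my_encap_hlen,
+ *		.build_header = my_build_header,
+ *	};
+ *
+ *	err = ip_tunnel_encap_add_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
+ */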
+
+static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+{
+ int nhlen;
+
+ switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ nhlen = sizeof(struct ipv6hdr);
+ break;
+#endif
+ case htons(ETH_P_IP):
+ nhlen = sizeof(struct iphdr);
+ break;
+ default:
+ nhlen = 0;
+ }
+
+ return pskb_network_may_pull(skb, nhlen);
+}
+
+static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
+{
+ const struct ip_tunnel_encap_ops *ops;
+ int hlen = -EINVAL;
+
+ if (e->type == TUNNEL_ENCAP_NONE)
+ return 0;
+
+ if (e->type >= MAX_IPTUN_ENCAP_OPS)
+ return -EINVAL;
+
+ rcu_read_lock();
+ ops = rcu_dereference(iptun_encaps[e->type]);
+ if (likely(ops && ops->encap_hlen))
+ hlen = ops->encap_hlen(e);
+ rcu_read_unlock();
+
+ return hlen;
+}
+
+static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
+ u8 *protocol, struct flowi4 *fl4)
+{
+ const struct ip_tunnel_encap_ops *ops;
+ int ret = -EINVAL;
+
+ if (t->encap.type == TUNNEL_ENCAP_NONE)
+ return 0;
+
+ if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
+ return -EINVAL;
+
+ rcu_read_lock();
+ ops = rcu_dereference(iptun_encaps[t->encap.type]);
+ if (likely(ops && ops->build_header))
+ ret = ops->build_header(skb, &t->encap, protocol, fl4);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/* Extract dsfield from inner protocol */
+static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
+ const struct sk_buff *skb)
+{
+ __be16 payload_protocol = skb_protocol(skb, true);
+
+ if (payload_protocol == htons(ETH_P_IP))
+ return iph->tos;
+ else if (payload_protocol == htons(ETH_P_IPV6))
+ return ipv6_get_dsfield((const struct ipv6hdr *)iph);
+ else
+ return 0;
+}
+
+static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
+ const struct sk_buff *skb)
+{
+ __be16 payload_protocol = skb_protocol(skb, true);
+
+ if (payload_protocol == htons(ETH_P_IP))
+ return iph->ttl;
+ else if (payload_protocol == htons(ETH_P_IPV6))
+ return ((const struct ipv6hdr *)iph)->hop_limit;
+ else
+ return 0;
+}
+
+/* Propagate ECN bits out */
+static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
+ const struct sk_buff *skb)
+{
+ u8 inner = ip_tunnel_get_dsfield(iph, skb);
+
+ return INET_ECN_encapsulate(tos, inner);
+}
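+
+/* Sketch (illustrative): on transmit, a tunnel derives the outer TOS
+ * from its configured TOS and the inner packet, so ECN marks survive
+ * encapsulation. "tunnel" and "inner_iph" are assumptions.
+ *
+ *	u8 tos = ip_tunnel_ecn_encap(tunnel->parms.iph.tos,
+ *				     inner_iph, skb);
+ */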
+
+int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
+ __be16 inner_proto, bool raw_proto, bool xnet);
+
+static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
+ __be16 inner_proto, bool xnet)
+{
+ return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
+}
+
+void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, u8 proto,
+ u8 tos, u8 ttl, __be16 df, bool xnet);
+struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
+ gfp_t flags);
+int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
+ int headroom, bool reply);
+
+int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
+
+static inline int iptunnel_pull_offloads(struct sk_buff *skb)
+{
+ if (skb_is_gso(skb)) {
+ int err;
+
+ err = skb_unclone(skb, GFP_ATOMIC);
+ if (unlikely(err))
+ return err;
+ skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
+ NETIF_F_GSO_SHIFT);
+ }
+
+ skb->encapsulation = 0;
+ return 0;
+}
+
+static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
+{
+ if (pkt_len > 0) {
+ struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ u64_stats_add(&tstats->tx_bytes, pkt_len);
+ u64_stats_inc(&tstats->tx_packets);
+ u64_stats_update_end(&tstats->syncp);
+ put_cpu_ptr(tstats);
+ return;
+ }
+
+ if (pkt_len < 0) {
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_aborted_errors);
+ } else {
+ DEV_STATS_INC(dev, tx_dropped);
+ }
+}
+
+static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
+{
+ return info + 1;
+}
+
+static inline void ip_tunnel_info_opts_get(void *to,
+ const struct ip_tunnel_info *info)
+{
+ memcpy(to, info + 1, info->options_len);
+}
+
+static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
+ const void *from, int len,
+ __be16 flags)
+{
+ info->options_len = len;
+ if (len > 0) {
+ memcpy(ip_tunnel_info_opts(info), from, len);
+ info->key.tun_flags |= flags;
+ }
+}
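+
+/* Sketch (illustrative): copying Geneve-style TLV options into the
+ * area that follows the ip_tunnel_info. "opts" and "opts_len" are
+ * assumptions; the metadata_dst must have been allocated large enough
+ * to hold the options.
+ *
+ *	ip_tunnel_info_opts_set(&tun_dst->u.tun_info, opts, opts_len,
+ *				TUNNEL_GENEVE_OPT);
+ */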
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+ return (struct ip_tunnel_info *)lwtstate->data;
+}
+
+DECLARE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);
+
+/* Returns > 0 if metadata should be collected */
+static inline int ip_tunnel_collect_metadata(void)
+{
+ return static_branch_unlikely(&ip_tunnel_metadata_cnt);
+}
+
+void __init ip_tunnel_core_init(void);
+
+void ip_tunnel_need_metadata(void);
+void ip_tunnel_unneed_metadata(void);
+
+#else /* CONFIG_INET */
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+ return NULL;
+}
+
+static inline void ip_tunnel_need_metadata(void)
+{
+}
+
+static inline void ip_tunnel_unneed_metadata(void)
+{
+}
+
+static inline void ip_tunnel_info_opts_get(void *to,
+ const struct ip_tunnel_info *info)
+{
+}
+
+static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
+ const void *from, int len,
+ __be16 flags)
+{
+ info->options_len = 0;
+}
+
+#endif /* CONFIG_INET */
+
+#endif /* __NET_IP_TUNNELS_H */
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
new file mode 100644
index 000000000..abc46f057
--- /dev/null
+++ b/include/net/ip_vs.h
@@ -0,0 +1,1739 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* IP Virtual Server
+ * data structure and functionality definitions
+ */
+
+#ifndef _NET_IP_VS_H
+#define _NET_IP_VS_H
+
+#include <linux/ip_vs.h> /* definitions shared with userland */
+
+#include <asm/types.h> /* for __uXX types */
+
+#include <linux/list.h> /* for struct list_head */
+#include <linux/spinlock.h>             /* for rwlock_t */
+#include <linux/atomic.h>               /* for atomic_t */
+#include <linux/refcount.h>             /* for refcount_t */
+#include <linux/workqueue.h>
+
+#include <linux/compiler.h>
+#include <linux/timer.h>
+#include <linux/bug.h>
+
+#include <net/checksum.h>
+#include <linux/netfilter.h> /* for union nf_inet_addr */
+#include <linux/ip.h>
+#include <linux/ipv6.h> /* for struct ipv6hdr */
+#include <net/ipv6.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+#include <net/net_namespace.h>		/* Network namespace */
+
+#define IP_VS_HDR_INVERSE 1
+#define IP_VS_HDR_ICMP 2
+
+/* Generic access of ipvs struct */
+static inline struct netns_ipvs *net_ipvs(struct net *net)
+{
+ return net->ipvs;
+}
+
+/* Connections' size value needed by ip_vs_ctl.c */
+extern int ip_vs_conn_tab_size;
+
+struct ip_vs_iphdr {
+ int hdr_flags; /* ipvs flags */
+	__u32 off;	/* Where the IPv6 or IPv4 header starts */
+	__u32 len;	/* IPv4: simply where L4 starts;
+			 * IPv6: where the L4 transport header starts */
+	__u16 fragoffs;	/* IPv6 fragment offset, 0 if first frag (or not a frag) */
+ __s16 protocol;
+ __s32 flags;
+ union nf_inet_addr saddr;
+ union nf_inet_addr daddr;
+};
+
+static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
+ int len, void *buffer)
+{
+ return skb_header_pointer(skb, offset, len, buffer);
+}
+
+/* This function handles filling *ip_vs_iphdr, both for IPv4 and IPv6.
+ * IPv6 requires some extra work, as finding the proper header position
+ * depends on the IPv6 extension headers.
+ */
+static inline int
+ip_vs_fill_iph_skb_off(int af, const struct sk_buff *skb, int offset,
+ int hdr_flags, struct ip_vs_iphdr *iphdr)
+{
+ iphdr->hdr_flags = hdr_flags;
+ iphdr->off = offset;
+
+#ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6) {
+ struct ipv6hdr _iph;
+ const struct ipv6hdr *iph = skb_header_pointer(
+ skb, offset, sizeof(_iph), &_iph);
+ if (!iph)
+ return 0;
+
+ iphdr->saddr.in6 = iph->saddr;
+ iphdr->daddr.in6 = iph->daddr;
+ /* ipv6_find_hdr() updates len, flags */
+ iphdr->len = offset;
+ iphdr->flags = 0;
+ iphdr->protocol = ipv6_find_hdr(skb, &iphdr->len, -1,
+ &iphdr->fragoffs,
+ &iphdr->flags);
+ if (iphdr->protocol < 0)
+ return 0;
+ } else
+#endif
+ {
+ struct iphdr _iph;
+ const struct iphdr *iph = skb_header_pointer(
+ skb, offset, sizeof(_iph), &_iph);
+ if (!iph)
+ return 0;
+
+ iphdr->len = offset + iph->ihl * 4;
+ iphdr->fragoffs = 0;
+ iphdr->protocol = iph->protocol;
+ iphdr->saddr.ip = iph->saddr;
+ iphdr->daddr.ip = iph->daddr;
+ }
+
+ return 1;
+}
+
+static inline int
+ip_vs_fill_iph_skb_icmp(int af, const struct sk_buff *skb, int offset,
+ bool inverse, struct ip_vs_iphdr *iphdr)
+{
+ int hdr_flags = IP_VS_HDR_ICMP;
+
+ if (inverse)
+ hdr_flags |= IP_VS_HDR_INVERSE;
+
+ return ip_vs_fill_iph_skb_off(af, skb, offset, hdr_flags, iphdr);
+}
+
+static inline int
+ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, bool inverse,
+ struct ip_vs_iphdr *iphdr)
+{
+ int hdr_flags = 0;
+
+ if (inverse)
+ hdr_flags |= IP_VS_HDR_INVERSE;
+
+ return ip_vs_fill_iph_skb_off(af, skb, skb_network_offset(skb),
+ hdr_flags, iphdr);
+}
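+
+/* Usage sketch (illustrative): parse the packet headers once and reuse
+ * the result; the helpers return 0 when the header cannot be read.
+ * Variables are assumptions.
+ *
+ *	struct ip_vs_iphdr iph;
+ *
+ *	if (!ip_vs_fill_iph_skb(af, skb, false, &iph))
+ *		return NF_ACCEPT;	(malformed packet)
+ */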
+
+static inline bool
+ip_vs_iph_inverse(const struct ip_vs_iphdr *iph)
+{
+ return !!(iph->hdr_flags & IP_VS_HDR_INVERSE);
+}
+
+static inline bool
+ip_vs_iph_icmp(const struct ip_vs_iphdr *iph)
+{
+ return !!(iph->hdr_flags & IP_VS_HDR_ICMP);
+}
+
+static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
+ const union nf_inet_addr *src)
+{
+#ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6)
+ dst->in6 = src->in6;
+ else
+#endif
+ dst->ip = src->ip;
+}
+
+static inline void ip_vs_addr_set(int af, union nf_inet_addr *dst,
+ const union nf_inet_addr *src)
+{
+#ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6) {
+ dst->in6 = src->in6;
+ return;
+ }
+#endif
+ dst->ip = src->ip;
+ dst->all[1] = 0;
+ dst->all[2] = 0;
+ dst->all[3] = 0;
+}
+
+static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
+ const union nf_inet_addr *b)
+{
+#ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6)
+ return ipv6_addr_equal(&a->in6, &b->in6);
+#endif
+ return a->ip == b->ip;
+}
+
+#ifdef CONFIG_IP_VS_DEBUG
+#include <linux/net.h>
+
+int ip_vs_get_debug_level(void);
+
+static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
+ const union nf_inet_addr *addr,
+ int *idx)
+{
+ int len;
+#ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6)
+ len = snprintf(&buf[*idx], buf_len - *idx, "[%pI6c]",
+ &addr->in6) + 1;
+ else
+#endif
+ len = snprintf(&buf[*idx], buf_len - *idx, "%pI4",
+ &addr->ip) + 1;
+
+ *idx += len;
+ BUG_ON(*idx > buf_len + 1);
+ return &buf[*idx - len];
+}
+
+#define IP_VS_DBG_BUF(level, msg, ...) \
+ do { \
+ char ip_vs_dbg_buf[160]; \
+ int ip_vs_dbg_idx = 0; \
+ if (level <= ip_vs_get_debug_level()) \
+ printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__); \
+ } while (0)
+#define IP_VS_ERR_BUF(msg...) \
+ do { \
+ char ip_vs_dbg_buf[160]; \
+ int ip_vs_dbg_idx = 0; \
+ pr_err(msg); \
+ } while (0)
+
+/* Only use from within IP_VS_DBG_BUF() or IP_VS_ERR_BUF macros */
+#define IP_VS_DBG_ADDR(af, addr) \
+ ip_vs_dbg_addr(af, ip_vs_dbg_buf, \
+ sizeof(ip_vs_dbg_buf), addr, \
+ &ip_vs_dbg_idx)
+
+#define IP_VS_DBG(level, msg, ...) \
+ do { \
+ if (level <= ip_vs_get_debug_level()) \
+ printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__); \
+ } while (0)
+#define IP_VS_DBG_RL(msg, ...) \
+ do { \
+ if (net_ratelimit()) \
+ printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__); \
+ } while (0)
+#define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg) \
+ do { \
+ if (level <= ip_vs_get_debug_level()) \
+ pp->debug_packet(af, pp, skb, ofs, msg); \
+ } while (0)
+#define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg) \
+ do { \
+ if (level <= ip_vs_get_debug_level() && \
+ net_ratelimit()) \
+ pp->debug_packet(af, pp, skb, ofs, msg); \
+ } while (0)
+#else /* NO DEBUGGING at ALL */
+#define IP_VS_DBG_BUF(level, msg...) do {} while (0)
+#define IP_VS_ERR_BUF(msg...) do {} while (0)
+#define IP_VS_DBG(level, msg...) do {} while (0)
+#define IP_VS_DBG_RL(msg...) do {} while (0)
+#define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg) do {} while (0)
+#define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg) do {} while (0)
+#endif
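+
+/* Usage sketch (illustrative): IP_VS_DBG_ADDR() may only appear inside
+ * IP_VS_DBG_BUF()/IP_VS_ERR_BUF(), which provide the on-stack buffer
+ * and index it expands to. "cp" is an assumed connection pointer.
+ *
+ *	IP_VS_DBG_BUF(7, "lookup %s:%u\n",
+ *		      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+ *		      ntohs(cp->cport));
+ */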
+
+#define IP_VS_BUG() BUG()
+#define IP_VS_ERR_RL(msg, ...) \
+ do { \
+ if (net_ratelimit()) \
+ pr_err(msg, ##__VA_ARGS__); \
+ } while (0)
+
+#ifdef CONFIG_IP_VS_DEBUG
+#define EnterFunction(level) \
+ do { \
+ if (level <= ip_vs_get_debug_level()) \
+ printk(KERN_DEBUG \
+ pr_fmt("Enter: %s, %s line %i\n"), \
+ __func__, __FILE__, __LINE__); \
+ } while (0)
+#define LeaveFunction(level) \
+ do { \
+ if (level <= ip_vs_get_debug_level()) \
+ printk(KERN_DEBUG \
+ pr_fmt("Leave: %s, %s line %i\n"), \
+ __func__, __FILE__, __LINE__); \
+ } while (0)
+#else
+#define EnterFunction(level) do {} while (0)
+#define LeaveFunction(level) do {} while (0)
+#endif
+
+/* The port number of FTP service (in network order). */
+#define FTPPORT cpu_to_be16(21)
+#define FTPDATA cpu_to_be16(20)
+
+/* TCP State Values */
+enum {
+ IP_VS_TCP_S_NONE = 0,
+ IP_VS_TCP_S_ESTABLISHED,
+ IP_VS_TCP_S_SYN_SENT,
+ IP_VS_TCP_S_SYN_RECV,
+ IP_VS_TCP_S_FIN_WAIT,
+ IP_VS_TCP_S_TIME_WAIT,
+ IP_VS_TCP_S_CLOSE,
+ IP_VS_TCP_S_CLOSE_WAIT,
+ IP_VS_TCP_S_LAST_ACK,
+ IP_VS_TCP_S_LISTEN,
+ IP_VS_TCP_S_SYNACK,
+ IP_VS_TCP_S_LAST
+};
+
+/* UDP State Values */
+enum {
+ IP_VS_UDP_S_NORMAL,
+ IP_VS_UDP_S_LAST,
+};
+
+/* ICMP State Values */
+enum {
+ IP_VS_ICMP_S_NORMAL,
+ IP_VS_ICMP_S_LAST,
+};
+
+/* SCTP State Values */
+enum ip_vs_sctp_states {
+ IP_VS_SCTP_S_NONE,
+ IP_VS_SCTP_S_INIT1,
+ IP_VS_SCTP_S_INIT,
+ IP_VS_SCTP_S_COOKIE_SENT,
+ IP_VS_SCTP_S_COOKIE_REPLIED,
+ IP_VS_SCTP_S_COOKIE_WAIT,
+ IP_VS_SCTP_S_COOKIE,
+ IP_VS_SCTP_S_COOKIE_ECHOED,
+ IP_VS_SCTP_S_ESTABLISHED,
+ IP_VS_SCTP_S_SHUTDOWN_SENT,
+ IP_VS_SCTP_S_SHUTDOWN_RECEIVED,
+ IP_VS_SCTP_S_SHUTDOWN_ACK_SENT,
+ IP_VS_SCTP_S_REJECTED,
+ IP_VS_SCTP_S_CLOSED,
+ IP_VS_SCTP_S_LAST
+};
+
+/* Connection templates use bits from state */
+#define IP_VS_CTPL_S_NONE 0x0000
+#define IP_VS_CTPL_S_ASSURED 0x0001
+#define IP_VS_CTPL_S_LAST 0x0002
+
+/* Delta sequence info structure
+ * Each ip_vs_conn has 2 (output AND input seq. changes).
+ * Only used in the VS/NAT.
+ */
+struct ip_vs_seq {
+ __u32 init_seq; /* Add delta from this seq */
+ __u32 delta; /* Delta in sequence numbers */
+ __u32 previous_delta; /* Delta in sequence numbers
+ * before last resized pkt */
+};
+
+/* counters per cpu */
+struct ip_vs_counters {
+ u64_stats_t conns; /* connections scheduled */
+ u64_stats_t inpkts; /* incoming packets */
+ u64_stats_t outpkts; /* outgoing packets */
+ u64_stats_t inbytes; /* incoming bytes */
+ u64_stats_t outbytes; /* outgoing bytes */
+};
+/* Stats per cpu */
+struct ip_vs_cpu_stats {
+ struct ip_vs_counters cnt;
+ struct u64_stats_sync syncp;
+};
+
+/* IPVS statistics objects */
+struct ip_vs_estimator {
+ struct list_head list;
+
+ u64 last_inbytes;
+ u64 last_outbytes;
+ u64 last_conns;
+ u64 last_inpkts;
+ u64 last_outpkts;
+
+ u64 cps;
+ u64 inpps;
+ u64 outpps;
+ u64 inbps;
+ u64 outbps;
+};
+
+/*
+ * IPVS statistics object, 64-bit kernel version of struct ip_vs_stats_user
+ */
+struct ip_vs_kstats {
+ u64 conns; /* connections scheduled */
+ u64 inpkts; /* incoming packets */
+ u64 outpkts; /* outgoing packets */
+ u64 inbytes; /* incoming bytes */
+ u64 outbytes; /* outgoing bytes */
+
+ u64 cps; /* current connection rate */
+ u64 inpps; /* current in packet rate */
+ u64 outpps; /* current out packet rate */
+ u64 inbps; /* current in byte rate */
+ u64 outbps; /* current out byte rate */
+};
+
+struct ip_vs_stats {
+ struct ip_vs_kstats kstats; /* kernel statistics */
+ struct ip_vs_estimator est; /* estimator */
+ struct ip_vs_cpu_stats __percpu *cpustats; /* per cpu counters */
+ spinlock_t lock; /* spin lock */
+ struct ip_vs_kstats kstats0; /* reset values */
+};
+
+struct dst_entry;
+struct iphdr;
+struct ip_vs_conn;
+struct ip_vs_app;
+struct sk_buff;
+struct ip_vs_proto_data;
+
+struct ip_vs_protocol {
+ struct ip_vs_protocol *next;
+ char *name;
+ u16 protocol;
+ u16 num_states;
+ int dont_defrag;
+
+ void (*init)(struct ip_vs_protocol *pp);
+
+ void (*exit)(struct ip_vs_protocol *pp);
+
+ int (*init_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);
+
+ void (*exit_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);
+
+ int (*conn_schedule)(struct netns_ipvs *ipvs,
+ int af, struct sk_buff *skb,
+ struct ip_vs_proto_data *pd,
+ int *verdict, struct ip_vs_conn **cpp,
+ struct ip_vs_iphdr *iph);
+
+ struct ip_vs_conn *
+ (*conn_in_get)(struct netns_ipvs *ipvs,
+ int af,
+ const struct sk_buff *skb,
+ const struct ip_vs_iphdr *iph);
+
+ struct ip_vs_conn *
+ (*conn_out_get)(struct netns_ipvs *ipvs,
+ int af,
+ const struct sk_buff *skb,
+ const struct ip_vs_iphdr *iph);
+
+ int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);
+
+ int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);
+
+ const char *(*state_name)(int state);
+
+ void (*state_transition)(struct ip_vs_conn *cp, int direction,
+ const struct sk_buff *skb,
+ struct ip_vs_proto_data *pd);
+
+ int (*register_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);
+
+ void (*unregister_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);
+
+ int (*app_conn_bind)(struct ip_vs_conn *cp);
+
+ void (*debug_packet)(int af, struct ip_vs_protocol *pp,
+ const struct sk_buff *skb,
+ int offset,
+ const char *msg);
+
+ void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
+};
+
+/* protocol data per netns */
+struct ip_vs_proto_data {
+ struct ip_vs_proto_data *next;
+ struct ip_vs_protocol *pp;
+ int *timeout_table; /* protocol timeout table */
+ atomic_t appcnt; /* counter of proto app incs. */
+ struct tcp_states_t *tcp_state_table;
+};
+
+struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto);
+struct ip_vs_proto_data *ip_vs_proto_data_get(struct netns_ipvs *ipvs,
+ unsigned short proto);
+
+struct ip_vs_conn_param {
+ struct netns_ipvs *ipvs;
+ const union nf_inet_addr *caddr;
+ const union nf_inet_addr *vaddr;
+ __be16 cport;
+ __be16 vport;
+ __u16 protocol;
+ u16 af;
+
+ const struct ip_vs_pe *pe;
+ char *pe_data;
+ __u8 pe_data_len;
+};
+
+/* IP_VS structure allocated for each dynamically scheduled connection */
+struct ip_vs_conn {
+ struct hlist_node c_list; /* hashed list heads */
+ /* Protocol, addresses and port numbers */
+ __be16 cport;
+ __be16 dport;
+ __be16 vport;
+ u16 af; /* address family */
+ union nf_inet_addr caddr; /* client address */
+ union nf_inet_addr vaddr; /* virtual address */
+ union nf_inet_addr daddr; /* destination address */
+ volatile __u32 flags; /* status flags */
+ __u16 protocol; /* Which protocol (TCP/UDP) */
+ __u16 daf; /* Address family of the dest */
+ struct netns_ipvs *ipvs;
+
+ /* counter and timer */
+ refcount_t refcnt; /* reference count */
+ struct timer_list timer; /* Expiration timer */
+ volatile unsigned long timeout; /* timeout */
+
+ /* Flags and state transition */
+ spinlock_t lock; /* lock for state transition */
+ volatile __u16 state; /* state info */
+	volatile __u16 old_state; /* old state, to be used for
+				   * state-transition-triggered
+				   * synchronization
+				   */
+	__u32 fwmark; /* Firewall mark from skb */
+ unsigned long sync_endtime; /* jiffies + sent_retries */
+
+ /* Control members */
+ struct ip_vs_conn *control; /* Master control connection */
+ atomic_t n_control; /* Number of controlled ones */
+ struct ip_vs_dest *dest; /* real server */
+ atomic_t in_pkts; /* incoming packet counter */
+
+ /* Packet transmitter for different forwarding methods. If it
+ * mangles the packet, it must return NF_DROP or better NF_STOLEN,
+ * otherwise this must be changed to a sk_buff **.
+ * NF_ACCEPT can be returned when destination is local.
+ */
+ int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+
+ /* Note: we can group the following members into a structure,
+ * in order to save more space, and the following members are
+ * only used in VS/NAT anyway
+ */
+ struct ip_vs_app *app; /* bound ip_vs_app object */
+ void *app_data; /* Application private data */
+ struct_group(sync_conn_opt,
+ struct ip_vs_seq in_seq; /* incoming seq. struct */
+ struct ip_vs_seq out_seq; /* outgoing seq. struct */
+ );
+
+ const struct ip_vs_pe *pe;
+ char *pe_data;
+ __u8 pe_data_len;
+
+ struct rcu_head rcu_head;
+};
+
+/* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user
+ * for IPv6 support.
+ *
+ * We need these to conveniently pass around service and destination
+ * options, but unfortunately, we also need to keep the old definitions to
+ * maintain userspace backwards compatibility for the setsockopt interface.
+ */
+struct ip_vs_service_user_kern {
+ /* virtual service addresses */
+ u16 af;
+ u16 protocol;
+ union nf_inet_addr addr; /* virtual ip address */
+ __be16 port;
+	u32 fwmark; /* firewall mark of service */
+
+ /* virtual service options */
+ char *sched_name;
+ char *pe_name;
+ unsigned int flags; /* virtual service flags */
+ unsigned int timeout; /* persistent timeout in sec */
+ __be32 netmask; /* persistent netmask or plen */
+};
+
+
+struct ip_vs_dest_user_kern {
+ /* destination server address */
+ union nf_inet_addr addr;
+ __be16 port;
+
+ /* real server options */
+ unsigned int conn_flags; /* connection flags */
+ int weight; /* destination weight */
+
+ /* thresholds for active connections */
+ u32 u_threshold; /* upper threshold */
+ u32 l_threshold; /* lower threshold */
+
+ /* Address family of addr */
+ u16 af;
+
+ u16 tun_type; /* tunnel type */
+ __be16 tun_port; /* tunnel port */
+ u16 tun_flags; /* tunnel flags */
+};
+
+
+/*
+ * The information about the virtual service offered to the net and the
+ * forwarding entries.
+ */
+struct ip_vs_service {
+ struct hlist_node s_list; /* for normal service table */
+ struct hlist_node f_list; /* for fwmark-based service table */
+ atomic_t refcnt; /* reference counter */
+
+ u16 af; /* address family */
+ __u16 protocol; /* which protocol (TCP/UDP) */
+ union nf_inet_addr addr; /* IP address for virtual service */
+ __be16 port; /* port number for the service */
+ __u32 fwmark; /* firewall mark of the service */
+ unsigned int flags; /* service status flags */
+ unsigned int timeout; /* persistent timeout in ticks */
+ __be32 netmask; /* grouping granularity, mask/plen */
+ struct netns_ipvs *ipvs;
+
+ struct list_head destinations; /* real server d-linked list */
+ __u32 num_dests; /* number of servers */
+ struct ip_vs_stats stats; /* statistics for the service */
+
+ /* for scheduling */
+ struct ip_vs_scheduler __rcu *scheduler; /* bound scheduler object */
+ spinlock_t sched_lock; /* lock sched_data */
+ void *sched_data; /* scheduler application data */
+
+ /* alternate persistence engine */
+ struct ip_vs_pe __rcu *pe;
+ int conntrack_afmask;
+
+ struct rcu_head rcu_head;
+};
+
+/* Information for cached dst */
+struct ip_vs_dest_dst {
+ struct dst_entry *dst_cache; /* destination cache entry */
+ u32 dst_cookie;
+ union nf_inet_addr dst_saddr;
+ struct rcu_head rcu_head;
+};
+
+/* The real server destination forwarding entry with ip address, port number,
+ * and so on.
+ */
+struct ip_vs_dest {
+ struct list_head n_list; /* for the dests in the service */
+ struct hlist_node d_list; /* for table with all the dests */
+
+ u16 af; /* address family */
+ __be16 port; /* port number of the server */
+ union nf_inet_addr addr; /* IP address of the server */
+ volatile unsigned int flags; /* dest status flags */
+ atomic_t conn_flags; /* flags to copy to conn */
+ atomic_t weight; /* server weight */
+ atomic_t last_weight; /* server latest weight */
+ __u16 tun_type; /* tunnel type */
+ __be16 tun_port; /* tunnel port */
+ __u16 tun_flags; /* tunnel flags */
+
+ refcount_t refcnt; /* reference counter */
+ struct ip_vs_stats stats; /* statistics */
+ unsigned long idle_start; /* start time, jiffies */
+
+ /* connection counters and thresholds */
+ atomic_t activeconns; /* active connections */
+ atomic_t inactconns; /* inactive connections */
+ atomic_t persistconns; /* persistent connections */
+ __u32 u_threshold; /* upper threshold */
+ __u32 l_threshold; /* lower threshold */
+
+ /* for destination cache */
+ spinlock_t dst_lock; /* lock of dst_cache */
+ struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */
+
+ /* for virtual service */
+ struct ip_vs_service __rcu *svc; /* service it belongs to */
+ __u16 protocol; /* which protocol (TCP/UDP) */
+ __be16 vport; /* virtual port number */
+ union nf_inet_addr vaddr; /* virtual IP address */
+ __u32 vfwmark; /* firewall mark of service */
+
+ struct list_head t_list; /* in dest_trash */
+ unsigned int in_rs_table:1; /* we are in rs_table */
+};
+
+/* The scheduler object */
+struct ip_vs_scheduler {
+ struct list_head n_list; /* d-linked list head */
+ char *name; /* scheduler name */
+ atomic_t refcnt; /* reference counter */
+ struct module *module; /* THIS_MODULE/NULL */
+
+ /* scheduler initializing service */
+ int (*init_service)(struct ip_vs_service *svc);
+ /* scheduling service finish */
+ void (*done_service)(struct ip_vs_service *svc);
+ /* dest is linked */
+ int (*add_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
+ /* dest is unlinked */
+ int (*del_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
+ /* dest is updated */
+ int (*upd_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
+
+ /* selecting a server from the given service */
+ struct ip_vs_dest* (*schedule)(struct ip_vs_service *svc,
+ const struct sk_buff *skb,
+ struct ip_vs_iphdr *iph);
+};
+
+/* The persistence engine object */
+struct ip_vs_pe {
+ struct list_head n_list; /* d-linked list head */
+ char *name; /* scheduler name */
+ atomic_t refcnt; /* reference counter */
+ struct module *module; /* THIS_MODULE/NULL */
+
+ /* get the connection template, if any */
+ int (*fill_param)(struct ip_vs_conn_param *p, struct sk_buff *skb);
+ bool (*ct_match)(const struct ip_vs_conn_param *p,
+ struct ip_vs_conn *ct);
+ u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval,
+ bool inverse);
+ int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
+ /* create connections for real-server outgoing packets */
+ struct ip_vs_conn* (*conn_out)(struct ip_vs_service *svc,
+ struct ip_vs_dest *dest,
+ struct sk_buff *skb,
+ const struct ip_vs_iphdr *iph,
+ __be16 dport, __be16 cport);
+};
+
+/* The application module object (a.k.a. app incarnation) */
+struct ip_vs_app {
+ struct list_head a_list; /* member in app list */
+ int type; /* IP_VS_APP_TYPE_xxx */
+ char *name; /* application module name */
+ __u16 protocol;
+ struct module *module; /* THIS_MODULE/NULL */
+ struct list_head incs_list; /* list of incarnations */
+
+ /* members for application incarnations */
+ struct list_head p_list; /* member in proto app list */
+ struct ip_vs_app *app; /* its real application */
+ __be16 port; /* port number in net order */
+ atomic_t usecnt; /* usage counter */
+ struct rcu_head rcu_head;
+
+ /* output hook: Process packet in inout direction, diff set for TCP.
+ * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
+ * 2=Mangled but checksum was not updated
+ */
+ int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *,
+ struct sk_buff *, int *diff, struct ip_vs_iphdr *ipvsh);
+
+ /* input hook: Process packet in outin direction, diff set for TCP.
+ * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
+ * 2=Mangled but checksum was not updated
+ */
+ int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *,
+ struct sk_buff *, int *diff, struct ip_vs_iphdr *ipvsh);
+
+ /* ip_vs_app initializer */
+ int (*init_conn)(struct ip_vs_app *, struct ip_vs_conn *);
+
+ /* ip_vs_app finish */
+ int (*done_conn)(struct ip_vs_app *, struct ip_vs_conn *);
+
+
+ /* not used now */
+ int (*bind_conn)(struct ip_vs_app *, struct ip_vs_conn *,
+ struct ip_vs_protocol *);
+
+ void (*unbind_conn)(struct ip_vs_app *, struct ip_vs_conn *);
+
+ int * timeout_table;
+ int * timeouts;
+ int timeouts_size;
+
+ int (*conn_schedule)(struct sk_buff *skb, struct ip_vs_app *app,
+ int *verdict, struct ip_vs_conn **cpp);
+
+ struct ip_vs_conn *
+ (*conn_in_get)(const struct sk_buff *skb, struct ip_vs_app *app,
+ const struct iphdr *iph, int inverse);
+
+ struct ip_vs_conn *
+ (*conn_out_get)(const struct sk_buff *skb, struct ip_vs_app *app,
+ const struct iphdr *iph, int inverse);
+
+ int (*state_transition)(struct ip_vs_conn *cp, int direction,
+ const struct sk_buff *skb,
+ struct ip_vs_app *app);
+
+ void (*timeout_change)(struct ip_vs_app *app, int flags);
+};
+
+struct ipvs_master_sync_state {
+ struct list_head sync_queue;
+ struct ip_vs_sync_buff *sync_buff;
+ unsigned long sync_queue_len;
+ unsigned int sync_queue_delay;
+ struct delayed_work master_wakeup_work;
+ struct netns_ipvs *ipvs;
+};
+
+struct ip_vs_sync_thread_data;
+
+/* How much time to keep dests in trash */
+#define IP_VS_DEST_TRASH_PERIOD (120 * HZ)
+
+struct ipvs_sync_daemon_cfg {
+ union nf_inet_addr mcast_group;
+ int syncid;
+ u16 sync_maxlen;
+ u16 mcast_port;
+ u8 mcast_af;
+ u8 mcast_ttl;
+ /* multicast interface name */
+ char mcast_ifn[IP_VS_IFNAME_MAXLEN];
+};
+
+/* IPVS in network namespace */
+struct netns_ipvs {
+ int gen; /* Generation */
+ int enable; /* enable like nf_hooks do */
+ /* Hash table: for real service lookups */
+ #define IP_VS_RTAB_BITS 4
+ #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
+ #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
+
+ struct hlist_head rs_table[IP_VS_RTAB_SIZE];
+ /* ip_vs_app */
+ struct list_head app_list;
+ /* ip_vs_proto */
+ #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */
+ struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
+ /* ip_vs_proto_tcp */
+#ifdef CONFIG_IP_VS_PROTO_TCP
+ #define TCP_APP_TAB_BITS 4
+ #define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS)
+ #define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1)
+ struct list_head tcp_apps[TCP_APP_TAB_SIZE];
+#endif
+ /* ip_vs_proto_udp */
+#ifdef CONFIG_IP_VS_PROTO_UDP
+ #define UDP_APP_TAB_BITS 4
+ #define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS)
+ #define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1)
+ struct list_head udp_apps[UDP_APP_TAB_SIZE];
+#endif
+ /* ip_vs_proto_sctp */
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+ #define SCTP_APP_TAB_BITS 4
+ #define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS)
+ #define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
+ /* Hash table for SCTP application incarnations */
+ struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
+#endif
+ /* ip_vs_conn */
+ atomic_t conn_count; /* connection counter */
+
+ /* ip_vs_ctl */
+ struct ip_vs_stats tot_stats; /* Statistics & est. */
+
+ int num_services; /* no of virtual services */
+ int num_services6; /* IPv6 virtual services */
+
+ /* Trash for destinations */
+ struct list_head dest_trash;
+ spinlock_t dest_trash_lock;
+ struct timer_list dest_trash_timer; /* expiration timer */
+ /* Service counters */
+ atomic_t ftpsvc_counter;
+ atomic_t nullsvc_counter;
+ atomic_t conn_out_counter;
+
+#ifdef CONFIG_SYSCTL
+ /* delayed work for expiring no dest connections */
+ struct delayed_work expire_nodest_conn_work;
+ /* 1/rate drop and drop-entry variables */
+ struct delayed_work defense_work; /* Work handler */
+ int drop_rate;
+ int drop_counter;
+ int old_secure_tcp;
+ atomic_t dropentry;
+ /* locks in ctl.c */
+ spinlock_t dropentry_lock; /* drop entry handling */
+ spinlock_t droppacket_lock; /* drop packet handling */
+ spinlock_t securetcp_lock; /* state and timeout tables */
+
+ /* sys-ctl struct */
+ struct ctl_table_header *sysctl_hdr;
+ struct ctl_table *sysctl_tbl;
+#endif
+
+ /* sysctl variables */
+ int sysctl_amemthresh;
+ int sysctl_am_droprate;
+ int sysctl_drop_entry;
+ int sysctl_drop_packet;
+ int sysctl_secure_tcp;
+#ifdef CONFIG_IP_VS_NFCT
+ int sysctl_conntrack;
+#endif
+ int sysctl_snat_reroute;
+ int sysctl_sync_ver;
+ int sysctl_sync_ports;
+ int sysctl_sync_persist_mode;
+ unsigned long sysctl_sync_qlen_max;
+ int sysctl_sync_sock_size;
+ int sysctl_cache_bypass;
+ int sysctl_expire_nodest_conn;
+ int sysctl_sloppy_tcp;
+ int sysctl_sloppy_sctp;
+ int sysctl_expire_quiescent_template;
+ int sysctl_sync_threshold[2];
+ unsigned int sysctl_sync_refresh_period;
+ int sysctl_sync_retries;
+ int sysctl_nat_icmp_send;
+ int sysctl_pmtu_disc;
+ int sysctl_backup_only;
+ int sysctl_conn_reuse_mode;
+ int sysctl_schedule_icmp;
+ int sysctl_ignore_tunneled;
+ int sysctl_run_estimation;
+
+ /* ip_vs_lblc */
+ int sysctl_lblc_expiration;
+ struct ctl_table_header *lblc_ctl_header;
+ struct ctl_table *lblc_ctl_table;
+ /* ip_vs_lblcr */
+ int sysctl_lblcr_expiration;
+ struct ctl_table_header *lblcr_ctl_header;
+ struct ctl_table *lblcr_ctl_table;
+ /* ip_vs_est */
+ struct list_head est_list; /* estimator list */
+ spinlock_t est_lock;
+ struct timer_list est_timer; /* Estimation timer */
+ /* ip_vs_sync */
+ spinlock_t sync_lock;
+ struct ipvs_master_sync_state *ms;
+ spinlock_t sync_buff_lock;
+ struct ip_vs_sync_thread_data *master_tinfo;
+ struct ip_vs_sync_thread_data *backup_tinfo;
+ int threads_mask;
+ volatile int sync_state;
+ struct mutex sync_mutex;
+ struct ipvs_sync_daemon_cfg mcfg; /* Master Configuration */
+ struct ipvs_sync_daemon_cfg bcfg; /* Backup Configuration */
+ /* net name space ptr */
+ struct net *net; /* Needed by timer routines */
+	/* Number of heterogeneous destinations, needed because heterogeneous
+	 * destinations are not supported when synchronization is enabled.
+	 */
+ unsigned int mixed_address_family_dests;
+ unsigned int hooks_afmask; /* &1=AF_INET, &2=AF_INET6 */
+};
+
+#define DEFAULT_SYNC_THRESHOLD 3
+#define DEFAULT_SYNC_PERIOD 50
+#define DEFAULT_SYNC_VER 1
+#define DEFAULT_SLOPPY_TCP 0
+#define DEFAULT_SLOPPY_SCTP 0
+#define DEFAULT_SYNC_REFRESH_PERIOD (0U * HZ)
+#define DEFAULT_SYNC_RETRIES 0
+#define IPVS_SYNC_WAKEUP_RATE 8
+#define IPVS_SYNC_QLEN_MAX (IPVS_SYNC_WAKEUP_RATE * 4)
+#define IPVS_SYNC_SEND_DELAY (HZ / 50)
+#define IPVS_SYNC_CHECK_PERIOD HZ
+#define IPVS_SYNC_FLUSH_TIME (HZ * 2)
+#define IPVS_SYNC_PORTS_MAX (1 << 6)
+
+#ifdef CONFIG_SYSCTL
+
+static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sync_threshold[0];
+}
+
+static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
+{
+ return READ_ONCE(ipvs->sysctl_sync_threshold[1]);
+}
+
+static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
+{
+ return READ_ONCE(ipvs->sysctl_sync_refresh_period);
+}
+
+static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sync_retries;
+}
+
+static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sync_ver;
+}
+
+static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sloppy_tcp;
+}
+
+static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sloppy_sctp;
+}
+
+static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
+{
+ return READ_ONCE(ipvs->sysctl_sync_ports);
+}
+
+static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sync_persist_mode;
+}
+
+static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sync_qlen_max;
+}
+
+static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sync_sock_size;
+}
+
+static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_pmtu_disc;
+}
+
+static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
+{
+ return ipvs->sync_state & IP_VS_STATE_BACKUP &&
+ ipvs->sysctl_backup_only;
+}
+
+static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_conn_reuse_mode;
+}
+
+static inline int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_expire_nodest_conn;
+}
+
+static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_schedule_icmp;
+}
+
+static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_ignore_tunneled;
+}
+
+static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_cache_bypass;
+}
+
+static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_run_estimation;
+}
+
+#else
+
+static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SYNC_THRESHOLD;
+}
+
+static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SYNC_PERIOD;
+}
+
+static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SYNC_REFRESH_PERIOD;
+}
+
+static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SYNC_RETRIES & 3;
+}
+
+static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SYNC_VER;
+}
+
+static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SLOPPY_TCP;
+}
+
+static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SLOPPY_SCTP;
+}
+
+static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
+{
+ return 1;
+}
+
+static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
+{
+ return 0;
+}
+
+static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
+{
+ return IPVS_SYNC_QLEN_MAX;
+}
+
+static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
+{
+ return 0;
+}
+
+static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
+{
+ return 1;
+}
+
+static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
+{
+ return 0;
+}
+
+static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
+{
+ return 1;
+}
+
+static inline int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
+{
+ return 0;
+}
+
+static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
+{
+ return 0;
+}
+
+static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs)
+{
+ return 0;
+}
+
+static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
+{
+ return 0;
+}
+
+static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
+{
+ return 1;
+}
+
+#endif
+
+/* IPVS core functions
+ * (from ip_vs_core.c)
+ */
+const char *ip_vs_proto_name(unsigned int proto);
+void ip_vs_init_hash_table(struct list_head *table, int rows);
+struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
+ struct ip_vs_dest *dest,
+ struct sk_buff *skb,
+ const struct ip_vs_iphdr *iph,
+ __be16 dport,
+ __be16 cport);
+#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
+
+#define IP_VS_APP_TYPE_FTP 1
+
+/* ip_vs_conn handling functions
+ * (from ip_vs_conn.c)
+ */
+enum {
+ IP_VS_DIR_INPUT = 0,
+ IP_VS_DIR_OUTPUT,
+ IP_VS_DIR_INPUT_ONLY,
+ IP_VS_DIR_LAST,
+};
+
+static inline void ip_vs_conn_fill_param(struct netns_ipvs *ipvs, int af, int protocol,
+ const union nf_inet_addr *caddr,
+ __be16 cport,
+ const union nf_inet_addr *vaddr,
+ __be16 vport,
+ struct ip_vs_conn_param *p)
+{
+ p->ipvs = ipvs;
+ p->af = af;
+ p->protocol = protocol;
+ p->caddr = caddr;
+ p->cport = cport;
+ p->vaddr = vaddr;
+ p->vport = vport;
+ p->pe = NULL;
+ p->pe_data = NULL;
+}
+
+struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
+struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);
+
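+/* A minimal usage sketch (illustrative only, not part of this header):
+ * build a lookup key with ip_vs_conn_fill_param() and search the
+ * client-to-server table. The function and variable names here are
+ * hypothetical.
+ */
+#if 0 /* example only */
+static struct ip_vs_conn *example_lookup(struct netns_ipvs *ipvs, int af,
+ const union nf_inet_addr *caddr, __be16 cport,
+ const union nf_inet_addr *vaddr, __be16 vport)
+{
+ struct ip_vs_conn_param p;
+
+ ip_vs_conn_fill_param(ipvs, af, IPPROTO_TCP, caddr, cport,
+ vaddr, vport, &p);
+ return ip_vs_conn_in_get(&p);
+}
+#endif
+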
+struct ip_vs_conn * ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af,
+ const struct sk_buff *skb,
+ const struct ip_vs_iphdr *iph);
+
+struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);
+
+struct ip_vs_conn * ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
+ const struct sk_buff *skb,
+ const struct ip_vs_iphdr *iph);
+
+/* Get reference to gain full access to conn.
+ * By default, RCU read-side critical sections have access only to
+ * conn fields and its PE data, see ip_vs_conn_rcu_free() for reference.
+ */
+static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
+{
+ return refcount_inc_not_zero(&cp->refcnt);
+}
+
+/* put back the conn without restarting its timer */
+static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
+{
+ smp_mb__before_atomic();
+ refcount_dec(&cp->refcnt);
+}
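+
+/* Sketch of the intended pattern (illustrative only): take a full
+ * reference inside the RCU read side, then use and release the conn
+ * outside it.
+ */
+#if 0 /* example only */
+ rcu_read_lock();
+ /* ... cp found via an RCU-protected traversal ... */
+ if (cp && !__ip_vs_conn_get(cp))
+ cp = NULL; /* conn is already being freed */
+ rcu_read_unlock();
+ if (cp) {
+ /* full access to cp is now safe */
+ __ip_vs_conn_put(cp); /* drop ref, do not restart timer */
+ }
+#endif
+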
+void ip_vs_conn_put(struct ip_vs_conn *cp);
+void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
+
+struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
+ const union nf_inet_addr *daddr,
+ __be16 dport, unsigned int flags,
+ struct ip_vs_dest *dest, __u32 fwmark);
+void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
+
+const char *ip_vs_state_name(const struct ip_vs_conn *cp);
+
+void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
+int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
+void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
+int ip_vs_conn_init(void);
+void ip_vs_conn_cleanup(void);
+
+static inline void ip_vs_control_del(struct ip_vs_conn *cp)
+{
+ struct ip_vs_conn *ctl_cp = cp->control;
+ if (!ctl_cp) {
+ IP_VS_ERR_BUF("request control DEL for uncontrolled: "
+ "%s:%d to %s:%d\n",
+ IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+ ntohs(cp->cport),
+ IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
+ ntohs(cp->vport));
+
+ return;
+ }
+
+ IP_VS_DBG_BUF(7, "DELeting control for: "
+ "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
+ IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+ ntohs(cp->cport),
+ IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
+ ntohs(ctl_cp->cport));
+
+ cp->control = NULL;
+ if (atomic_read(&ctl_cp->n_control) == 0) {
+ IP_VS_ERR_BUF("BUG control DEL with n=0 : "
+ "%s:%d to %s:%d\n",
+ IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+ ntohs(cp->cport),
+ IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
+ ntohs(cp->vport));
+
+ return;
+ }
+ atomic_dec(&ctl_cp->n_control);
+}
+
+static inline void
+ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
+{
+ if (cp->control) {
+ IP_VS_ERR_BUF("request control ADD for already controlled: "
+ "%s:%d to %s:%d\n",
+ IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+ ntohs(cp->cport),
+ IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
+ ntohs(cp->vport));
+
+ ip_vs_control_del(cp);
+ }
+
+ IP_VS_DBG_BUF(7, "ADDing control for: "
+ "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
+ IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+ ntohs(cp->cport),
+ IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
+ ntohs(ctl_cp->cport));
+
+ cp->control = ctl_cp;
+ atomic_inc(&ctl_cp->n_control);
+}
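+
+/* Illustrative pairing (an assumption, not from this header): a
+ * persistence template "ct" controls a data connection "cp" until the
+ * bond is dropped.
+ */
+#if 0 /* example only */
+ ip_vs_control_add(cp, ct); /* cp->control = ct, ct->n_control++ */
+ /* ... */
+ ip_vs_control_del(cp); /* cp->control = NULL, ct->n_control-- */
+#endif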
+
+/* Mark our template as assured */
+static inline void
+ip_vs_control_assure_ct(struct ip_vs_conn *cp)
+{
+ struct ip_vs_conn *ct = cp->control;
+
+ if (ct && !(ct->state & IP_VS_CTPL_S_ASSURED) &&
+ (ct->flags & IP_VS_CONN_F_TEMPLATE))
+ ct->state |= IP_VS_CTPL_S_ASSURED;
+}
+
+/* IPVS netns init & cleanup functions */
+int ip_vs_estimator_net_init(struct netns_ipvs *ipvs);
+int ip_vs_control_net_init(struct netns_ipvs *ipvs);
+int ip_vs_protocol_net_init(struct netns_ipvs *ipvs);
+int ip_vs_app_net_init(struct netns_ipvs *ipvs);
+int ip_vs_conn_net_init(struct netns_ipvs *ipvs);
+int ip_vs_sync_net_init(struct netns_ipvs *ipvs);
+void ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_app_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_control_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_service_nets_cleanup(struct list_head *net_list);
+
+/* IPVS application functions
+ * (from ip_vs_app.c)
+ */
+#define IP_VS_APP_MAX_PORTS 8
+struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app);
+void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app);
+int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+void ip_vs_unbind_app(struct ip_vs_conn *cp);
+int register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
+ __u16 port);
+int ip_vs_app_inc_get(struct ip_vs_app *inc);
+void ip_vs_app_inc_put(struct ip_vs_app *inc);
+
+int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb,
+ struct ip_vs_iphdr *ipvsh);
+int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb,
+ struct ip_vs_iphdr *ipvsh);
+
+int register_ip_vs_pe(struct ip_vs_pe *pe);
+int unregister_ip_vs_pe(struct ip_vs_pe *pe);
+struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
+struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
+
+/* Use a #define to avoid all of module.h just for these trivial ops */
+#define ip_vs_pe_get(pe) \
+ if (pe && pe->module) \
+ __module_get(pe->module);
+
+#define ip_vs_pe_put(pe) \
+ if (pe && pe->module) \
+ module_put(pe->module);
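+
+/* Usage sketch (illustrative): pin the pe's module while the pointer is
+ * in use, then release it.
+ */
+#if 0 /* example only */
+ ip_vs_pe_get(pe);
+ /* ... use pe ... */
+ ip_vs_pe_put(pe);
+#endif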
+
+/* IPVS protocol functions (from ip_vs_proto.c) */
+int ip_vs_protocol_init(void);
+void ip_vs_protocol_cleanup(void);
+void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
+int *ip_vs_create_timeout_table(int *table, int size);
+void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
+ const struct sk_buff *skb, int offset,
+ const char *msg);
+
+extern struct ip_vs_protocol ip_vs_protocol_tcp;
+extern struct ip_vs_protocol ip_vs_protocol_udp;
+extern struct ip_vs_protocol ip_vs_protocol_icmp;
+extern struct ip_vs_protocol ip_vs_protocol_esp;
+extern struct ip_vs_protocol ip_vs_protocol_ah;
+extern struct ip_vs_protocol ip_vs_protocol_sctp;
+
+/* Registering/unregistering scheduler functions
+ * (from ip_vs_sched.c)
+ */
+int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
+int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
+int ip_vs_bind_scheduler(struct ip_vs_service *svc,
+ struct ip_vs_scheduler *scheduler);
+void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
+ struct ip_vs_scheduler *sched);
+struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
+void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
+struct ip_vs_conn *
+ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
+ struct ip_vs_proto_data *pd, int *ignored,
+ struct ip_vs_iphdr *iph);
+int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
+ struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);
+
+void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
+
+/* IPVS control data and functions (from ip_vs_ctl.c) */
+extern struct ip_vs_stats ip_vs_stats;
+extern int sysctl_ip_vs_sync_ver;
+
+struct ip_vs_service *
+ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol,
+ const union nf_inet_addr *vaddr, __be16 vport);
+
+bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
+ const union nf_inet_addr *daddr, __be16 dport);
+
+struct ip_vs_dest *
+ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
+ const union nf_inet_addr *daddr, __be16 dport);
+struct ip_vs_dest *ip_vs_find_tunnel(struct netns_ipvs *ipvs, int af,
+ const union nf_inet_addr *daddr,
+ __be16 tun_port);
+
+int ip_vs_use_count_inc(void);
+void ip_vs_use_count_dec(void);
+int ip_vs_register_nl_ioctl(void);
+void ip_vs_unregister_nl_ioctl(void);
+int ip_vs_control_init(void);
+void ip_vs_control_cleanup(void);
+struct ip_vs_dest *
+ip_vs_find_dest(struct netns_ipvs *ipvs, int svc_af, int dest_af,
+ const union nf_inet_addr *daddr, __be16 dport,
+ const union nf_inet_addr *vaddr, __be16 vport,
+ __u16 protocol, __u32 fwmark, __u32 flags);
+void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
+
+static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
+{
+ refcount_inc(&dest->refcnt);
+}
+
+static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
+{
+ smp_mb__before_atomic();
+ refcount_dec(&dest->refcnt);
+}
+
+static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
+{
+ if (refcount_dec_and_test(&dest->refcnt))
+ kfree(dest);
+}
+
+/* IPVS sync daemon data and function prototypes
+ * (from ip_vs_sync.c)
+ */
+int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *cfg,
+ int state);
+int stop_sync_thread(struct netns_ipvs *ipvs, int state);
+void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts);
+
+/* IPVS rate estimator prototypes (from ip_vs_est.c) */
+void ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
+void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
+void ip_vs_zero_estimator(struct ip_vs_stats *stats);
+void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats);
+
+/* Various IPVS packet transmitters (from ip_vs_xmit.c) */
+int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, int offset,
+ unsigned int hooknum, struct ip_vs_iphdr *iph);
+void ip_vs_dest_dst_rcu_free(struct rcu_head *head);
+
+#ifdef CONFIG_IP_VS_IPV6
+int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, int offset,
+ unsigned int hooknum, struct ip_vs_iphdr *iph);
+#endif
+
+#ifdef CONFIG_SYSCTL
+/* A simple mechanism for shedding packets under load: set
+ * ip_vs_drop_rate to 'n' and one out of every 'n' packets is dropped.
+ */
+static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
+{
+ if (!ipvs->drop_rate)
+ return 0;
+ if (--ipvs->drop_counter > 0)
+ return 0;
+ ipvs->drop_counter = ipvs->drop_rate;
+ return 1;
+}
+#else
+static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
+#endif
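+
+/* Illustrative call site (an assumption, not from this header): packet
+ * path code consults ip_vs_todrop() and sheds the packet when it fires.
+ */
+#if 0 /* example only */
+ if (ip_vs_todrop(ipvs)) {
+ __ip_vs_conn_put(cp);
+ return NF_DROP;
+ }
+#endif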
+
+#ifdef CONFIG_SYSCTL
+/* Enqueue delayed work to expire connections that have no destination.
+ * Only runs when sysctl_expire_nodest_conn=1.
+ */
+static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs)
+{
+ if (sysctl_expire_nodest_conn(ipvs))
+ queue_delayed_work(system_long_wq,
+ &ipvs->expire_nodest_conn_work, 1);
+}
+
+void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs);
+#else
+static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs) {}
+#endif
+
+#define IP_VS_DFWD_METHOD(dest) (atomic_read(&(dest)->conn_flags) & \
+ IP_VS_CONN_F_FWD_MASK)
+
+/* ip_vs_fwd_tag returns the forwarding tag of the connection */
+#define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK)
+
+static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
+{
+ char fwd;
+
+ switch (IP_VS_FWD_METHOD(cp)) {
+ case IP_VS_CONN_F_MASQ:
+ fwd = 'M'; break;
+ case IP_VS_CONN_F_LOCALNODE:
+ fwd = 'L'; break;
+ case IP_VS_CONN_F_TUNNEL:
+ fwd = 'T'; break;
+ case IP_VS_CONN_F_DROUTE:
+ fwd = 'R'; break;
+ case IP_VS_CONN_F_BYPASS:
+ fwd = 'B'; break;
+ default:
+ fwd = '?'; break;
+ }
+ return fwd;
+}
+
+void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, int dir);
+
+#ifdef CONFIG_IP_VS_IPV6
+void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, int dir);
+#endif
+
+__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
+
+static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
+{
+ __be32 diff[2] = { ~old, new };
+
+ return csum_partial(diff, sizeof(diff), oldsum);
+}
+
+#ifdef CONFIG_IP_VS_IPV6
+static inline __wsum ip_vs_check_diff16(const __be32 *old, const __be32 *new,
+ __wsum oldsum)
+{
+ __be32 diff[8] = { ~old[3], ~old[2], ~old[1], ~old[0],
+ new[3], new[2], new[1], new[0] };
+
+ return csum_partial(diff, sizeof(diff), oldsum);
+}
+#endif
+
+static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
+{
+ __be16 diff[2] = { ~old, new };
+
+ return csum_partial(diff, sizeof(diff), oldsum);
+}
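+
+/* Sketch of an incremental checksum fix-up with the helper above, in the
+ * style used when NAT rewrites a 16-bit port (names are illustrative).
+ */
+#if 0 /* example only */
+static void example_csum_update(__sum16 *check, __be16 oldport,
+ __be16 newport)
+{
+ *check = csum_fold(ip_vs_check_diff2(oldport, newport,
+ ~csum_unfold(*check)));
+}
+#endif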
+
+/* Forget current conntrack (unconfirmed) and attach notrack entry */
+static inline void ip_vs_notrack(struct sk_buff *skb)
+{
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+ if (ct) {
+ nf_conntrack_put(&ct->ct_general);
+ nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
+ }
+#endif
+}
+
+#ifdef CONFIG_IP_VS_NFCT
+/* Netfilter connection tracking
+ * (from ip_vs_nfct.c)
+ */
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
+{
+#ifdef CONFIG_SYSCTL
+ return ipvs->sysctl_conntrack;
+#else
+ return 0;
+#endif
+}
+
+void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
+ int outin);
+int ip_vs_confirm_conntrack(struct sk_buff *skb);
+void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
+ struct ip_vs_conn *cp, u_int8_t proto,
+ const __be16 port, int from_rs);
+void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
+
+#else
+
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
+{
+ return 0;
+}
+
+static inline void ip_vs_update_conntrack(struct sk_buff *skb,
+ struct ip_vs_conn *cp, int outin)
+{
+}
+
+static inline int ip_vs_confirm_conntrack(struct sk_buff *skb)
+{
+ return NF_ACCEPT;
+}
+
+static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
+{
+}
+#endif /* CONFIG_IP_VS_NFCT */
+
+/* Using an old conntrack that cannot be redirected to another real server? */
+static inline bool ip_vs_conn_uses_old_conntrack(struct ip_vs_conn *cp,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_IP_VS_NFCT
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct && nf_ct_is_confirmed(ct))
+ return true;
+#endif
+ return false;
+}
+
+static inline int ip_vs_register_conntrack(struct ip_vs_service *svc)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ int afmask = (svc->af == AF_INET6) ? 2 : 1;
+ int ret = 0;
+
+ if (!(svc->conntrack_afmask & afmask)) {
+ ret = nf_ct_netns_get(svc->ipvs->net, svc->af);
+ if (ret >= 0)
+ svc->conntrack_afmask |= afmask;
+ }
+ return ret;
+#else
+ return 0;
+#endif
+}
+
+static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ int afmask = (svc->af == AF_INET6) ? 2 : 1;
+
+ if (svc->conntrack_afmask & afmask) {
+ nf_ct_netns_put(svc->ipvs->net, svc->af);
+ svc->conntrack_afmask &= ~afmask;
+ }
+#endif
+}
+
+int ip_vs_register_hooks(struct netns_ipvs *ipvs, unsigned int af);
+void ip_vs_unregister_hooks(struct netns_ipvs *ipvs, unsigned int af);
+
+static inline int
+ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
+{
+ /* We estimate that processing an active connection costs about 256
+ * times as much as an inactive one on average (the factor of 256 is
+ * approximate and may be tuned later). The overhead is therefore
+ * estimated as:
+ * dest->activeconns*256 + dest->inactconns
+ */
+ return (atomic_read(&dest->activeconns) << 8) +
+ atomic_read(&dest->inactconns);
+}
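+
+/* Example: a dest with 10 active and 50 inactive connections scores
+ * 10 * 256 + 50 = 2610; least-connection style schedulers pick the dest
+ * with the lowest value.
+ */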
+
+#ifdef CONFIG_IP_VS_PROTO_TCP
+INDIRECT_CALLABLE_DECLARE(int
+ tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph));
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_UDP
+INDIRECT_CALLABLE_DECLARE(int
+ udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph));
+#endif
+#endif /* _NET_IP_VS_H */
diff --git a/include/net/ipcomp.h b/include/net/ipcomp.h
new file mode 100644
index 000000000..8660a2a6d
--- /dev/null
+++ b/include/net/ipcomp.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_IPCOMP_H
+#define _NET_IPCOMP_H
+
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#define IPCOMP_SCRATCH_SIZE 65400
+
+struct crypto_comp;
+struct ip_comp_hdr;
+
+struct ipcomp_data {
+ u16 threshold;
+ struct crypto_comp * __percpu *tfms;
+};
+
+struct ip_comp_hdr;
+struct sk_buff;
+struct xfrm_state;
+
+int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb);
+int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb);
+void ipcomp_destroy(struct xfrm_state *x);
+int ipcomp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack);
+
+static inline struct ip_comp_hdr *ip_comp_hdr(const struct sk_buff *skb)
+{
+ return (struct ip_comp_hdr *)skb_transport_header(skb);
+}
+
+#endif
diff --git a/include/net/ipconfig.h b/include/net/ipconfig.h
new file mode 100644
index 000000000..8276897d0
--- /dev/null
+++ b/include/net/ipconfig.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1997 Martin Mares
+ *
+ * Automatic IP Layer Configuration
+ */
+
+/* The following are initdata: */
+
+#include <linux/types.h>
+
+extern int ic_proto_enabled; /* Protocols enabled (see IC_xxx) */
+extern int ic_set_manually; /* IPconfig parameters set manually */
+
+extern __be32 ic_myaddr; /* My IP address */
+extern __be32 ic_gateway; /* Gateway IP address */
+
+extern __be32 ic_servaddr; /* Boot server IP address */
+
+extern __be32 root_server_addr; /* Address of NFS server */
+extern u8 root_server_path[]; /* Path to mount as root */
+
+
+/* bits in ic_proto_{enabled,used} */
+#define IC_PROTO 0xFF /* Protocols mask: */
+#define IC_BOOTP 0x01 /* BOOTP (or DHCP, see below) */
+#define IC_RARP 0x02 /* RARP */
+#define IC_USE_DHCP 0x100 /* If on, use DHCP instead of BOOTP */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
new file mode 100644
index 000000000..517bdae78
--- /dev/null
+++ b/include/net/ipv6.h
@@ -0,0 +1,1347 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Linux INET6 implementation
+ *
+ * Authors:
+ * Pedro Roque <roque@di.fc.ul.pt>
+ */
+
+#ifndef _NET_IPV6_H
+#define _NET_IPV6_H
+
+#include <linux/ipv6.h>
+#include <linux/hardirq.h>
+#include <linux/jhash.h>
+#include <linux/refcount.h>
+#include <linux/jump_label_ratelimit.h>
+#include <net/if_inet6.h>
+#include <net/flow.h>
+#include <net/flow_dissector.h>
+#include <net/inet_dscp.h>
+#include <net/snmp.h>
+#include <net/netns/hash.h>
+
+struct ip_tunnel_info;
+
+#define SIN6_LEN_RFC2133 24
+
+#define IPV6_MAXPLEN 65535
+
+/*
+ * NextHeader field of IPv6 header
+ */
+
+#define NEXTHDR_HOP 0 /* Hop-by-hop option header. */
+#define NEXTHDR_IPV4 4 /* IPv4 in IPv6 */
+#define NEXTHDR_TCP 6 /* TCP segment. */
+#define NEXTHDR_UDP 17 /* UDP message. */
+#define NEXTHDR_IPV6 41 /* IPv6 in IPv6 */
+#define NEXTHDR_ROUTING 43 /* Routing header. */
+#define NEXTHDR_FRAGMENT 44 /* Fragmentation/reassembly header. */
+#define NEXTHDR_GRE 47 /* GRE header. */
+#define NEXTHDR_ESP 50 /* Encapsulating security payload. */
+#define NEXTHDR_AUTH 51 /* Authentication header. */
+#define NEXTHDR_ICMP 58 /* ICMP for IPv6. */
+#define NEXTHDR_NONE 59 /* No next header */
+#define NEXTHDR_DEST 60 /* Destination options header. */
+#define NEXTHDR_SCTP 132 /* SCTP message. */
+#define NEXTHDR_MOBILITY 135 /* Mobility header. */
+
+#define NEXTHDR_MAX 255
+
+#define IPV6_DEFAULT_HOPLIMIT 64
+#define IPV6_DEFAULT_MCASTHOPS 1
+
+/* Limits on Hop-by-Hop and Destination options.
+ *
+ * Per RFC8200 there is no limit on the maximum number or lengths of options in
+ * Hop-by-Hop or Destination options, other than that the packet must fit in an MTU.
+ * We allow configurable limits in order to mitigate potential denial of
+ * service attacks.
+ *
+ * There are three limits that may be set:
+ * - Limit the number of options in a Hop-by-Hop or Destination options
+ * extension header
+ * - Limit the byte length of a Hop-by-Hop or Destination options extension
+ * header
+ * - Disallow unknown options
+ *
+ * The limits are expressed in corresponding sysctls:
+ *
+ * ipv6.sysctl.max_dst_opts_cnt
+ * ipv6.sysctl.max_hbh_opts_cnt
+ * ipv6.sysctl.max_dst_opts_len
+ * ipv6.sysctl.max_hbh_opts_len
+ *
+ * max_*_opts_cnt is the number of TLVs that are allowed for Destination
+ * options or Hop-by-Hop options. If the number is less than zero then unknown
+ * TLVs are disallowed and the number of known options that are allowed is the
+ * absolute value. Setting the value to INT_MAX indicates no limit.
+ *
+ * max_*_opts_len is the length limit in bytes of a Destination or
+ * Hop-by-Hop options extension header. Setting the value to INT_MAX
+ * indicates no length limit.
+ *
+ * If a limit is exceeded when processing an extension header the packet is
+ * silently discarded.
+ */
+
+/* Default limits for Hop-by-Hop and Destination options */
+#define IP6_DEFAULT_MAX_DST_OPTS_CNT 8
+#define IP6_DEFAULT_MAX_HBH_OPTS_CNT 8
+#define IP6_DEFAULT_MAX_DST_OPTS_LEN INT_MAX /* No limit */
+#define IP6_DEFAULT_MAX_HBH_OPTS_LEN INT_MAX /* No limit */
+
+/*
+ * Addr type
+ *
+ * type - unicast | multicast
+ * scope - local | site | global
+ * v4 - compat
+ * v4mapped
+ * any
+ * loopback
+ */
+
+#define IPV6_ADDR_ANY 0x0000U
+
+#define IPV6_ADDR_UNICAST 0x0001U
+#define IPV6_ADDR_MULTICAST 0x0002U
+
+#define IPV6_ADDR_LOOPBACK 0x0010U
+#define IPV6_ADDR_LINKLOCAL 0x0020U
+#define IPV6_ADDR_SITELOCAL 0x0040U
+
+#define IPV6_ADDR_COMPATv4 0x0080U
+
+#define IPV6_ADDR_SCOPE_MASK 0x00f0U
+
+#define IPV6_ADDR_MAPPED 0x1000U
+
+/*
+ * Addr scopes
+ */
+#define IPV6_ADDR_MC_SCOPE(a) \
+ ((a)->s6_addr[1] & 0x0f) /* nonstandard */
+#define __IPV6_ADDR_SCOPE_INVALID -1
+#define IPV6_ADDR_SCOPE_NODELOCAL 0x01
+#define IPV6_ADDR_SCOPE_LINKLOCAL 0x02
+#define IPV6_ADDR_SCOPE_SITELOCAL 0x05
+#define IPV6_ADDR_SCOPE_ORGLOCAL 0x08
+#define IPV6_ADDR_SCOPE_GLOBAL 0x0e
+
+/*
+ * Addr flags
+ */
+#define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \
+ ((a)->s6_addr[1] & 0x10)
+#define IPV6_ADDR_MC_FLAG_PREFIX(a) \
+ ((a)->s6_addr[1] & 0x20)
+#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \
+ ((a)->s6_addr[1] & 0x40)
+
+/*
+ * fragmentation header
+ */
+
+struct frag_hdr {
+ __u8 nexthdr;
+ __u8 reserved;
+ __be16 frag_off;
+ __be32 identification;
+};
+
+/*
+ * Jumbo payload option, as described in RFC 2675, section 2.
+ */
+struct hop_jumbo_hdr {
+ u8 nexthdr;
+ u8 hdrlen;
+ u8 tlv_type; /* IPV6_TLV_JUMBO, 0xC2 */
+ u8 tlv_len; /* 4 */
+ __be32 jumbo_payload_len;
+};
+
+#define IP6_MF 0x0001
+#define IP6_OFFSET 0xFFF8
+
+struct ip6_fraglist_iter {
+ struct ipv6hdr *tmp_hdr;
+ struct sk_buff *frag;
+ int offset;
+ unsigned int hlen;
+ __be32 frag_id;
+ u8 nexthdr;
+};
+
+int ip6_fraglist_init(struct sk_buff *skb, unsigned int hlen, u8 *prevhdr,
+ u8 nexthdr, __be32 frag_id,
+ struct ip6_fraglist_iter *iter);
+void ip6_fraglist_prepare(struct sk_buff *skb, struct ip6_fraglist_iter *iter);
+
+static inline struct sk_buff *ip6_fraglist_next(struct ip6_fraglist_iter *iter)
+{
+ struct sk_buff *skb = iter->frag;
+
+ iter->frag = skb->next;
+ skb_mark_not_on_list(skb);
+
+ return skb;
+}
+
+struct ip6_frag_state {
+ u8 *prevhdr;
+ unsigned int hlen;
+ unsigned int mtu;
+ unsigned int left;
+ int offset;
+ int ptr;
+ int hroom;
+ int troom;
+ __be32 frag_id;
+ u8 nexthdr;
+};
+
+void ip6_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int mtu,
+ unsigned short needed_tailroom, int hdr_room, u8 *prevhdr,
+ u8 nexthdr, __be32 frag_id, struct ip6_frag_state *state);
+struct sk_buff *ip6_frag_next(struct sk_buff *skb,
+ struct ip6_frag_state *state);
+
+#define IP6_REPLY_MARK(net, mark) \
+ ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)
+
+#include <net/sock.h>
+
+/* sysctls */
+extern int sysctl_mld_max_msf;
+extern int sysctl_mld_qrv;
+
+#define _DEVINC(net, statname, mod, idev, field) \
+({ \
+ struct inet6_dev *_idev = (idev); \
+ if (likely(_idev != NULL)) \
+ mod##SNMP_INC_STATS64((_idev)->stats.statname, (field));\
+ mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field));\
+})
+
+/* per device counters are atomic_long_t */
+#define _DEVINCATOMIC(net, statname, mod, idev, field) \
+({ \
+ struct inet6_dev *_idev = (idev); \
+ if (likely(_idev != NULL)) \
+ SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
+ mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field));\
+})
+
+/* per device and per net counters are atomic_long_t */
+#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field) \
+({ \
+ struct inet6_dev *_idev = (idev); \
+ if (likely(_idev != NULL)) \
+ SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
+ SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
+})
+
+#define _DEVADD(net, statname, mod, idev, field, val) \
+({ \
+ struct inet6_dev *_idev = (idev); \
+ if (likely(_idev != NULL)) \
+ mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \
+ mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\
+})
+
+#define _DEVUPD(net, statname, mod, idev, field, val) \
+({ \
+ struct inet6_dev *_idev = (idev); \
+ if (likely(_idev != NULL)) \
+ mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \
+ mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\
+})
+
+/* MIBs */
+
+#define IP6_INC_STATS(net, idev,field) \
+ _DEVINC(net, ipv6, , idev, field)
+#define __IP6_INC_STATS(net, idev,field) \
+ _DEVINC(net, ipv6, __, idev, field)
+#define IP6_ADD_STATS(net, idev,field,val) \
+ _DEVADD(net, ipv6, , idev, field, val)
+#define __IP6_ADD_STATS(net, idev,field,val) \
+ _DEVADD(net, ipv6, __, idev, field, val)
+#define IP6_UPD_PO_STATS(net, idev,field,val) \
+ _DEVUPD(net, ipv6, , idev, field, val)
+#define __IP6_UPD_PO_STATS(net, idev,field,val) \
+ _DEVUPD(net, ipv6, __, idev, field, val)
+#define ICMP6_INC_STATS(net, idev, field) \
+ _DEVINCATOMIC(net, icmpv6, , idev, field)
+#define __ICMP6_INC_STATS(net, idev, field) \
+ _DEVINCATOMIC(net, icmpv6, __, idev, field)
+
+#define ICMP6MSGOUT_INC_STATS(net, idev, field) \
+ _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
+#define ICMP6MSGIN_INC_STATS(net, idev, field) \
+ _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)
+
+struct ip6_ra_chain {
+ struct ip6_ra_chain *next;
+ struct sock *sk;
+ int sel;
+ void (*destructor)(struct sock *);
+};
+
+extern struct ip6_ra_chain *ip6_ra_chain;
+extern rwlock_t ip6_ra_lock;
+
+/*
+ This structure is prepared by a protocol while parsing
+ ancillary data, and is then passed to IPv6.
+ */
+
+struct ipv6_txoptions {
+ refcount_t refcnt;
+ /* Length of this structure */
+ int tot_len;
+
+ /* length of extension headers */
+
+ __u16 opt_flen; /* after fragment hdr */
+ __u16 opt_nflen; /* before fragment hdr */
+
+ struct ipv6_opt_hdr *hopopt;
+ struct ipv6_opt_hdr *dst0opt;
+ struct ipv6_rt_hdr *srcrt; /* Routing Header */
+ struct ipv6_opt_hdr *dst1opt;
+ struct rcu_head rcu;
+ /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
+};
+
+/* flowlabel_reflect sysctl values */
+enum flowlabel_reflect {
+ FLOWLABEL_REFLECT_ESTABLISHED = 1,
+ FLOWLABEL_REFLECT_TCP_RESET = 2,
+ FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES = 4,
+};
+
+struct ip6_flowlabel {
+ struct ip6_flowlabel __rcu *next;
+ __be32 label;
+ atomic_t users;
+ struct in6_addr dst;
+ struct ipv6_txoptions *opt;
+ unsigned long linger;
+ struct rcu_head rcu;
+ u8 share;
+ union {
+ struct pid *pid;
+ kuid_t uid;
+ } owner;
+ unsigned long lastuse;
+ unsigned long expires;
+ struct net *fl_net;
+};
+
+#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
+#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF)
+#define IPV6_FLOWLABEL_STATELESS_FLAG cpu_to_be32(0x00080000)
+
+#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
+#define IPV6_TCLASS_SHIFT 20
+
+struct ipv6_fl_socklist {
+ struct ipv6_fl_socklist __rcu *next;
+ struct ip6_flowlabel *fl;
+ struct rcu_head rcu;
+};
+
+struct ipcm6_cookie {
+ struct sockcm_cookie sockc;
+ __s16 hlimit;
+ __s16 tclass;
+ __u16 gso_size;
+ __s8 dontfrag;
+ struct ipv6_txoptions *opt;
+};
+
+static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
+{
+ *ipc6 = (struct ipcm6_cookie) {
+ .hlimit = -1,
+ .tclass = -1,
+ .dontfrag = -1,
+ };
+}
+
+static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
+ const struct ipv6_pinfo *np)
+{
+ *ipc6 = (struct ipcm6_cookie) {
+ .hlimit = -1,
+ .tclass = np->tclass,
+ .dontfrag = np->dontfrag,
+ };
+}
+
+static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
+{
+ struct ipv6_txoptions *opt;
+
+ rcu_read_lock();
+ opt = rcu_dereference(np->opt);
+ if (opt) {
+ if (!refcount_inc_not_zero(&opt->refcnt))
+ opt = NULL;
+ else
+ opt = rcu_pointer_handoff(opt);
+ }
+ rcu_read_unlock();
+ return opt;
+}
+
+static inline void txopt_put(struct ipv6_txoptions *opt)
+{
+ if (opt && refcount_dec_and_test(&opt->refcnt))
+ kfree_rcu(opt, rcu);
+}
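+
+/* Typical use of the two helpers above (illustrative): hold a stable
+ * reference to the socket's tx options while building a packet.
+ */
+#if 0 /* example only */
+ struct ipv6_txoptions *opt = txopt_get(inet6_sk(sk));
+
+ /* ... opt cannot be freed while the reference is held ... */
+ txopt_put(opt);
+#endif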
+
+#if IS_ENABLED(CONFIG_IPV6)
+struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label);
+
+extern struct static_key_false_deferred ipv6_flowlabel_exclusive;
+static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk,
+ __be32 label)
+{
+ if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) &&
+ READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl))
+ return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT);
+
+ return NULL;
+}
+#endif
+
+struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
+ struct ip6_flowlabel *fl,
+ struct ipv6_txoptions *fopt);
+void fl6_free_socklist(struct sock *sk);
+int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen);
+int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
+ int flags);
+int ip6_flowlabel_init(void);
+void ip6_flowlabel_cleanup(void);
+bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
+
+static inline void fl6_sock_release(struct ip6_flowlabel *fl)
+{
+ if (fl)
+ atomic_dec(&fl->users);
+}
+
+void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
+
+void icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+ struct icmp6hdr *thdr, int len);
+
+int ip6_ra_control(struct sock *sk, int sel);
+
+int ipv6_parse_hopopts(struct sk_buff *skb);
+
+struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
+ struct ipv6_txoptions *opt);
+struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
+ struct ipv6_txoptions *opt,
+ int newtype,
+ struct ipv6_opt_hdr *newopt);
+struct ipv6_txoptions *__ipv6_fixup_options(struct ipv6_txoptions *opt_space,
+ struct ipv6_txoptions *opt);
+
+static inline struct ipv6_txoptions *
+ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt)
+{
+ if (!opt)
+ return NULL;
+ return __ipv6_fixup_options(opt_space, opt);
+}
+
+bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
+ const struct inet6_skb_parm *opt);
+struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
+ struct ipv6_txoptions *opt);
+
+/* This helper is specialized for BIG TCP needs.
+ * It assumes the hop_jumbo_hdr immediately follows the IPv6 header
+ * and that the headers are already in skb->head.
+ * Returns 0, or IPPROTO_TCP if a BIG TCP packet is present.
+ */
+static inline int ipv6_has_hopopt_jumbo(const struct sk_buff *skb)
+{
+ const struct hop_jumbo_hdr *jhdr;
+ const struct ipv6hdr *nhdr;
+
+ if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
+ return 0;
+
+ if (skb->protocol != htons(ETH_P_IPV6))
+ return 0;
+
+ if (skb_network_offset(skb) +
+ sizeof(struct ipv6hdr) +
+ sizeof(struct hop_jumbo_hdr) > skb_headlen(skb))
+ return 0;
+
+ nhdr = ipv6_hdr(skb);
+
+ if (nhdr->nexthdr != NEXTHDR_HOP)
+ return 0;
+
+ jhdr = (const struct hop_jumbo_hdr *) (nhdr + 1);
+ if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
+ jhdr->nexthdr != IPPROTO_TCP)
+ return 0;
+ return jhdr->nexthdr;
+}
+
+static inline bool ipv6_accept_ra(struct inet6_dev *idev)
+{
+ /* If forwarding is enabled, RAs are not accepted unless the special
+ * hybrid mode (accept_ra=2) is enabled.
+ */
+ return idev->cnf.forwarding ? idev->cnf.accept_ra == 2 :
+ idev->cnf.accept_ra;
+}
+
+#define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
+#define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */
+#define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */
+
+int __ipv6_addr_type(const struct in6_addr *addr);
+static inline int ipv6_addr_type(const struct in6_addr *addr)
+{
+ return __ipv6_addr_type(addr) & 0xffff;
+}
+
+static inline int ipv6_addr_scope(const struct in6_addr *addr)
+{
+ return __ipv6_addr_type(addr) & IPV6_ADDR_SCOPE_MASK;
+}
+
+static inline int __ipv6_addr_src_scope(int type)
+{
+ return (type == IPV6_ADDR_ANY) ? __IPV6_ADDR_SCOPE_INVALID : (type >> 16);
+}
+
+static inline int ipv6_addr_src_scope(const struct in6_addr *addr)
+{
+ return __ipv6_addr_src_scope(__ipv6_addr_type(addr));
+}
+
+static inline bool __ipv6_addr_needs_scope_id(int type)
+{
+ return type & IPV6_ADDR_LINKLOCAL ||
+ (type & IPV6_ADDR_MULTICAST &&
+ (type & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)));
+}
+
+static inline __u32 ipv6_iface_scope_id(const struct in6_addr *addr, int iface)
+{
+ return __ipv6_addr_needs_scope_id(__ipv6_addr_type(addr)) ? iface : 0;
+}
+
+static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2)
+{
+ return memcmp(a1, a2, sizeof(struct in6_addr));
+}
+
+static inline bool
+ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
+ const struct in6_addr *a2)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ul1 = (const unsigned long *)a1;
+ const unsigned long *ulm = (const unsigned long *)m;
+ const unsigned long *ul2 = (const unsigned long *)a2;
+
+ return !!(((ul1[0] ^ ul2[0]) & ulm[0]) |
+ ((ul1[1] ^ ul2[1]) & ulm[1]));
+#else
+ return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) |
+ ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) |
+ ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) |
+ ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
+#endif
+}
+
+static inline void ipv6_addr_prefix(struct in6_addr *pfx,
+ const struct in6_addr *addr,
+ int plen)
+{
+ /* caller must guarantee 0 <= plen <= 128 */
+ int o = plen >> 3,
+ b = plen & 0x7;
+
+ memset(pfx->s6_addr, 0, sizeof(pfx->s6_addr));
+ memcpy(pfx->s6_addr, addr, o);
+ if (b != 0)
+ pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b);
+}
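+
+/* Example: plen = 61 gives o = 7, b = 5; the first 7 bytes are copied
+ * and byte 7 is masked with (0xff00 >> 5), whose low byte is 0xf8, so
+ * only its top 5 bits are kept.
+ */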
+
+static inline void ipv6_addr_prefix_copy(struct in6_addr *addr,
+ const struct in6_addr *pfx,
+ int plen)
+{
+ /* caller must guarantee 0 <= plen <= 128 */
+ int o = plen >> 3,
+ b = plen & 0x7;
+
+ memcpy(addr->s6_addr, pfx, o);
+ if (b != 0) {
+ addr->s6_addr[o] &= ~(0xff00 >> b);
+ addr->s6_addr[o] |= (pfx->s6_addr[o] & (0xff00 >> b));
+ }
+}
+
+static inline void __ipv6_addr_set_half(__be32 *addr,
+ __be32 wh, __be32 wl)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+#if defined(__BIG_ENDIAN)
+ if (__builtin_constant_p(wh) && __builtin_constant_p(wl)) {
+ *(__force u64 *)addr = ((__force u64)(wh) << 32 | (__force u64)(wl));
+ return;
+ }
+#elif defined(__LITTLE_ENDIAN)
+ if (__builtin_constant_p(wl) && __builtin_constant_p(wh)) {
+ *(__force u64 *)addr = ((__force u64)(wl) << 32 | (__force u64)(wh));
+ return;
+ }
+#endif
+#endif
+ addr[0] = wh;
+ addr[1] = wl;
+}
+
+static inline void ipv6_addr_set(struct in6_addr *addr,
+ __be32 w1, __be32 w2,
+ __be32 w3, __be32 w4)
+{
+ __ipv6_addr_set_half(&addr->s6_addr32[0], w1, w2);
+ __ipv6_addr_set_half(&addr->s6_addr32[2], w3, w4);
+}
+
+static inline bool ipv6_addr_equal(const struct in6_addr *a1,
+ const struct in6_addr *a2)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ul1 = (const unsigned long *)a1;
+ const unsigned long *ul2 = (const unsigned long *)a2;
+
+ return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
+#else
+ return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
+ (a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
+ (a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
+ (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
+#endif
+}
+
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+static inline bool __ipv6_prefix_equal64_half(const __be64 *a1,
+ const __be64 *a2,
+ unsigned int len)
+{
+ if (len && ((*a1 ^ *a2) & cpu_to_be64((~0UL) << (64 - len))))
+ return false;
+ return true;
+}
+
+static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
+ const struct in6_addr *addr2,
+ unsigned int prefixlen)
+{
+ const __be64 *a1 = (const __be64 *)addr1;
+ const __be64 *a2 = (const __be64 *)addr2;
+
+ if (prefixlen >= 64) {
+ if (a1[0] ^ a2[0])
+ return false;
+ return __ipv6_prefix_equal64_half(a1 + 1, a2 + 1, prefixlen - 64);
+ }
+ return __ipv6_prefix_equal64_half(a1, a2, prefixlen);
+}
+#else
+static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
+ const struct in6_addr *addr2,
+ unsigned int prefixlen)
+{
+ const __be32 *a1 = addr1->s6_addr32;
+ const __be32 *a2 = addr2->s6_addr32;
+ unsigned int pdw, pbi;
+
+ /* check complete u32 in prefix */
+ pdw = prefixlen >> 5;
+ if (pdw && memcmp(a1, a2, pdw << 2))
+ return false;
+
+ /* check incomplete u32 in prefix */
+ pbi = prefixlen & 0x1f;
+ if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi))))
+ return false;
+
+ return true;
+}
+#endif
+
+static inline bool ipv6_addr_any(const struct in6_addr *a)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ul = (const unsigned long *)a;
+
+ return (ul[0] | ul[1]) == 0UL;
+#else
+ return (a->s6_addr32[0] | a->s6_addr32[1] |
+ a->s6_addr32[2] | a->s6_addr32[3]) == 0;
+#endif
+}
+
+static inline u32 ipv6_addr_hash(const struct in6_addr *a)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ul = (const unsigned long *)a;
+ unsigned long x = ul[0] ^ ul[1];
+
+ return (u32)(x ^ (x >> 32));
+#else
+ return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^
+ a->s6_addr32[2] ^ a->s6_addr32[3]);
+#endif
+}
+
+/* more secure version of ipv6_addr_hash() */
+static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
+{
+ return jhash2((__force const u32 *)a->s6_addr32,
+ ARRAY_SIZE(a->s6_addr32), initval);
+}
+
+static inline bool ipv6_addr_loopback(const struct in6_addr *a)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const __be64 *be = (const __be64 *)a;
+
+ return (be[0] | (be[1] ^ cpu_to_be64(1))) == 0UL;
+#else
+ return (a->s6_addr32[0] | a->s6_addr32[1] |
+ a->s6_addr32[2] | (a->s6_addr32[3] ^ cpu_to_be32(1))) == 0;
+#endif
+}
+
+/*
+ * Note that we must __force cast these to unsigned long to make sparse happy,
+ * since all of the endian-annotated types are fixed size regardless of arch.
+ */
+static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
+{
+ return (
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ *(unsigned long *)a |
+#else
+ (__force unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
+#endif
+ (__force unsigned long)(a->s6_addr32[2] ^
+ cpu_to_be32(0x0000ffff))) == 0UL;
+}
+
+static inline bool ipv6_addr_v4mapped_any(const struct in6_addr *a)
+{
+ return ipv6_addr_v4mapped(a) && ipv4_is_zeronet(a->s6_addr32[3]);
+}
+
+static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a)
+{
+ return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]);
+}
+
+static inline u32 ipv6_portaddr_hash(const struct net *net,
+ const struct in6_addr *addr6,
+ unsigned int port)
+{
+ unsigned int hash, mix = net_hash_mix(net);
+
+ if (ipv6_addr_any(addr6))
+ hash = jhash_1word(0, mix);
+ else if (ipv6_addr_v4mapped(addr6))
+ hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
+ else
+ hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
+
+ return hash ^ port;
+}
+
+/*
+ * Check for an RFC 4843 ORCHID address
+ * (Overlay Routable Cryptographic Hash Identifiers)
+ */
+static inline bool ipv6_addr_orchid(const struct in6_addr *a)
+{
+ return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010);
+}
+
+static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr)
+{
+ return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000);
+}
+
+static inline void ipv6_addr_set_v4mapped(const __be32 addr,
+ struct in6_addr *v4mapped)
+{
+ ipv6_addr_set(v4mapped,
+ 0, 0,
+ htonl(0x0000FFFF),
+ addr);
+}
+
+/*
+ * Find the first differing bit between two addresses.
+ * The address length must be a multiple of 32 bits.
+ */
+static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int addrlen)
+{
+ const __be32 *a1 = token1, *a2 = token2;
+ int i;
+
+ addrlen >>= 2;
+
+ for (i = 0; i < addrlen; i++) {
+ __be32 xb = a1[i] ^ a2[i];
+ if (xb)
+ return i * 32 + 31 - __fls(ntohl(xb));
+ }
+
+ /*
+ * we should *never* get to this point since that
+ * would mean the addrs are equal
+ *
+ * However, we do get to it 8) And exactly, when
+ * addresses are equal 8)
+ *
+ * ip route add 1111::/128 via ...
+ * ip route add 1111::/64 via ...
+ * and we are here.
+ *
+ * Ideally, this function should stop comparison
+ * at prefix length. It does not, but it is still OK,
+ * if returned value is greater than prefix length.
+ * --ANK (980803)
+ */
+ return addrlen << 5;
+}
+
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+static inline int __ipv6_addr_diff64(const void *token1, const void *token2, int addrlen)
+{
+ const __be64 *a1 = token1, *a2 = token2;
+ int i;
+
+ addrlen >>= 3;
+
+ for (i = 0; i < addrlen; i++) {
+ __be64 xb = a1[i] ^ a2[i];
+ if (xb)
+ return i * 64 + 63 - __fls(be64_to_cpu(xb));
+ }
+
+ return addrlen << 6;
+}
+#endif
+
+static inline int __ipv6_addr_diff(const void *token1, const void *token2, int addrlen)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ if (__builtin_constant_p(addrlen) && !(addrlen & 7))
+ return __ipv6_addr_diff64(token1, token2, addrlen);
+#endif
+ return __ipv6_addr_diff32(token1, token2, addrlen);
+}
+
+static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2)
+{
+ return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
+}
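+
+/* Illustrative use (an assumption; names are hypothetical): longest
+ * prefix match code can compare the first differing bit against a
+ * node's prefix length.
+ */
+#if 0 /* example only */
+ int bit = ipv6_addr_diff(&key, &node_addr);
+
+ if (bit >= node_plen)
+ descend(node); /* key fully matches the node's prefix */
+#endif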
+
+__be32 ipv6_select_ident(struct net *net,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr);
+__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
+
+int ip6_dst_hoplimit(struct dst_entry *dst);
+
+static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
+ struct dst_entry *dst)
+{
+ int hlimit;
+
+ if (ipv6_addr_is_multicast(&fl6->daddr))
+ hlimit = np->mcast_hops;
+ else
+ hlimit = np->hop_limit;
+ if (hlimit < 0)
+ hlimit = ip6_dst_hoplimit(dst);
+ return hlimit;
+}
+
+/* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store
+ * Equivalent to : flow->v6addrs.src = iph->saddr;
+ * flow->v6addrs.dst = iph->daddr;
+ */
+static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
+ const struct ipv6hdr *iph)
+{
+ BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
+ offsetof(typeof(flow->addrs), v6addrs.src) +
+ sizeof(flow->addrs.v6addrs.src));
+ memcpy(&flow->addrs.v6addrs, &iph->addrs, sizeof(flow->addrs.v6addrs));
+ flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+static inline bool ipv6_can_nonlocal_bind(struct net *net,
+ struct inet_sock *inet)
+{
+ return net->ipv6.sysctl.ip_nonlocal_bind ||
+ inet->freebind || inet->transparent;
+}
+
+/* Sysctl settings for net ipv6.auto_flowlabels */
+#define IP6_AUTO_FLOW_LABEL_OFF 0
+#define IP6_AUTO_FLOW_LABEL_OPTOUT 1
+#define IP6_AUTO_FLOW_LABEL_OPTIN 2
+#define IP6_AUTO_FLOW_LABEL_FORCED 3
+
+#define IP6_AUTO_FLOW_LABEL_MAX IP6_AUTO_FLOW_LABEL_FORCED
+
+#define IP6_DEFAULT_AUTO_FLOW_LABELS IP6_AUTO_FLOW_LABEL_OPTOUT
+
+static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
+ __be32 flowlabel, bool autolabel,
+ struct flowi6 *fl6)
+{
+ u32 hash;
+
+ /* @flowlabel may include more than a flow label, e.g. the traffic class.
+ * Here we want only the flow label value.
+ */
+ flowlabel &= IPV6_FLOWLABEL_MASK;
+
+ if (flowlabel ||
+ net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
+ (!autolabel &&
+ net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
+ return flowlabel;
+
+ hash = skb_get_hash_flowi6(skb, fl6);
+
+ /* Since this is being sent on the wire, obfuscate the hash a bit
+ * to minimize the possibility of leaking any information useful to
+ * an attacker. Only the lower 20 bits are relevant.
+ */
+ hash = rol32(hash, 16);
+
+ flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+
+ if (net->ipv6.sysctl.flowlabel_state_ranges)
+ flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
+
+ return flowlabel;
+}
+
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+ switch (net->ipv6.sysctl.auto_flowlabels) {
+ case IP6_AUTO_FLOW_LABEL_OFF:
+ case IP6_AUTO_FLOW_LABEL_OPTIN:
+ default:
+ return 0;
+ case IP6_AUTO_FLOW_LABEL_OPTOUT:
+ case IP6_AUTO_FLOW_LABEL_FORCED:
+ return 1;
+ }
+}
+#else
+static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
+ __be32 flowlabel, bool autolabel,
+ struct flowi6 *fl6)
+{
+ return flowlabel;
+}
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int ip6_multipath_hash_policy(const struct net *net)
+{
+ return net->ipv6.sysctl.multipath_hash_policy;
+}
+static inline u32 ip6_multipath_hash_fields(const struct net *net)
+{
+ return net->ipv6.sysctl.multipath_hash_fields;
+}
+#else
+static inline int ip6_multipath_hash_policy(const struct net *net)
+{
+ return 0;
+}
+static inline u32 ip6_multipath_hash_fields(const struct net *net)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Header manipulation
+ */
+static inline void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass,
+ __be32 flowlabel)
+{
+ *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | flowlabel;
+}
+
+static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr)
+{
+ return *(__be32 *)hdr & IPV6_FLOWINFO_MASK;
+}
+
+static inline __be32 ip6_flowlabel(const struct ipv6hdr *hdr)
+{
+ return *(__be32 *)hdr & IPV6_FLOWLABEL_MASK;
+}
+
+static inline u8 ip6_tclass(__be32 flowinfo)
+{
+ return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT;
+}
+
+static inline dscp_t ip6_dscp(__be32 flowinfo)
+{
+ return inet_dsfield_to_dscp(ip6_tclass(flowinfo));
+}
+
+static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
+{
+ return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
+}
+
+static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6)
+{
+ return fl6->flowlabel & IPV6_FLOWLABEL_MASK;
+}
+
+/*
+ * Prototypes exported by ipv6
+ */
+
+/*
+ * rcv function (called from netdevice level)
+ */
+
+int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev);
+void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
+ struct net_device *orig_dev);
+
+int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
+
+/*
+ * upper-layer output functions
+ */
+int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+ __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority);
+
+int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
+
+int ip6_append_data(struct sock *sk,
+ int getfrag(void *from, char *to, int offset, int len,
+ int odd, struct sk_buff *skb),
+ void *from, size_t length, int transhdrlen,
+ struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
+ struct rt6_info *rt, unsigned int flags);
+
+int ip6_push_pending_frames(struct sock *sk);
+
+void ip6_flush_pending_frames(struct sock *sk);
+
+int ip6_send_skb(struct sk_buff *skb);
+
+struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue,
+ struct inet_cork_full *cork,
+ struct inet6_cork *v6_cork);
+struct sk_buff *ip6_make_skb(struct sock *sk,
+ int getfrag(void *from, char *to, int offset,
+ int len, int odd, struct sk_buff *skb),
+ void *from, size_t length, int transhdrlen,
+ struct ipcm6_cookie *ipc6,
+ struct rt6_info *rt, unsigned int flags,
+ struct inet_cork_full *cork);
+
+static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
+{
+ return __ip6_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork,
+ &inet6_sk(sk)->cork);
+}
+
+int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+ struct flowi6 *fl6);
+struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
+ const struct in6_addr *final_dst);
+struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+ const struct in6_addr *final_dst,
+ bool connected);
+struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net *net, struct socket *sock,
+ struct in6_addr *saddr,
+ const struct ip_tunnel_info *info,
+ u8 protocol, bool use_cache);
+struct dst_entry *ip6_blackhole_route(struct net *net,
+ struct dst_entry *orig_dst);
+
+/*
+ * skb processing functions
+ */
+
+int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip6_forward(struct sk_buff *skb);
+int ip6_input(struct sk_buff *skb);
+int ip6_mc_input(struct sk_buff *skb);
+void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
+ bool have_final);
+
+int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+
+/*
+ * Extension header (options) processing
+ */
+
+void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
+ u8 *proto, struct in6_addr **daddr_p,
+ struct in6_addr *saddr);
+void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
+ u8 *proto);
+
+int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp,
+ __be16 *frag_offp);
+
+bool ipv6_ext_hdr(u8 nexthdr);
+
+enum {
+ IP6_FH_F_FRAG = (1 << 0),
+ IP6_FH_F_AUTH = (1 << 1),
+ IP6_FH_F_SKIP_RH = (1 << 2),
+};
+
+/* find specified header and get offset to it */
+int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target,
+ unsigned short *fragoff, int *fragflg);
+
+int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type);
+
+struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
+ const struct ipv6_txoptions *opt,
+ struct in6_addr *orig);
+
+/*
+ * socket options (ipv6_sockglue.c)
+ */
+DECLARE_STATIC_KEY_FALSE(ip6_min_hopcount);
+
+int do_ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+ unsigned int optlen);
+int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+ unsigned int optlen);
+int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
+int ipv6_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+
+int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr,
+ int addr_len);
+int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
+int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
+ int addr_len);
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
+void ip6_datagram_release_cb(struct sock *sk);
+
+int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
+ int *addr_len);
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
+ int *addr_len);
+void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+ u32 info, u8 *payload);
+void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
+void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
+
+void inet6_cleanup_sock(struct sock *sk);
+void inet6_sock_destruct(struct sock *sk);
+int inet6_release(struct socket *sock);
+int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
+ int peer);
+int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+int inet6_compat_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg);
+
+int inet6_hash_connect(struct inet_timewait_death_row *death_row,
+ struct sock *sk);
+int inet6_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
+int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
+
+/*
+ * exported proto_ops (defined in af_inet6.c and raw.c)
+ */
+extern const struct proto_ops inet6_stream_ops;
+extern const struct proto_ops inet6_dgram_ops;
+extern const struct proto_ops inet6_sockraw_ops;
+
+struct group_source_req;
+struct group_filter;
+
+int ip6_mc_source(int add, int omode, struct sock *sk,
+ struct group_source_req *pgsr);
+int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
+ struct sockaddr_storage *list);
+int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
+ sockptr_t optval, size_t ss_offset);
+
+#ifdef CONFIG_PROC_FS
+int ac6_proc_init(struct net *net);
+void ac6_proc_exit(struct net *net);
+int raw6_proc_init(void);
+void raw6_proc_exit(void);
+int tcp6_proc_init(struct net *net);
+void tcp6_proc_exit(struct net *net);
+int udp6_proc_init(struct net *net);
+void udp6_proc_exit(struct net *net);
+int udplite6_proc_init(void);
+void udplite6_proc_exit(void);
+int ipv6_misc_proc_init(void);
+void ipv6_misc_proc_exit(void);
+int snmp6_register_dev(struct inet6_dev *idev);
+int snmp6_unregister_dev(struct inet6_dev *idev);
+
+#else
+static inline int ac6_proc_init(struct net *net) { return 0; }
+static inline void ac6_proc_exit(struct net *net) { }
+static inline int snmp6_register_dev(struct inet6_dev *idev) { return 0; }
+static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
+#endif
+
+#ifdef CONFIG_SYSCTL
+struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
+struct ctl_table *ipv6_route_sysctl_init(struct net *net);
+int ipv6_sysctl_register(void);
+void ipv6_sysctl_unregister(void);
+#endif
+
+int ipv6_sock_mc_join(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
+ const struct in6_addr *addr, unsigned int mode);
+int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+
+static inline int ip6_sock_set_v6only(struct sock *sk)
+{
+ if (inet_sk(sk)->inet_num)
+ return -EINVAL;
+ lock_sock(sk);
+ sk->sk_ipv6only = true;
+ release_sock(sk);
+ return 0;
+}
+
+static inline void ip6_sock_set_recverr(struct sock *sk)
+{
+ lock_sock(sk);
+ inet6_sk(sk)->recverr = true;
+ release_sock(sk);
+}
+
+static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
+{
+ unsigned int pref = 0;
+ unsigned int prefmask = ~0;
+
+ /* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */
+ switch (val & (IPV6_PREFER_SRC_PUBLIC |
+ IPV6_PREFER_SRC_TMP |
+ IPV6_PREFER_SRC_PUBTMP_DEFAULT)) {
+ case IPV6_PREFER_SRC_PUBLIC:
+ pref |= IPV6_PREFER_SRC_PUBLIC;
+ prefmask &= ~(IPV6_PREFER_SRC_PUBLIC |
+ IPV6_PREFER_SRC_TMP);
+ break;
+ case IPV6_PREFER_SRC_TMP:
+ pref |= IPV6_PREFER_SRC_TMP;
+ prefmask &= ~(IPV6_PREFER_SRC_PUBLIC |
+ IPV6_PREFER_SRC_TMP);
+ break;
+ case IPV6_PREFER_SRC_PUBTMP_DEFAULT:
+ prefmask &= ~(IPV6_PREFER_SRC_PUBLIC |
+ IPV6_PREFER_SRC_TMP);
+ break;
+ case 0:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* check HOME/COA conflicts */
+ switch (val & (IPV6_PREFER_SRC_HOME | IPV6_PREFER_SRC_COA)) {
+ case IPV6_PREFER_SRC_HOME:
+ prefmask &= ~IPV6_PREFER_SRC_COA;
+ break;
+ case IPV6_PREFER_SRC_COA:
+ pref |= IPV6_PREFER_SRC_COA;
+ break;
+ case 0:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* check CGA/NONCGA conflicts */
+ switch (val & (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) {
+ case IPV6_PREFER_SRC_CGA:
+ case IPV6_PREFER_SRC_NONCGA:
+ case 0:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ inet6_sk(sk)->srcprefs = (inet6_sk(sk)->srcprefs & prefmask) | pref;
+ return 0;
+}
+
+static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val)
+{
+ int ret;
+
+ lock_sock(sk);
+ ret = __ip6_sock_set_addr_preferences(sk, val);
+ release_sock(sk);
+ return ret;
+}
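+
+/* Usage sketch (illustrative): prefer temporary (privacy) source
+ * addresses on a socket, as the IPV6_ADDR_PREFERENCES option with
+ * IPV6_PREFER_SRC_TMP would.
+ */
+#if 0 /* example only */
+ err = ip6_sock_set_addr_preferences(sk, IPV6_PREFER_SRC_TMP);
+#endif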
+
+static inline void ip6_sock_set_recvpktinfo(struct sock *sk)
+{
+ lock_sock(sk);
+ inet6_sk(sk)->rxopt.bits.rxinfo = true;
+ release_sock(sk);
+}
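+
+/*
+ * Illustrative sketch (not part of this header): in-kernel users are
+ * expected to call these helpers on a freshly created kernel socket
+ * instead of going through setsockopt(), e.g.:
+ *
+ *	struct socket *sock;
+ *	int err;
+ *
+ *	err = sock_create_kern(net, PF_INET6, SOCK_STREAM, IPPROTO_TCP, &sock);
+ *	if (!err)
+ *		err = ip6_sock_set_v6only(sock->sk);
+ */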
+
+#endif /* _NET_IPV6_H */
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
new file mode 100644
index 000000000..5052c66e2
--- /dev/null
+++ b/include/net/ipv6_frag.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _IPV6_FRAG_H
+#define _IPV6_FRAG_H
+#include <linux/icmpv6.h>
+#include <linux/kernel.h>
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+#include <net/inet_frag.h>
+
+enum ip6_defrag_users {
+ IP6_DEFRAG_LOCAL_DELIVER,
+ IP6_DEFRAG_CONNTRACK_IN,
+ __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
+ IP6_DEFRAG_CONNTRACK_OUT,
+ __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
+ IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
+ __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
+};
+
+/*
+ * Equivalent of the ipv4 fragment queue, struct ipq
+ */
+struct frag_queue {
+ struct inet_frag_queue q;
+
+ int iif;
+ __u16 nhoffset;
+ u8 ecn;
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
+{
+ struct frag_queue *fq = container_of(q, struct frag_queue, q);
+ const struct frag_v6_compare_key *key = a;
+
+ q->key.v6 = *key;
+ fq->ecn = 0;
+}
+
+static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ return jhash2(data,
+ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct inet_frag_queue *fq = data;
+
+ return jhash2((const u32 *)&fq->key.v6,
+ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static inline int
+ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+ const struct frag_v6_compare_key *key = arg->key;
+ const struct inet_frag_queue *fq = ptr;
+
+ return !!memcmp(&fq->key, key, sizeof(*key));
+}
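+
+/*
+ * A minimal sketch (illustrative) of how these helpers are meant to be
+ * plugged into the fragment hash table parameters:
+ *
+ *	static const struct rhashtable_params ip6_rhash_params = {
+ *		.head_offset		= offsetof(struct inet_frag_queue, node),
+ *		.hashfn			= ip6frag_key_hashfn,
+ *		.obj_hashfn		= ip6frag_obj_hashfn,
+ *		.obj_cmpfn		= ip6frag_obj_cmpfn,
+ *		.automatic_shrinking	= true,
+ *	};
+ */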
+
+static inline void
+ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
+{
+ struct net_device *dev = NULL;
+ struct sk_buff *head;
+
+ rcu_read_lock();
+ /* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */
+ if (READ_ONCE(fq->q.fqdir->dead))
+ goto out_rcu_unlock;
+ spin_lock(&fq->q.lock);
+
+ if (fq->q.flags & INET_FRAG_COMPLETE)
+ goto out;
+
+ inet_frag_kill(&fq->q);
+
+ dev = dev_get_by_index_rcu(net, fq->iif);
+ if (!dev)
+ goto out;
+
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+
+ /* Don't send error if the first segment did not arrive. */
+ if (!(fq->q.flags & INET_FRAG_FIRST_IN))
+ goto out;
+
+ /* sk_buff::dev and sk_buff::rbnode are unionized. So we
+ * pull the head out of the tree in order to be able to
+ * deal with head->dev.
+ */
+ head = inet_frag_pull_head(&fq->q);
+ if (!head)
+ goto out;
+
+ head->dev = dev;
+ spin_unlock(&fq->q.lock);
+
+ icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
+ kfree_skb(head);
+ goto out_rcu_unlock;
+
+out:
+ spin_unlock(&fq->q.lock);
+out_rcu_unlock:
+ rcu_read_unlock();
+ inet_frag_put(&fq->q);
+}
+
+/* Check if the upper layer header is truncated in the first fragment. */
+static inline bool
+ipv6frag_thdr_truncated(struct sk_buff *skb, int start, u8 *nexthdrp)
+{
+ u8 nexthdr = *nexthdrp;
+ __be16 frag_off;
+ int offset;
+
+ offset = ipv6_skip_exthdr(skb, start, &nexthdr, &frag_off);
+ if (offset < 0 || (frag_off & htons(IP6_OFFSET)))
+ return false;
+ switch (nexthdr) {
+ case NEXTHDR_TCP:
+ offset += sizeof(struct tcphdr);
+ break;
+ case NEXTHDR_UDP:
+ offset += sizeof(struct udphdr);
+ break;
+ case NEXTHDR_ICMP:
+ offset += sizeof(struct icmp6hdr);
+ break;
+ default:
+ offset += 1;
+ }
+ if (offset > skb->len)
+ return true;
+ return false;
+}
+
+#endif
+#endif
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
new file mode 100644
index 000000000..c48186bf4
--- /dev/null
+++ b/include/net/ipv6_stubs.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _IPV6_STUBS_H
+#define _IPV6_STUBS_H
+
+#include <linux/in6.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <net/dst.h>
+#include <net/flow.h>
+#include <net/neighbour.h>
+#include <net/sock.h>
+
+/* structs from net/ip6_fib.h */
+struct fib6_info;
+struct fib6_nh;
+struct fib6_config;
+struct fib6_result;
+
+/* This is ugly, ideally these symbols should be built
+ * into the core kernel.
+ */
+struct ipv6_stub {
+ int (*ipv6_sock_mc_join)(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+ int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+ struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net,
+ const struct sock *sk,
+ struct flowi6 *fl6,
+ const struct in6_addr *final_dst);
+ int (*ipv6_route_input)(struct sk_buff *skb);
+
+ struct fib6_table *(*fib6_get_table)(struct net *net, u32 id);
+ int (*fib6_lookup)(struct net *net, int oif, struct flowi6 *fl6,
+ struct fib6_result *res, int flags);
+ int (*fib6_table_lookup)(struct net *net, struct fib6_table *table,
+ int oif, struct flowi6 *fl6,
+ struct fib6_result *res, int flags);
+ void (*fib6_select_path)(const struct net *net, struct fib6_result *res,
+ struct flowi6 *fl6, int oif, bool oif_match,
+ const struct sk_buff *skb, int strict);
+ u32 (*ip6_mtu_from_fib6)(const struct fib6_result *res,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr);
+
+ int (*fib6_nh_init)(struct net *net, struct fib6_nh *fib6_nh,
+ struct fib6_config *cfg, gfp_t gfp_flags,
+ struct netlink_ext_ack *extack);
+ void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
+ void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh);
+ void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt);
+ int (*ip6_del_rt)(struct net *net, struct fib6_info *rt, bool skip_notify);
+ void (*fib6_rt_update)(struct net *net, struct fib6_info *rt,
+ struct nl_info *info);
+
+ void (*udpv6_encap_enable)(void);
+ void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr,
+ const struct in6_addr *solicited_addr,
+ bool router, bool solicited, bool override, bool inc_opt);
+#if IS_ENABLED(CONFIG_XFRM)
+ void (*xfrm6_local_rxpmtu)(struct sk_buff *skb, u32 mtu);
+ int (*xfrm6_udp_encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ int (*xfrm6_rcv_encap)(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type);
+#endif
+ struct neigh_table *nd_tbl;
+
+ int (*ipv6_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct net *, struct sock *, struct sk_buff *));
+ struct net_device *(*ipv6_dev_find)(struct net *net, const struct in6_addr *addr,
+ struct net_device *dev);
+};
+extern const struct ipv6_stub *ipv6_stub __read_mostly;
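+
+/*
+ * Illustrative only: modules that must work whether or not ipv6 is
+ * loaded (e.g. tunnel drivers) call through the stub rather than
+ * referencing ipv6 symbols directly, e.g.:
+ *
+ *	err = ipv6_stub->ipv6_sock_mc_join(sk, ifindex, &mc_addr);
+ *
+ * Until the ipv6 module registers itself, the stub points at default
+ * implementations that fail with -EAFNOSUPPORT.
+ */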
+
+/* A stub used by bpf helpers. Just as ugly as ipv6_stub */
+struct ipv6_bpf_stub {
+ int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len,
+ u32 flags);
+ struct sock *(*udp6_lib_lookup)(struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, __be16 dport,
+ int dif, int sdif, struct udp_table *tbl,
+ struct sk_buff *skb);
+ int (*ipv6_setsockopt)(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
+ int (*ipv6_getsockopt)(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
+};
+extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
+
+#endif
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
new file mode 100644
index 000000000..df85d19fb
--- /dev/null
+++ b/include/net/iucv/af_iucv.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2006 IBM Corporation
+ * IUCV protocol stack for Linux on zSeries
+ * Version 1.0
+ * Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
+ *
+ */
+
+#ifndef __AFIUCV_H
+#define __AFIUCV_H
+
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <linux/socket.h>
+#include <net/iucv/iucv.h>
+
+#ifndef AF_IUCV
+#define AF_IUCV 32
+#define PF_IUCV AF_IUCV
+#endif
+
+/* Connection and socket states */
+enum {
+ IUCV_CONNECTED = 1,
+ IUCV_OPEN,
+ IUCV_BOUND,
+ IUCV_LISTEN,
+ IUCV_DISCONN,
+ IUCV_CLOSING,
+ IUCV_CLOSED
+};
+
+#define IUCV_QUEUELEN_DEFAULT 65535
+#define IUCV_HIPER_MSGLIM_DEFAULT 128
+#define IUCV_CONN_TIMEOUT (HZ * 40)
+#define IUCV_DISCONN_TIMEOUT (HZ * 2)
+#define IUCV_CONN_IDLE_TIMEOUT (HZ * 60)
+#define IUCV_BUFSIZE_DEFAULT 32768
+
+/* IUCV socket address */
+struct sockaddr_iucv {
+ sa_family_t siucv_family;
+ unsigned short siucv_port; /* Reserved */
+ unsigned int siucv_addr; /* Reserved */
+ char siucv_nodeid[8]; /* Reserved */
+ char siucv_user_id[8]; /* Guest User Id */
+ char siucv_name[8]; /* Application Name */
+};
+
+
+/* Common socket structures and functions */
+struct sock_msg_q {
+ struct iucv_path *path;
+ struct iucv_message msg;
+ struct list_head list;
+ spinlock_t lock;
+};
+
+#define AF_IUCV_FLAG_ACK 0x1
+#define AF_IUCV_FLAG_SYN 0x2
+#define AF_IUCV_FLAG_FIN 0x4
+#define AF_IUCV_FLAG_WIN 0x8
+#define AF_IUCV_FLAG_SHT 0x10
+
+struct af_iucv_trans_hdr {
+ u16 magic;
+ u8 version;
+ u8 flags;
+ u16 window;
+ char destNodeID[8];
+ char destUserID[8];
+ char destAppName[16];
+ char srcNodeID[8];
+ char srcUserID[8];
+ char srcAppName[16]; /* => 70 bytes */
+ struct iucv_message iucv_hdr; /* => 33 bytes */
+ u8 pad; /* total 104 bytes */
+} __packed;
+
+static inline struct af_iucv_trans_hdr *iucv_trans_hdr(struct sk_buff *skb)
+{
+ return (struct af_iucv_trans_hdr *)skb_network_header(skb);
+}
+
+enum iucv_tx_notify {
+ /* transmission of skb is completed and was successful */
+ TX_NOTIFY_OK = 0,
+ /* target is unreachable */
+ TX_NOTIFY_UNREACHABLE = 1,
+ /* transfer pending queue full */
+ TX_NOTIFY_TPQFULL = 2,
+ /* general error */
+ TX_NOTIFY_GENERALERROR = 3,
+ /* transmission of skb is pending - may interleave
+ * with TX_NOTIFY_DELAYED_* */
+ TX_NOTIFY_PENDING = 4,
+ /* transmission of skb was done successfully (delayed) */
+ TX_NOTIFY_DELAYED_OK = 5,
+ /* target unreachable (detected delayed) */
+ TX_NOTIFY_DELAYED_UNREACHABLE = 6,
+ /* general error (detected delayed) */
+ TX_NOTIFY_DELAYED_GENERALERROR = 7,
+};
+
+#define iucv_sk(__sk) ((struct iucv_sock *) __sk)
+
+#define AF_IUCV_TRANS_IUCV 0
+#define AF_IUCV_TRANS_HIPER 1
+
+struct iucv_sock {
+ struct sock sk;
+ struct_group(init,
+ char src_user_id[8];
+ char src_name[8];
+ char dst_user_id[8];
+ char dst_name[8];
+ );
+ struct list_head accept_q;
+ spinlock_t accept_q_lock;
+ struct sock *parent;
+ struct iucv_path *path;
+ struct net_device *hs_dev;
+ struct sk_buff_head send_skb_q;
+ struct sk_buff_head backlog_skb_q;
+ struct sock_msg_q message_q;
+ unsigned int send_tag;
+ u8 flags;
+ u16 msglimit;
+ u16 msglimit_peer;
+ atomic_t skbs_in_xmit;
+ atomic_t msg_sent;
+ atomic_t msg_recv;
+ atomic_t pendings;
+ int transport;
+ void (*sk_txnotify)(struct sock *sk,
+ enum iucv_tx_notify n);
+};
+
+struct iucv_skb_cb {
+ u32 class; /* target class of message */
+ u32 tag; /* tag associated with message */
+ u32 offset; /* receive offset within the skb */
+};
+
+#define IUCV_SKB_CB(__skb) ((struct iucv_skb_cb *)&((__skb)->cb[0]))
+
+/* iucv socket options (SOL_IUCV) */
+#define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */
+#define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */
+#define SO_MSGSIZE 0x0800 /* get maximum msgsize */
+
+/* iucv related control messages (scm) */
+#define SCM_IUCV_TRGCLS 0x0001 /* target class control message */
+
+struct iucv_sock_list {
+ struct hlist_head head;
+ rwlock_t lock;
+ atomic_t autobind_name;
+};
+
+#endif /* __AFIUCV_H */
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
new file mode 100644
index 000000000..f9e88401d
--- /dev/null
+++ b/include/net/iucv/iucv.h
@@ -0,0 +1,496 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/net/iucv/iucv.h
+ * IUCV base support.
+ *
+ * S390 version
+ * Copyright 2000, 2006 IBM Corporation
+ * Author(s):Alan Altmark (Alan_Altmark@us.ibm.com)
+ * Xenia Tkatschow (xenia@us.ibm.com)
+ * Rewritten for af_iucv:
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ *
+ * Functionality:
+ * To use any of the IUCV functions, a program must first register its
+ * handler using iucv_register(). Once the registration has completed
+ * successfully, the program can use the other functions.
+ * For further reference on all IUCV functionality, refer to the
+ * CP Programming Services book, also available on the web through
+ * www.vm.ibm.com/pubs, manual # SC24-6084
+ *
+ * Definition of Return Codes
+ * - All positive return codes including zero are reflected back
+ * from CP. The definition of each return code can be found in
+ * CP Programming Services book.
+ * - Return Code of:
+ * -EINVAL: Invalid value
+ * -ENOMEM: storage allocation failed
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <asm/debug.h>
+
+/*
+ * IUCV option flags usable by device drivers:
+ *
+ * IUCV_IPRMDATA Indicates that your program can handle a message in the
+ * parameter list / a message is sent in the parameter list.
+ * Used for iucv_path_accept, iucv_path_connect,
+ * iucv_message_reply, iucv_message_send, iucv_message_send2way.
+ * IUCV_IPQUSCE Indicates that you do not want to receive messages on this
+ * path until an iucv_path_resume is issued.
+ * Used for iucv_path_accept, iucv_path_connect.
+ * IUCV_IPBUFLST Indicates that an address list is used for the message data.
+ * Used for iucv_message_receive, iucv_message_send,
+ * iucv_message_send2way.
+ * IUCV_IPPRTY Specifies that you want to send priority messages.
+ * Used for iucv_path_accept, iucv_path_connect,
+ * iucv_message_reply, iucv_message_send, iucv_message_send2way.
+ * IUCV_IPSYNC Indicates a synchronous send request.
+ * Used for iucv_message_send, iucv_message_send2way.
+ * IUCV_IPANSLST Indicates that an address list is used for the reply data.
+ * Used for iucv_message_reply, iucv_message_send2way.
+ * IUCV_IPLOCAL Specifies that the communication partner has to be on the
+ * local system. If local is specified no target class can be
+ * specified.
+ * Used for iucv_path_connect.
+ *
+ * All flags are defined in the input field IPFLAGS1 of each function
+ * and can be found in CP Programming Services.
+ */
+#define IUCV_IPRMDATA 0x80
+#define IUCV_IPQUSCE 0x40
+#define IUCV_IPBUFLST 0x40
+#define IUCV_IPPRTY 0x20
+#define IUCV_IPANSLST 0x08
+#define IUCV_IPSYNC 0x04
+#define IUCV_IPLOCAL 0x01
+
+/*
+ * iucv_array : Defines buffer array.
+ * Inside the array may be 31-bit addresses and 31-bit lengths.
+ * Use a pointer to an iucv_array as the buffer, reply or answer
+ * parameter on iucv_message_send, iucv_message_send2way, iucv_message_receive
+ * and iucv_message_reply if IUCV_IPBUFLST or IUCV_IPANSLST are used.
+ */
+struct iucv_array {
+ u32 address;
+ u32 length;
+} __attribute__ ((aligned (8)));
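+
+/*
+ * Sketch (illustrative; hdr/data are hypothetical buffers): gathering a
+ * message from two discontiguous buffers with IUCV_IPBUFLST. The size
+ * argument covers the sum of the array entries:
+ *
+ *	struct iucv_array iba[2] = {
+ *		{ .address = (u32)(addr_t) hdr,  .length = hdr_len },
+ *		{ .address = (u32)(addr_t) data, .length = data_len },
+ *	};
+ *
+ *	rc = iucv_message_send(path, &msg, IUCV_IPBUFLST, 0,
+ *			       (void *) iba, hdr_len + data_len);
+ */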
+
+extern struct bus_type iucv_bus;
+extern struct device *iucv_root;
+
+/*
+ * struct iucv_path
+ * pathid: 16 bit path identification
+ * msglim: 16 bit message limit
+ * flags: properties of the path: IPRMDATA, IPQUSCE, IPPRTY
+ * handler: address of iucv handler structure
+ * private: private information of the handler associated with the path
+ * list: list_head for the iucv_handler path list.
+ */
+struct iucv_path {
+ u16 pathid;
+ u16 msglim;
+ u8 flags;
+ void *private;
+ struct iucv_handler *handler;
+ struct list_head list;
+};
+
+/*
+ * struct iucv_message
+ * id: 32 bit message id
+ * audit: 32 bit error information of purged or replied messages
+ * class: 32 bit target class of a message (source class for replies)
+ * tag: 32 bit tag to be associated with the message
+ * length: 32 bit length of the message / reply
+ * reply_size: 32 bit maximum allowed length of the reply
+ * rmmsg: 8 byte inline message
+ * flags: message properties (IUCV_IPPRTY)
+ */
+struct iucv_message {
+ u32 id;
+ u32 audit;
+ u32 class;
+ u32 tag;
+ u32 length;
+ u32 reply_size;
+ u8 rmmsg[8];
+ u8 flags;
+} __packed;
+
+/*
+ * struct iucv_handler
+ *
+ * A vector of functions that handle IUCV interrupts. Each function gets
+ * a parameter area as defined by the CP Programming Services and a private
+ * pointer that is provided by the user of the interface.
+ */
+struct iucv_handler {
+ /*
+ * The path_pending function is called after an iucv interrupt
+ * type 0x01 has been received. The base code allocates a path
+ * structure and "asks" the handler if this path belongs to the
+ * handler. To accept the path the path_pending function needs
+ * to call iucv_path_accept and return 0. If the callback returns
+ * a value != 0 the iucv base code will continue with the next
+ * handler. The order in which the path_pending functions are
+ * called is the order of the registration of the iucv handlers
+ * to the base code.
+ */
+ int (*path_pending)(struct iucv_path *, u8 *ipvmid, u8 *ipuser);
+ /*
+ * The path_complete function is called after an iucv interrupt
+ * type 0x02 has been received for a path that has been established
+ * for this handler with iucv_path_connect and got accepted by the
+ * peer with iucv_path_accept.
+ */
+ void (*path_complete)(struct iucv_path *, u8 *ipuser);
+ /*
+ * The path_severed function is called after an iucv interrupt
+ * type 0x03 has been received. The communication peer has shut down
+ * its end of the communication path. The path still exists and
+ * remaining messages can be received until an iucv_path_sever
+ * shuts down the other end of the path as well.
+ */
+ void (*path_severed)(struct iucv_path *, u8 *ipuser);
+ /*
+ * The path_quiesced function is called after an iucv interrupt
+ * type 0x04 has been received. The communication peer has quiesced
+ * the path. Delivery of messages is stopped until iucv_path_resume
+ * has been called.
+ */
+ void (*path_quiesced)(struct iucv_path *, u8 *ipuser);
+ /*
+ * The path_resumed function is called after an iucv interrupt
+ * type 0x05 has been received. The communication peer has resumed
+ * the path.
+ */
+ void (*path_resumed)(struct iucv_path *, u8 *ipuser);
+ /*
+ * The message_pending function is called after an iucv interrupt
+ * type 0x06 or type 0x07 has been received. A new message is
+ * available and can be received with iucv_message_receive.
+ */
+ void (*message_pending)(struct iucv_path *, struct iucv_message *);
+ /*
+ * The message_complete function is called after an iucv interrupt
+ * type 0x08 or type 0x09 has been received. A message sent with
+ * iucv_message_send2way has been replied to. The reply can be
+ * received with iucv_message_receive.
+ */
+ void (*message_complete)(struct iucv_path *, struct iucv_message *);
+
+ struct list_head list;
+ struct list_head paths;
+};
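+
+/*
+ * A minimal sketch of a handler (the my_* names are hypothetical):
+ * accept every pending path and register the handler with the base code.
+ *
+ *	static struct iucv_handler my_handler;
+ *
+ *	static int my_path_pending(struct iucv_path *path, u8 *ipvmid,
+ *				   u8 *ipuser)
+ *	{
+ *		return iucv_path_accept(path, &my_handler, NULL, NULL);
+ *	}
+ *
+ *	static struct iucv_handler my_handler = {
+ *		.path_pending	= my_path_pending,
+ *	};
+ *
+ *	rc = iucv_register(&my_handler, 0);
+ */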
+
+/**
+ * iucv_register:
+ * @handler: address of iucv handler structure
+ * @smp: != 0 indicates that the handler can deal with out of order messages
+ *
+ * Registers a driver with IUCV.
+ *
+ * Returns 0 on success, -ENOMEM if the memory allocation for the pathid
+ * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus.
+ */
+int iucv_register(struct iucv_handler *handler, int smp);
+
+/**
+ * iucv_unregister
+ * @handler: address of iucv handler structure
+ * @smp: != 0 indicates that the handler can deal with out of order messages
+ *
+ * Unregister driver from IUCV.
+ */
+void iucv_unregister(struct iucv_handler *handler, int smp);
+
+/**
+ * iucv_path_alloc
+ * @msglim: initial message limit
+ * @flags: initial flags
+ * @gfp: kmalloc allocation flag
+ *
+ * Allocate a new path structure for use with iucv_connect.
+ *
+ * Returns a pointer to the path structure, or NULL if the memory
+ * allocation failed.
+ */
+static inline struct iucv_path *iucv_path_alloc(u16 msglim, u8 flags, gfp_t gfp)
+{
+ struct iucv_path *path;
+
+ path = kzalloc(sizeof(struct iucv_path), gfp);
+ if (path) {
+ path->msglim = msglim;
+ path->flags = flags;
+ }
+ return path;
+}
+
+/**
+ * iucv_path_free
+ * @path: address of iucv path structure
+ *
+ * Frees a path structure.
+ */
+static inline void iucv_path_free(struct iucv_path *path)
+{
+ kfree(path);
+}
+
+/**
+ * iucv_path_accept
+ * @path: address of iucv path structure
+ * @handler: address of iucv handler structure
+ * @userdata: 16 bytes of data reflected to the communication partner
+ * @private: private data passed to interrupt handlers for this path
+ *
+ * This function is issued after the user received a connection pending
+ * external interrupt and now wishes to complete the IUCV communication path.
+ *
+ * Returns the result of the CP IUCV call.
+ */
+int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
+ u8 *userdata, void *private);
+
+/**
+ * iucv_path_connect
+ * @path: address of iucv path structure
+ * @handler: address of iucv handler structure
+ * @userid: 8-byte user identification
+ * @system: 8-byte target system identification
+ * @userdata: 16 bytes of data reflected to the communication partner
+ * @private: private data passed to interrupt handlers for this path
+ *
+ * This function establishes an IUCV path. Although the connect may complete
+ * successfully, you are not able to use the path until you receive an IUCV
+ * Connection Complete external interrupt.
+ *
+ * Returns the result of the CP IUCV call.
+ */
+int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
+ u8 *userid, u8 *system, u8 *userdata,
+ void *private);
+
+/**
+ * iucv_path_quiesce:
+ * @path: address of iucv path structure
+ * @userdata: 16 bytes of data reflected to the communication partner
+ *
+ * This function temporarily suspends incoming messages on an IUCV path.
+ * You can later reactivate the path by invoking the iucv_resume function.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_path_quiesce(struct iucv_path *path, u8 *userdata);
+
+/**
+ * iucv_path_resume:
+ * @path: address of iucv path structure
+ * @userdata: 16 bytes of data reflected to the communication partner
+ *
+ * This function resumes incoming messages on an IUCV path that has
+ * been stopped with iucv_path_quiesce.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_path_resume(struct iucv_path *path, u8 *userdata);
+
+/**
+ * iucv_path_sever
+ * @path: address of iucv path structure
+ * @userdata: 16 bytes of data reflected to the communication partner
+ *
+ * This function terminates an IUCV path.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_path_sever(struct iucv_path *path, u8 *userdata);
+
+/**
+ * iucv_message_purge
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @srccls: source class of message
+ *
+ * Cancels a message you have sent.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
+ u32 srccls);
+
+/**
+ * iucv_message_receive
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: flags that affect how the message is received (IUCV_IPBUFLST)
+ * @buffer: address of data buffer or address of struct iucv_array
+ * @size: length of data buffer
+ * @residual: if not NULL, receives the residual byte count
+ *
+ * This function receives messages that are being sent to you over
+ * established paths. This function will deal with RMDATA messages
+ * embedded in struct iucv_message as well.
+ *
+ * Locking: local_bh_enable/local_bh_disable
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *buffer, size_t size, size_t *residual);
+
+/**
+ * __iucv_message_receive
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: flags that affect how the message is received (IUCV_IPBUFLST)
+ * @buffer: address of data buffer or address of struct iucv_array
+ * @size: length of data buffer
+ * @residual: if not NULL, receives the residual byte count
+ *
+ * This function receives messages that are being sent to you over
+ * established paths. This function will deal with RMDATA messages
+ * embedded in struct iucv_message as well.
+ *
+ * Locking: no locking.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *buffer, size_t size,
+ size_t *residual);
+
+/**
+ * iucv_message_reject
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ *
+ * The reject function refuses a specified message. Between the time you
+ * are notified of a message and the time that you complete the message,
+ * the message may be rejected.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg);
+
+/**
+ * iucv_message_reply
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
+ * @reply: address of data buffer or address of struct iucv_array
+ * @size: length of reply data buffer
+ *
+ * This function responds to the two-way messages that you receive. You
+ * must identify completely the message to which you wish to reply. ie,
+ * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
+ * the parameter list.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *reply, size_t size);
+
+/**
+ * iucv_message_send
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
+ * @srccls: source class of message
+ * @buffer: address of data buffer or address of struct iucv_array
+ * @size: length of send buffer
+ *
+ * This function transmits data to another application. The data to be
+ * transmitted is in a buffer. This is a one-way message: the receiver
+ * will not reply to it.
+ *
+ * Locking: local_bh_enable/local_bh_disable
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, u32 srccls, void *buffer, size_t size);
+
+/**
+ * __iucv_message_send
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
+ * @srccls: source class of message
+ * @buffer: address of data buffer or address of struct iucv_array
+ * @size: length of send buffer
+ *
+ * This function transmits data to another application. The data to be
+ * transmitted is in a buffer. This is a one-way message: the receiver
+ * will not reply to it.
+ *
+ * Locking: no locking.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, u32 srccls, void *buffer, size_t size);
+
+/**
+ * iucv_message_send2way
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: how the message is sent and the reply is received
+ * (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_IPANSLST)
+ * @srccls: source class of message
+ * @buffer: address of data buffer or address of struct iucv_array
+ * @size: length of send buffer
+ * @answer: address of answer buffer or address of struct iucv_array
+ * @asize: size of reply buffer
+ * @residual: if not NULL, receives the residual byte count of the reply
+ *
+ * This function transmits data to another application. Data to be
+ * transmitted is in a buffer. The receiver of the send is expected to
+ * reply to the message and a buffer is provided into which IUCV moves
+ * the reply to this message.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, u32 srccls, void *buffer, size_t size,
+ void *answer, size_t asize, size_t *residual);
+
+struct iucv_interface {
+ int (*message_receive)(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *buffer, size_t size, size_t *residual);
+ int (*__message_receive)(struct iucv_path *path,
+ struct iucv_message *msg, u8 flags, void *buffer, size_t size,
+ size_t *residual);
+ int (*message_reply)(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *reply, size_t size);
+ int (*message_reject)(struct iucv_path *path, struct iucv_message *msg);
+ int (*message_send)(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, u32 srccls, void *buffer, size_t size);
+ int (*__message_send)(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, u32 srccls, void *buffer, size_t size);
+ int (*message_send2way)(struct iucv_path *path,
+ struct iucv_message *msg, u8 flags, u32 srccls, void *buffer,
+ size_t size, void *answer, size_t asize, size_t *residual);
+ int (*message_purge)(struct iucv_path *path, struct iucv_message *msg,
+ u32 srccls);
+ int (*path_accept)(struct iucv_path *path, struct iucv_handler *handler,
+ u8 userdata[16], void *private);
+ int (*path_connect)(struct iucv_path *path,
+ struct iucv_handler *handler,
+ u8 userid[8], u8 system[8], u8 userdata[16], void *private);
+ int (*path_quiesce)(struct iucv_path *path, u8 userdata[16]);
+ int (*path_resume)(struct iucv_path *path, u8 userdata[16]);
+ int (*path_sever)(struct iucv_path *path, u8 userdata[16]);
+ int (*iucv_register)(struct iucv_handler *handler, int smp);
+ void (*iucv_unregister)(struct iucv_handler *handler, int smp);
+ struct bus_type *bus;
+ struct device *root;
+};
+
+extern struct iucv_interface iucv_if;
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
new file mode 100644
index 000000000..d2ea5863e
--- /dev/null
+++ b/include/net/iw_handler.h
@@ -0,0 +1,552 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file defines the new driver API for Wireless Extensions
+ *
+ * Version : 8 16.3.07
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+ * Copyright (c) 2001-2007 Jean Tourrilhes, All Rights Reserved.
+ */
+
+#ifndef _IW_HANDLER_H
+#define _IW_HANDLER_H
+
+/************************** DOCUMENTATION **************************/
+/*
+ * Initial driver API (1996 -> onward) :
+ * -----------------------------------
+ * The initial API just sends the IOCTL request received from user space
+ * to the driver (via the driver ioctl handler). The driver has to
+ * handle all the rest...
+ *
+ * The initial API also defines a specific handler in struct net_device
+ * to handle wireless statistics.
+ *
+ * The initial API served us well and has proven a reasonably good design.
+ * However, it has a few shortcomings :
+ * o No events, everything is a request to the driver.
+ * o Large ioctl function in driver with gigantic switch statement
+ * (i.e. spaghetti code).
+ * o Driver has to mess with copy_to/from_user, and in many cases
+ * does it improperly. Common mistakes are :
+ * * buffer overflows (no checks or off by one checks)
+ * * call copy_to/from_user with irq disabled
+ * o The user space interface is tied to ioctl because of the use of
+ * copy_to/from_user.
+ *
+ * New driver API (2002 -> onward) :
+ * -------------------------------
+ * The new driver API is just a bunch of standard functions (handlers),
+ * each handling a specific Wireless Extension. The driver just exports
+ * the list of handlers it supports, and those will be called appropriately.
+ *
+ * I tried to keep the main advantage of the previous API (simplicity,
+ * efficiency and light weight), and also I provide a good dose of backward
+ * compatibility (most structures are the same, driver can use both API
+ * simultaneously, ...).
+ * Hopefully, I've also addressed the shortcomings of the initial API.
+ *
+ * The advantages of the new API are :
+ * o Handling of Extensions in driver broken in small contained functions
+ * o Tighter checks of ioctl before calling the driver
+ * o Flexible commit strategy (at least, the start of it)
+ * o Backward compatibility (can be mixed with old API)
+ * o Driver doesn't have to worry about memory and user-space issues
+ * The last point is important for the following reasons :
+ * o You are now able to call the new driver API from any API you
+ * want (including from within other parts of the kernel).
+ * o Common mistakes are avoided (buffer overflow, user space copy
+ * with irq disabled and so on).
+ *
+ * The drawbacks of the new API are :
+ * o bloat (especially kernel)
+ * o need to migrate existing drivers to new API
+ * My initial testing shows that the new API adds around 3kB to the kernel
+ * and saves between 0 and 5kB in a typical driver.
+ * Also, as all structures and data types are unchanged, the migration is
+ * quite straightforward (but tedious).
+ *
+ * ---
+ *
+ * The new driver API is defined below in this file. User space should
+ * not be aware of what's happening down there...
+ *
+ * A new kernel wrapper is in charge of validating the IOCTLs and calling
+ * the appropriate driver handler. This is implemented in :
+ * # net/core/wireless.c
+ *
+ * The driver export the list of handlers in :
+ * # include/linux/netdevice.h (one place)
+ *
+ * The new driver API is available for WIRELESS_EXT >= 13.
+ * Good luck with migration to the new API ;-)
+ */
+
+/* ---------------------- THE IMPLEMENTATION ---------------------- */
+/*
+ * Some of the choices I've made are pretty controversial. Defining an
+ * API is very much about weighing compromises. This goes into some of the
+ * details and the thinking behind the implementation.
+ *
+ * Implementation goals :
+ * --------------------
+ * The implementation goals were as follows :
+ * o Obvious : you should not need a PhD to understand what's happening,
+ * the benefit is easier maintenance.
+ * o Flexible : it should accommodate a wide variety of driver
+ * implementations and be as flexible as the old API.
+ * o Lean : it should be efficient memory wise to minimise the impact
+ * on kernel footprint.
+ * o Transparent to user space : the large number of user space
+ * applications that use Wireless Extensions should not need
+ * any modifications.
+ *
+ * Array of functions versus Struct of functions
+ * ---------------------------------------------
+ * 1) Having an array of functions allows the kernel code to access the
+ * handler in a single lookup, which is much more efficient (think hash
+ * table here).
+ * 2) The only drawback is that driver writers may put their handlers in
+ * the wrong slot. This is trivial to test (I set the frequency, the
+ * bitrate changes). Once the handler is in the proper slot, it will be
+ * there forever, because the array is only extended at the end.
+ * 3) Backward/forward compatibility : adding new handlers just requires
+ * extending the array, so you can put newer drivers in older kernels
+ * without having to patch the kernel code (and vice versa).
+ *
+ * All handlers are of the same generic type
+ * ----------------------------------------
+ * That's a feature !!!
+ * 1) Having a generic handler allows for generic code, which is more
+ * efficient. If each of the handlers were individually typed I would need
+ * to add a big switch in the kernel (== more bloat). This solution is
+ * more scalable, adding new Wireless Extensions doesn't add new code.
+ * 2) You can use the same handler in different slots of the array. For
+ * hardware, it may be more efficient or logical to handle multiple
+ * Wireless Extensions with a single function, and the API allows you to
+ * do that. (An example would be a single record on the card to control
+ * both bitrate and frequency, the handler would read the old record,
+ * modify it according to info->cmd and rewrite it).
+ *
+ * Functions prototype uses union iwreq_data
+ * -----------------------------------------
+ * Some would have preferred functions defined this way :
+ * static int mydriver_ioctl_setrate(struct net_device *dev,
+ * long rate, int auto)
+ * 1) The kernel code doesn't "validate" the content of iwreq_data, and
+ * can't do it (different hardware may have different notion of what a
+ * valid frequency is), so we don't pretend that we do it.
+ * 2) The above form is not extendable. If I want to add a flag (for
+ * example to distinguish setting max rate and basic rate), I would
+ * break the prototype. Using iwreq_data is more flexible.
+ * 3) Also, the above form is not generic (see above).
+ * 4) I don't expect driver developers to use the wrong field of the
+ * union (Doh !), so static typechecking doesn't add much value.
+ * 5) Lastly, you can skip the union by doing :
+ * static int mydriver_ioctl_setrate(struct net_device *dev,
+ * struct iw_request_info *info,
+ * struct iw_param *rrq,
+ * char *extra)
+ * And then adding the handler in the array like this :
+ * (iw_handler) mydriver_ioctl_setrate, // SIOCSIWRATE
+ *
+ * Using functions and not a registry
+ * ----------------------------------
+ * Another implementation option would have been for every instance to
+ * define a registry (a struct containing all the Wireless Extensions)
+ * and only have a function to commit the registry to the hardware.
+ * 1) This approach can be emulated by the current code, but not
+ * vice versa.
+ * 2) Some drivers don't keep any configuration in the driver; for them,
+ * adding such a registry would be significant bloat.
+ * 3) The code to translate from Wireless Extension to native format is
+ * needed anyway, so it would not significantly reduce the amount of code.
+ * 4) The current approach selectively translates Wireless Extensions
+ * to native format and selectively sets them, whereas the registry approach
+ * would require translating all WE and setting all parameters for any single
+ * change.
+ * 5) For many Wireless Extensions, the GET operation returns the current
+ * dynamic value, not the value that was set.
+ *
+ * This header is <net/iw_handler.h>
+ * ---------------------------------
+ * 1) This header is kernel space only and should not be exported to
+ * user space. Headers in "include/linux/" are exported, headers in
+ * "include/net/" are not.
+ *
+ * Mixed 32/64 bit issues
+ * ----------------------
+ * The Wireless Extensions are designed to be 64 bit clean, by using only
+ * datatypes with explicit storage size.
+ * There are some issues related to kernel and user space using different
+ * memory model, and in particular 64bit kernel with 32bit user space.
+ * The problem is related to struct iw_point, that contains a pointer
+ * that *may* need to be translated.
+ * This is quite messy. The new API doesn't solve this problem (it can't),
+ * but is a step in the right direction :
+ * 1) Meta data about each ioctl is easily available, so we know what type
+ * of translation is needed.
+ * 2) The move of data between kernel and user space is only done in a single
+ * place in the kernel, so adding specific hooks in there is possible.
+ * 3) In the long term, it allows moving away from using ioctl as the
+ * user space API.
+ *
+ * So many comments and so few code
+ * --------------------------------
+ * That's a feature. Comments won't bloat the resulting kernel binary.
+ */
+
+/***************************** INCLUDES *****************************/
+
+#include <linux/wireless.h> /* IOCTL user space API */
+#include <linux/if_ether.h>
+
+/***************************** VERSION *****************************/
+/*
+ * This constant is used to know which version of the driver API is
+ * available. Hopefully, this will be pretty stable and no changes
+ * will be needed...
+ * I just plan to increment with each new version.
+ */
+#define IW_HANDLER_VERSION 8
+
+/*
+ * Changes :
+ *
+ * V2 to V3
+ * --------
+ * - Move event definition in <linux/wireless.h>
+ * - Add Wireless Event support :
+ * o wireless_send_event() prototype
+ * o iwe_stream_add_event/point() inline functions
+ * V3 to V4
+ * --------
+ * - Reshuffle IW_HEADER_TYPE_XXX to map IW_PRIV_TYPE_XXX changes
+ *
+ * V4 to V5
+ * --------
+ * - Add new spy support : struct iw_spy_data & prototypes
+ *
+ * V5 to V6
+ * --------
+ * - Change the way we get to spy_data method for added safety
+ * - Remove spy #ifdef, they are always on -> cleaner code
+ * - Add IW_DESCR_FLAG_NOMAX flag for very large requests
+ * - Start migrating get_wireless_stats to struct iw_handler_def
+ *
+ * V6 to V7
+ * --------
+ * - Add struct ieee80211_device pointer in struct iw_public_data
+ * - Remove (struct iw_point *)->pointer from events and streams
+ * - Remove spy_offset from struct iw_handler_def
+ * - Add "check" version of event macros for ieee802.11 stack
+ *
+ * V7 to V8
+ * ----------
+ * - Prevent leaking of kernel space in stream on 64 bits.
+ */
+
+/**************************** CONSTANTS ****************************/
+
+/* Enhanced spy support available */
+#define IW_WIRELESS_SPY
+#define IW_WIRELESS_THRSPY
+
+/* Special error message for the driver to indicate that we
+ * should do a commit after return from the iw_handler */
+#define EIWCOMMIT EINPROGRESS
+
+/* Flags available in struct iw_request_info */
+#define IW_REQUEST_FLAG_COMPAT 0x0001 /* Compat ioctl call */
+
+/* Type of headers we know about (basically union iwreq_data) */
+#define IW_HEADER_TYPE_NULL 0 /* Not available */
+#define IW_HEADER_TYPE_CHAR 2 /* char [IFNAMSIZ] */
+#define IW_HEADER_TYPE_UINT 4 /* __u32 */
+#define IW_HEADER_TYPE_FREQ 5 /* struct iw_freq */
+#define IW_HEADER_TYPE_ADDR 6 /* struct sockaddr */
+#define IW_HEADER_TYPE_POINT 8 /* struct iw_point */
+#define IW_HEADER_TYPE_PARAM 9 /* struct iw_param */
+#define IW_HEADER_TYPE_QUAL 10 /* struct iw_quality */
+
+/* Handling flags */
+/* Most are not implemented. I just use them as a reminder of some
+ * cool features we might need one day ;-) */
+#define IW_DESCR_FLAG_NONE 0x0000 /* Obvious */
+/* Wrapper level flags */
+#define IW_DESCR_FLAG_DUMP 0x0001 /* Not part of the dump command */
+#define IW_DESCR_FLAG_EVENT 0x0002 /* Generate an event on SET */
+#define IW_DESCR_FLAG_RESTRICT 0x0004 /* GET : request is ROOT only */
+ /* SET : Omit payload from generated iwevent */
+#define IW_DESCR_FLAG_NOMAX 0x0008 /* GET : no limit on request size */
+/* Driver level flags */
+#define IW_DESCR_FLAG_WAIT 0x0100 /* Wait for driver event */
+
+/****************************** TYPES ******************************/
+
+/* ----------------------- WIRELESS HANDLER ----------------------- */
+/*
+ * A wireless handler is just a standard function that looks like the
+ * ioctl handler.
+ * We also define here what a handler list looks like... As the Wireless
+ * Extension space is quite dense, we use a simple array, which is faster
+ * (that's the perfect hash table ;-).
+ */
+
+/*
+ * Meta data about the request passed to the iw_handler.
+ * Most handlers can safely ignore what's in there.
+ * The 'cmd' field might come in handy if you want to use the same handler
+ * for multiple commands...
+ * This struct is also my long term insurance. I can add new fields here
+ * without breaking the prototype of iw_handler...
+ */
+struct iw_request_info {
+ __u16 cmd; /* Wireless Extension command */
+ __u16 flags; /* More to come ;-) */
+};
+
+struct net_device;
+
+/*
+ * This is how a function handling a Wireless Extension should look
+ * like (both get and set, standard and private).
+ */
+typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+
+/*
+ * This defines all the handlers that the driver exports.
+ * As you need only one per driver type, please use a static const
+ * shared by all driver instances... Same for the members...
+ * This will be linked from net_device in <linux/netdevice.h>
+ */
+struct iw_handler_def {
+
+ /* Array of handlers for standard ioctls
+ * We will call dev->wireless_handlers->standard[ioctl - SIOCIWFIRST]
+ */
+ const iw_handler * standard;
+ /* Number of handlers defined (more precisely, index of the
+ * last defined handler + 1) */
+ __u16 num_standard;
+
+#ifdef CONFIG_WEXT_PRIV
+ __u16 num_private;
+ /* Number of private arg description */
+ __u16 num_private_args;
+ /* Array of handlers for private ioctls
+ * Will call dev->wireless_handlers->private[ioctl - SIOCIWFIRSTPRIV]
+ */
+ const iw_handler * private;
+
+ /* Arguments of private handler. This one is just a list, so you
+ * can put it in any order you want and should not leave holes...
+ * We will automatically export that to user space... */
+ const struct iw_priv_args * private_args;
+#endif
+
+ /* New location of get_wireless_stats, to de-bloat struct net_device.
+ * The old pointer in struct net_device will be gradually phased
+ * out, and drivers are encouraged to use this one... */
+ struct iw_statistics* (*get_wireless_stats)(struct net_device *dev);
+};
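+
+/*
+ * Illustrative sketch (the my_* symbols are hypothetical): the standard
+ * array is indexed by (ioctl - SIOCIWFIRST), so the slot position selects
+ * the command a handler serves.
+ *
+ *	static const iw_handler my_standard[] = {
+ *		[SIOCGIWNAME - SIOCIWFIRST] = my_get_name,
+ *		[SIOCSIWFREQ - SIOCIWFIRST] = my_set_freq,
+ *	};
+ *
+ *	static const struct iw_handler_def my_handler_def = {
+ *		.standard	= my_standard,
+ *		.num_standard	= ARRAY_SIZE(my_standard),
+ *	};
+ *
+ *	dev->wireless_handlers = &my_handler_def;
+ */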
+
+/* ---------------------- IOCTL DESCRIPTION ---------------------- */
+/*
+ * One of the main goals of the new interface is to handle the user
+ * space/kernel space memory moves entirely.
+ * For that, we need to know :
+ * o if iwreq is a pointer or contains the full data
+ * o what is the size of the data to copy
+ *
+ * For private IOCTLs, we use the same rules as used by iwpriv and
+ * defined in struct iw_priv_args.
+ *
+ * For standard IOCTLs, things are quite different and we need to
+ * use the structures below. Actually, this struct is also more
+ * efficient, but that's another story...
+ */
+
+/*
+ * Describes what a standard IOCTL looks like.
+ */
+struct iw_ioctl_description {
+ __u8 header_type; /* NULL, iw_point or other */
+ __u8 token_type; /* Future */
+ __u16 token_size; /* Granularity of payload */
+ __u16 min_tokens; /* Min acceptable token number */
+ __u16 max_tokens; /* Max acceptable token number */
+ __u32 flags; /* Special handling of the request */
+};
+
+/* Need to think of short header translation table. Later. */
+
+/* --------------------- ENHANCED SPY SUPPORT --------------------- */
+/*
+ * In the old days, the driver was handling spy support all by itself.
+ * Now, the driver can delegate this task to Wireless Extensions.
+ * It needs to include this struct in its private part and use the
+ * standard spy iw_handler.
+ */
+
+/*
+ * Instance specific spy data, i.e. addresses spied and quality for them.
+ */
+struct iw_spy_data {
+ /* --- Standard spy support --- */
+ int spy_number;
+ u_char spy_address[IW_MAX_SPY][ETH_ALEN];
+ struct iw_quality spy_stat[IW_MAX_SPY];
+ /* --- Enhanced spy support (event) */
+ struct iw_quality spy_thr_low; /* Low threshold */
+ struct iw_quality spy_thr_high; /* High threshold */
+ u_char spy_thr_under[IW_MAX_SPY];
+};
+
+/* --------------------- DEVICE WIRELESS DATA --------------------- */
+/*
+ * This is all the wireless data specific to a device instance that
+ * is managed by the core of Wireless Extensions or the 802.11 layer.
+ * We only keep pointer to those structures, so that a driver is free
+ * to share them between instances.
+ * This structure should be initialised before registering the device.
+ * Access to this data follows the same rules as any other struct net_device
+ * data (i.e. valid as long as the struct net_device exists, same locking rules).
+ */
+/* Forward declaration */
+struct libipw_device;
+/* The struct */
+struct iw_public_data {
+ /* Driver enhanced spy support */
+ struct iw_spy_data * spy_data;
+ /* Legacy structure managed by the ipw2x00-specific IEEE 802.11 layer */
+ struct libipw_device * libipw;
+};
+
+/**************************** PROTOTYPES ****************************/
+/*
+ * Functions part of the Wireless Extensions (defined in net/core/wireless.c).
+ * Those may be called only within the kernel.
+ */
+
+/* First : function strictly used inside the kernel */
+
+/* Handle /proc/net/wireless, called in net/core/dev.c */
+int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
+
+/* Second : functions that may be called by driver modules */
+
+/* Send a single event to user space */
+void wireless_send_event(struct net_device *dev, unsigned int cmd,
+ union iwreq_data *wrqu, const char *extra);
+#ifdef CONFIG_WEXT_CORE
+/* flush all previous wext events - if work is done from netdev notifiers */
+void wireless_nlevent_flush(void);
+#else
+static inline void wireless_nlevent_flush(void) {}
+#endif
+
+/* We may need a function to send a stream of events to user space.
+ * More on that later... */
+
+/* Standard handler for SIOCSIWSPY */
+int iw_handler_set_spy(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+/* Standard handler for SIOCGIWSPY */
+int iw_handler_get_spy(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+/* Standard handler for SIOCSIWTHRSPY */
+int iw_handler_set_thrspy(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+/* Standard handler for SIOCGIWTHRSPY */
+int iw_handler_get_thrspy(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+/* Driver call to update spy records */
+void wireless_spy_update(struct net_device *dev, unsigned char *address,
+ struct iw_quality *wstats);
+
+/************************* INLINE FUNCTIONS *************************/
+/*
+ * Functions that are so simple that it's more efficient to inline them
+ */
+
+static inline int iwe_stream_lcp_len(struct iw_request_info *info)
+{
+#ifdef CONFIG_COMPAT
+ if (info->flags & IW_REQUEST_FLAG_COMPAT)
+ return IW_EV_COMPAT_LCP_LEN;
+#endif
+ return IW_EV_LCP_LEN;
+}
+
+static inline int iwe_stream_point_len(struct iw_request_info *info)
+{
+#ifdef CONFIG_COMPAT
+ if (info->flags & IW_REQUEST_FLAG_COMPAT)
+ return IW_EV_COMPAT_POINT_LEN;
+#endif
+ return IW_EV_POINT_LEN;
+}
+
+static inline int iwe_stream_event_len_adjust(struct iw_request_info *info,
+ int event_len)
+{
+#ifdef CONFIG_COMPAT
+ if (info->flags & IW_REQUEST_FLAG_COMPAT) {
+ event_len -= IW_EV_LCP_LEN;
+ event_len += IW_EV_COMPAT_LCP_LEN;
+ }
+#endif
+
+ return event_len;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wrapper to add a Wireless Event to a stream of events.
+ */
+char *iwe_stream_add_event(struct iw_request_info *info, char *stream,
+ char *ends, struct iw_event *iwe, int event_len);
+
+static inline char *
+iwe_stream_add_event_check(struct iw_request_info *info, char *stream,
+ char *ends, struct iw_event *iwe, int event_len)
+{
+ char *res = iwe_stream_add_event(info, stream, ends, iwe, event_len);
+
+ if (res == stream)
+ return ERR_PTR(-E2BIG);
+ return res;
+}
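+
+/*
+ * Sketch (illustrative) of emitting one event into a scan-results
+ * stream; current_ev and end_buf are hypothetical cursor/buffer-end
+ * pointers:
+ *
+ *	struct iw_event iwe;
+ *
+ *	iwe.cmd = SIOCGIWAP;
+ *	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ *	memcpy(iwe.u.ap_addr.sa_data, bssid, ETH_ALEN);
+ *	current_ev = iwe_stream_add_event(info, current_ev, end_buf,
+ *					  &iwe, IW_EV_ADDR_LEN);
+ */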
+
+/*------------------------------------------------------------------*/
+/*
+ * Wrapper to add a short Wireless Event containing a pointer to a
+ * stream of events.
+ */
+char *iwe_stream_add_point(struct iw_request_info *info, char *stream,
+ char *ends, struct iw_event *iwe, char *extra);
+
+static inline char *
+iwe_stream_add_point_check(struct iw_request_info *info, char *stream,
+ char *ends, struct iw_event *iwe, char *extra)
+{
+ char *res = iwe_stream_add_point(info, stream, ends, iwe, extra);
+
+ if (res == stream)
+ return ERR_PTR(-E2BIG);
+ return res;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wrapper to add a value to a Wireless Event in a stream of events.
+ * Be careful, this one is tricky to use properly :
+ * At the first run, you need to have (value = event + IW_EV_LCP_LEN).
+ */
+char *iwe_stream_add_value(struct iw_request_info *info, char *event,
+ char *value, char *ends, struct iw_event *iwe,
+ int event_len);
+
+#endif /* _IW_HANDLER_H */
diff --git a/include/net/kcm.h b/include/net/kcm.h
new file mode 100644
index 000000000..2d704f8f4
--- /dev/null
+++ b/include/net/kcm.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Kernel Connection Multiplexor
+ *
+ * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
+ */
+
+#ifndef __NET_KCM_H_
+#define __NET_KCM_H_
+
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/strparser.h>
+#include <uapi/linux/kcm.h>
+
+extern unsigned int kcm_net_id;
+
+#define KCM_STATS_ADD(stat, count) ((stat) += (count))
+#define KCM_STATS_INCR(stat) ((stat)++)
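+
+/*
+ * Sketch (illustrative): the stats macros are plain accumulators, updated
+ * under the lock protecting the corresponding structure, e.g.:
+ *
+ *	KCM_STATS_INCR(psock->stats.tx_msgs);
+ *	KCM_STATS_ADD(psock->stats.tx_bytes, sent);
+ */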
+
+struct kcm_psock_stats {
+ unsigned long long tx_msgs;
+ unsigned long long tx_bytes;
+ unsigned long long reserved;
+ unsigned long long unreserved;
+ unsigned int tx_aborts;
+};
+
+struct kcm_mux_stats {
+ unsigned long long rx_msgs;
+ unsigned long long rx_bytes;
+ unsigned long long tx_msgs;
+ unsigned long long tx_bytes;
+ unsigned int rx_ready_drops;
+ unsigned int tx_retries;
+ unsigned int psock_attach;
+ unsigned int psock_unattach_rsvd;
+ unsigned int psock_unattach;
+};
+
+struct kcm_stats {
+ unsigned long long rx_msgs;
+ unsigned long long rx_bytes;
+ unsigned long long tx_msgs;
+ unsigned long long tx_bytes;
+};
+
+struct kcm_tx_msg {
+ unsigned int sent;
+ unsigned int fragidx;
+ unsigned int frag_offset;
+ unsigned int msg_flags;
+ struct sk_buff *frag_skb;
+ struct sk_buff *last_skb;
+};
+
+/* Socket structure for KCM client sockets */
+struct kcm_sock {
+ struct sock sk;
+ struct kcm_mux *mux;
+ struct list_head kcm_sock_list;
+ int index;
+ u32 done : 1;
+ struct work_struct done_work;
+
+ struct kcm_stats stats;
+
+ /* Transmit */
+ struct kcm_psock *tx_psock;
+ struct work_struct tx_work;
+ struct list_head wait_psock_list;
+ struct sk_buff *seq_skb;
+ u32 tx_stopped : 1;
+
+ /* Don't use bit fields here, these are set under different locks */
+ bool tx_wait;
+ bool tx_wait_more;
+
+ /* Receive */
+ struct kcm_psock *rx_psock;
+ struct list_head wait_rx_list; /* KCMs waiting to receive */
+ bool rx_wait;
+ u32 rx_disabled : 1;
+};
+
+struct bpf_prog;
+
+/* Structure for an attached lower socket */
+struct kcm_psock {
+ struct sock *sk;
+ struct strparser strp;
+ struct kcm_mux *mux;
+ int index;
+
+ u32 tx_stopped : 1;
+ u32 done : 1;
+ u32 unattaching : 1;
+
+ void (*save_state_change)(struct sock *sk);
+ void (*save_data_ready)(struct sock *sk);
+ void (*save_write_space)(struct sock *sk);
+
+ struct list_head psock_list;
+
+ struct kcm_psock_stats stats;
+
+ /* Receive */
+ struct list_head psock_ready_list;
+ struct bpf_prog *bpf_prog;
+ struct kcm_sock *rx_kcm;
+ unsigned long long saved_rx_bytes;
+ unsigned long long saved_rx_msgs;
+ struct sk_buff *ready_rx_msg;
+
+ /* Transmit */
+ struct kcm_sock *tx_kcm;
+ struct list_head psock_avail_list;
+ unsigned long long saved_tx_bytes;
+ unsigned long long saved_tx_msgs;
+};
+
+/* Per net MUX list */
+struct kcm_net {
+ struct mutex mutex;
+ struct kcm_psock_stats aggregate_psock_stats;
+ struct kcm_mux_stats aggregate_mux_stats;
+ struct strp_aggr_stats aggregate_strp_stats;
+ struct list_head mux_list;
+ int count;
+};
+
+/* Structure for a MUX */
+struct kcm_mux {
+ struct list_head kcm_mux_list;
+ struct rcu_head rcu;
+ struct kcm_net *knet;
+
+ struct list_head kcm_socks; /* All KCM sockets on MUX */
+ int kcm_socks_cnt; /* Total KCM socket count for MUX */
+ struct list_head psocks; /* List of all psocks on MUX */
+ int psocks_cnt; /* Total attached sockets */
+
+ struct kcm_mux_stats stats;
+ struct kcm_psock_stats aggregate_psock_stats;
+ struct strp_aggr_stats aggregate_strp_stats;
+
+ /* Receive */
+ spinlock_t rx_lock ____cacheline_aligned_in_smp;
+ struct list_head kcm_rx_waiters; /* KCMs waiting to receive */
+ struct list_head psocks_ready; /* List of psocks with a msg ready */
+ struct sk_buff_head rx_hold_queue;
+
+ /* Transmit */
+ spinlock_t lock ____cacheline_aligned_in_smp; /* TX and mux locking */
+ struct list_head psocks_avail; /* List of available psocks */
+ struct list_head kcm_tx_waiters; /* KCMs waiting for a TX psock */
+};
+
+#ifdef CONFIG_PROC_FS
+int kcm_proc_init(void);
+void kcm_proc_exit(void);
+#else
+static inline int kcm_proc_init(void) { return 0; }
+static inline void kcm_proc_exit(void) { }
+#endif
+
+static inline void aggregate_psock_stats(struct kcm_psock_stats *stats,
+ struct kcm_psock_stats *agg_stats)
+{
+ /* Save psock statistics in the mux when psock is being unattached. */
+
+#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat)
+ SAVE_PSOCK_STATS(tx_msgs);
+ SAVE_PSOCK_STATS(tx_bytes);
+ SAVE_PSOCK_STATS(reserved);
+ SAVE_PSOCK_STATS(unreserved);
+ SAVE_PSOCK_STATS(tx_aborts);
+#undef SAVE_PSOCK_STATS
+}
+
+static inline void aggregate_mux_stats(struct kcm_mux_stats *stats,
+ struct kcm_mux_stats *agg_stats)
+{
+ /* Save psock statistics in the mux when psock is being unattached. */
+
+#define SAVE_MUX_STATS(_stat) (agg_stats->_stat += stats->_stat)
+ SAVE_MUX_STATS(rx_msgs);
+ SAVE_MUX_STATS(rx_bytes);
+ SAVE_MUX_STATS(tx_msgs);
+ SAVE_MUX_STATS(tx_bytes);
+ SAVE_MUX_STATS(rx_ready_drops);
+ SAVE_MUX_STATS(psock_attach);
+ SAVE_MUX_STATS(psock_unattach_rsvd);
+ SAVE_MUX_STATS(psock_unattach);
+#undef SAVE_MUX_STATS
+}
+
+#endif /* __NET_KCM_H_ */
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
new file mode 100644
index 000000000..031c661aa
--- /dev/null
+++ b/include/net/l3mdev.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/net/l3mdev.h - L3 master device API
+ * Copyright (c) 2015 Cumulus Networks
+ * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
+ */
+#ifndef _NET_L3MDEV_H_
+#define _NET_L3MDEV_H_
+
+#include <net/dst.h>
+#include <net/fib_rules.h>
+
+enum l3mdev_type {
+ L3MDEV_TYPE_UNSPEC,
+ L3MDEV_TYPE_VRF,
+ __L3MDEV_TYPE_MAX
+};
+
+#define L3MDEV_TYPE_MAX (__L3MDEV_TYPE_MAX - 1)
+
+typedef int (*lookup_by_table_id_t)(struct net *net, u32 table_id);
+
+/**
+ * struct l3mdev_ops - l3mdev operations
+ *
+ * @l3mdev_fib_table: Get FIB table id to use for lookups
+ *
+ * @l3mdev_l3_rcv: Hook in L3 receive path
+ *
+ * @l3mdev_l3_out: Hook in L3 output path
+ *
+ * @l3mdev_link_scope_lookup: IPv6 lookup for link-local and multicast destinations
+ */
+
+struct l3mdev_ops {
+ u32 (*l3mdev_fib_table)(const struct net_device *dev);
+ struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *dev,
+ struct sk_buff *skb, u16 proto);
+ struct sk_buff * (*l3mdev_l3_out)(struct net_device *dev,
+ struct sock *sk, struct sk_buff *skb,
+ u16 proto);
+
+ /* IPv6 ops */
+ struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *dev,
+ struct flowi6 *fl6);
+};
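To show how a driver consumes this ops table, a minimal sketch of a VRF-style device wiring up its hooks; my_vrf_fib_table(), my_vrf_l3_rcv() and the fixed table id are illustrative assumptions, not the real VRF driver:

/* Return the FIB table id lookups should use for this device. */
static u32 my_vrf_fib_table(const struct net_device *dev)
{
	return 100;	/* illustrative fixed table id */
}

/* Redirect the skb to the master so the stack resolves routes in
 * the master's FIB table.
 */
static struct sk_buff *my_vrf_l3_rcv(struct net_device *dev,
				     struct sk_buff *skb, u16 proto)
{
	skb->dev = dev;
	return skb;
}

static const struct l3mdev_ops my_vrf_l3mdev_ops = {
	.l3mdev_fib_table	= my_vrf_fib_table,
	.l3mdev_l3_rcv		= my_vrf_l3_rcv,
};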
+
+#ifdef CONFIG_NET_L3_MASTER_DEV
+
+int l3mdev_table_lookup_register(enum l3mdev_type l3type,
+ lookup_by_table_id_t fn);
+
+void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
+ lookup_by_table_id_t fn);
+
+int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net,
+ u32 table_id);
+
+int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
+ struct fib_lookup_arg *arg);
+
+void l3mdev_update_flow(struct net *net, struct flowi *fl);
+
+int l3mdev_master_ifindex_rcu(const struct net_device *dev);
+static inline int l3mdev_master_ifindex(struct net_device *dev)
+{
+ int ifindex;
+
+ rcu_read_lock();
+ ifindex = l3mdev_master_ifindex_rcu(dev);
+ rcu_read_unlock();
+
+ return ifindex;
+}
+
+static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
+{
+ struct net_device *dev;
+ int rc = 0;
+
+ if (likely(ifindex)) {
+ rcu_read_lock();
+
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (dev)
+ rc = l3mdev_master_ifindex_rcu(dev);
+
+ rcu_read_unlock();
+ }
+
+ return rc;
+}
+
+static inline
+struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
+{
+ /* netdev_master_upper_dev_get_rcu calls
+ * list_first_or_null_rcu to walk the upper dev list.
+ * list_first_or_null_rcu does not handle a const arg. We aren't
+ * making changes, just want the master device from that list so
+ * typecast to remove the const
+ */
+ struct net_device *dev = (struct net_device *)_dev;
+ struct net_device *master;
+
+ if (!dev)
+ return NULL;
+
+ if (netif_is_l3_master(dev))
+ master = dev;
+ else if (netif_is_l3_slave(dev))
+ master = netdev_master_upper_dev_get_rcu(dev);
+ else
+ master = NULL;
+
+ return master;
+}
+
+int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex);
+static inline
+int l3mdev_master_upper_ifindex_by_index(struct net *net, int ifindex)
+{
+ rcu_read_lock();
+ ifindex = l3mdev_master_upper_ifindex_by_index_rcu(net, ifindex);
+ rcu_read_unlock();
+
+ return ifindex;
+}
+
+u32 l3mdev_fib_table_rcu(const struct net_device *dev);
+u32 l3mdev_fib_table_by_index(struct net *net, int ifindex);
+static inline u32 l3mdev_fib_table(const struct net_device *dev)
+{
+ u32 tb_id;
+
+ rcu_read_lock();
+ tb_id = l3mdev_fib_table_rcu(dev);
+ rcu_read_unlock();
+
+ return tb_id;
+}
+
+static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
+{
+ struct net_device *dev;
+ bool rc = false;
+
+ if (ifindex == 0)
+ return false;
+
+ rcu_read_lock();
+
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (dev)
+ rc = netif_is_l3_master(dev);
+
+ rcu_read_unlock();
+
+ return rc;
+}
+
+struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6);
+
+static inline
+struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
+{
+ struct net_device *master = NULL;
+
+ if (netif_is_l3_slave(skb->dev))
+ master = netdev_master_upper_dev_get_rcu(skb->dev);
+ else if (netif_is_l3_master(skb->dev) ||
+ netif_has_l3_rx_handler(skb->dev))
+ master = skb->dev;
+
+ if (master && master->l3mdev_ops->l3mdev_l3_rcv)
+ skb = master->l3mdev_ops->l3mdev_l3_rcv(master, skb, proto);
+
+ return skb;
+}
+
+static inline
+struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
+{
+ return l3mdev_l3_rcv(skb, AF_INET);
+}
+
+static inline
+struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
+{
+ return l3mdev_l3_rcv(skb, AF_INET6);
+}
+
+static inline
+struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto)
+{
+ struct net_device *dev = skb_dst(skb)->dev;
+
+ if (netif_is_l3_slave(dev)) {
+ struct net_device *master;
+
+ master = netdev_master_upper_dev_get_rcu(dev);
+ if (master && master->l3mdev_ops->l3mdev_l3_out)
+ skb = master->l3mdev_ops->l3mdev_l3_out(master, sk,
+ skb, proto);
+ }
+
+ return skb;
+}
+
+static inline
+struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
+{
+ return l3mdev_l3_out(sk, skb, AF_INET);
+}
+
+static inline
+struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
+{
+ return l3mdev_l3_out(sk, skb, AF_INET6);
+}
+#else
+
+static inline int l3mdev_master_ifindex_rcu(const struct net_device *dev)
+{
+ return 0;
+}
+static inline int l3mdev_master_ifindex(struct net_device *dev)
+{
+ return 0;
+}
+
+static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
+{
+ return 0;
+}
+
+static inline
+int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
+{
+ return 0;
+}
+static inline
+int l3mdev_master_upper_ifindex_by_index(struct net *net, int ifindex)
+{
+ return 0;
+}
+
+static inline
+struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev)
+{
+ return NULL;
+}
+
+static inline u32 l3mdev_fib_table_rcu(const struct net_device *dev)
+{
+ return 0;
+}
+static inline u32 l3mdev_fib_table(const struct net_device *dev)
+{
+ return 0;
+}
+static inline u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
+{
+ return 0;
+}
+
+static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
+{
+ return false;
+}
+
+static inline
+struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6)
+{
+ return NULL;
+}
+
+static inline
+struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
+{
+ return skb;
+}
+
+static inline
+struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
+{
+ return skb;
+}
+
+static inline
+struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
+{
+ return skb;
+}
+
+static inline
+struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
+{
+ return skb;
+}
+
+static inline
+int l3mdev_table_lookup_register(enum l3mdev_type l3type,
+ lookup_by_table_id_t fn)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
+ lookup_by_table_id_t fn)
+{
+}
+
+static inline
+int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net,
+ u32 table_id)
+{
+ return -ENODEV;
+}
+
+static inline
+int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
+ struct fib_lookup_arg *arg)
+{
+ return 1;
+}
+static inline
+void l3mdev_update_flow(struct net *net, struct flowi *fl)
+{
+}
+#endif
+
+#endif /* _NET_L3MDEV_H_ */
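A sketch of the caller-side pattern the stubs above enable: code can consult the L3 domain unconditionally and fall back to the main table when the device is not enslaved. resolve_table() is a hypothetical caller:

/* Pick the FIB table for a lookup bound to output interface 'oif'. */
static u32 resolve_table(struct net *net, int oif)
{
	u32 tb_id = l3mdev_fib_table_by_index(net, oif);

	/* 0 means no L3 domain (or !CONFIG_NET_L3_MASTER_DEV). */
	return tb_id ? : RT_TABLE_MAIN;
}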
diff --git a/include/net/lag.h b/include/net/lag.h
new file mode 100644
index 000000000..95b880e6f
--- /dev/null
+++ b/include/net/lag.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IF_LAG_H
+#define _LINUX_IF_LAG_H
+
+#include <linux/netdevice.h>
+#include <linux/if_team.h>
+#include <net/bonding.h>
+
+static inline bool net_lag_port_dev_txable(const struct net_device *port_dev)
+{
+ if (netif_is_team_port(port_dev))
+ return team_port_dev_txable(port_dev);
+ else
+ return bond_is_active_slave_dev(port_dev);
+}
+
+#endif /* _LINUX_IF_LAG_H */
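For example, a failover path might use this helper to skip LAG ports that cannot transmit; pick_tx_port() and the ports array are illustrative:

/* Return the first port that is currently allowed to transmit. */
static struct net_device *pick_tx_port(struct net_device **ports, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (net_lag_port_dev_txable(ports[i]))
			return ports[i];	/* active team/bond slave */
	return NULL;
}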
diff --git a/include/net/lapb.h b/include/net/lapb.h
new file mode 100644
index 000000000..124ee122f
--- /dev/null
+++ b/include/net/lapb.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LAPB_H
+#define _LAPB_H
+#include <linux/lapb.h>
+#include <linux/refcount.h>
+
+#define LAPB_HEADER_LEN 20 /* LAPB over Ethernet + a bit more */
+
+#define LAPB_ACK_PENDING_CONDITION 0x01
+#define LAPB_REJECT_CONDITION 0x02
+#define LAPB_PEER_RX_BUSY_CONDITION 0x04
+
+/* Control field templates */
+#define LAPB_I 0x00 /* Information frames */
+#define LAPB_S 0x01 /* Supervisory frames */
+#define LAPB_U 0x03 /* Unnumbered frames */
+
+#define LAPB_RR 0x01 /* Receiver ready */
+#define LAPB_RNR 0x05 /* Receiver not ready */
+#define LAPB_REJ 0x09 /* Reject */
+
+#define LAPB_SABM 0x2F /* Set Asynchronous Balanced Mode */
+#define LAPB_SABME 0x6F /* Set Asynchronous Balanced Mode Extended */
+#define LAPB_DISC 0x43 /* Disconnect */
+#define LAPB_DM 0x0F /* Disconnected mode */
+#define LAPB_UA 0x63 /* Unnumbered acknowledge */
+#define LAPB_FRMR 0x87 /* Frame reject */
+
+#define LAPB_ILLEGAL 0x100 /* Impossible to be a real frame type */
+
+#define LAPB_SPF 0x10 /* Poll/final bit for standard LAPB */
+#define LAPB_EPF 0x01 /* Poll/final bit for extended LAPB */
+
+#define LAPB_FRMR_W 0x01 /* Control field invalid */
+#define LAPB_FRMR_X 0x02 /* I field invalid */
+#define LAPB_FRMR_Y 0x04 /* I field too long */
+#define LAPB_FRMR_Z 0x08 /* Invalid N(R) */
+
+#define LAPB_POLLOFF 0
+#define LAPB_POLLON 1
+
+/* LAPB C-bit */
+#define LAPB_COMMAND 1
+#define LAPB_RESPONSE 2
+
+#define LAPB_ADDR_A 0x03
+#define LAPB_ADDR_B 0x01
+#define LAPB_ADDR_C 0x0F
+#define LAPB_ADDR_D 0x07
+
+/* Define Link State constants. */
+enum {
+ LAPB_STATE_0, /* Disconnected State */
+ LAPB_STATE_1, /* Awaiting Connection State */
+ LAPB_STATE_2, /* Awaiting Disconnection State */
+ LAPB_STATE_3, /* Data Transfer State */
+ LAPB_STATE_4 /* Frame Reject State */
+};
+
+#define LAPB_DEFAULT_MODE (LAPB_STANDARD | LAPB_SLP | LAPB_DTE)
+#define LAPB_DEFAULT_WINDOW 7 /* Window=7 */
+#define LAPB_DEFAULT_T1 (5 * HZ) /* T1=5s */
+#define LAPB_DEFAULT_T2 (1 * HZ) /* T2=1s */
+#define LAPB_DEFAULT_N2 20 /* N2=20 */
+
+#define LAPB_SMODULUS 8
+#define LAPB_EMODULUS 128
+
+/*
+ * Information about the current frame.
+ */
+struct lapb_frame {
+ unsigned short type; /* Parsed type */
+ unsigned short nr, ns; /* N(R), N(S) */
+ unsigned char cr; /* Command/Response */
+ unsigned char pf; /* Poll/Final */
+ unsigned char control[2]; /* Original control data */
+};
+
+/*
+ * The per LAPB connection control structure.
+ */
+struct lapb_cb {
+ struct list_head node;
+ struct net_device *dev;
+
+ /* Link status fields */
+ unsigned int mode;
+ unsigned char state;
+ unsigned short vs, vr, va;
+ unsigned char condition;
+ unsigned short n2, n2count;
+ unsigned short t1, t2;
+ struct timer_list t1timer, t2timer;
+ bool t1timer_running, t2timer_running;
+
+ /* Internal control information */
+ struct sk_buff_head write_queue;
+ struct sk_buff_head ack_queue;
+ unsigned char window;
+ const struct lapb_register_struct *callbacks;
+
+ /* FRMR control information */
+ struct lapb_frame frmr_data;
+ unsigned char frmr_type;
+
+ spinlock_t lock;
+ refcount_t refcnt;
+};
+
+/* lapb_iface.c */
+void lapb_connect_confirmation(struct lapb_cb *lapb, int);
+void lapb_connect_indication(struct lapb_cb *lapb, int);
+void lapb_disconnect_confirmation(struct lapb_cb *lapb, int);
+void lapb_disconnect_indication(struct lapb_cb *lapb, int);
+int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *);
+int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *);
+
+/* lapb_in.c */
+void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *);
+
+/* lapb_out.c */
+void lapb_kick(struct lapb_cb *lapb);
+void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *, int);
+void lapb_establish_data_link(struct lapb_cb *lapb);
+void lapb_enquiry_response(struct lapb_cb *lapb);
+void lapb_timeout_response(struct lapb_cb *lapb);
+void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short);
+void lapb_check_need_response(struct lapb_cb *lapb, int, int);
+
+/* lapb_subr.c */
+void lapb_clear_queues(struct lapb_cb *lapb);
+void lapb_frames_acked(struct lapb_cb *lapb, unsigned short);
+void lapb_requeue_frames(struct lapb_cb *lapb);
+int lapb_validate_nr(struct lapb_cb *lapb, unsigned short);
+int lapb_decode(struct lapb_cb *lapb, struct sk_buff *, struct lapb_frame *);
+void lapb_send_control(struct lapb_cb *lapb, int, int, int);
+void lapb_transmit_frmr(struct lapb_cb *lapb);
+
+/* lapb_timer.c */
+void lapb_start_t1timer(struct lapb_cb *lapb);
+void lapb_start_t2timer(struct lapb_cb *lapb);
+void lapb_stop_t1timer(struct lapb_cb *lapb);
+void lapb_stop_t2timer(struct lapb_cb *lapb);
+int lapb_t1timer_running(struct lapb_cb *lapb);
+
+/*
+ * Debug levels.
+ * 0 = Off
+ * 1 = State Changes
+ * 2 = Packets I/O and State Changes
+ * 3 = Hex dumps, Packets I/O and State Changes.
+ */
+#define LAPB_DEBUG 0
+
+#define lapb_dbg(level, fmt, ...) \
+do { \
+ if (level < LAPB_DEBUG) \
+ pr_debug(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#endif
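A minimal sketch of how a device driver might attach to this machinery through the registration API in <linux/lapb.h>; my_attach(), my_data_indication() and my_data_transmit() are hypothetical driver functions:

static int my_data_indication(struct net_device *dev, struct sk_buff *skb);
static void my_data_transmit(struct net_device *dev, struct sk_buff *skb);

static const struct lapb_register_struct my_lapb_callbacks = {
	.data_indication = my_data_indication,	/* accepted frame, pass up */
	.data_transmit	 = my_data_transmit,	/* hand frame to hardware */
};

static int my_attach(struct net_device *dev)
{
	int rc = lapb_register(dev, &my_lapb_callbacks);

	if (rc == LAPB_OK)
		rc = lapb_connect_request(dev);	/* start the SABM(E) handshake */
	return rc;
}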
diff --git a/include/net/lib80211.h b/include/net/lib80211.h
new file mode 100644
index 000000000..8b47d3a51
--- /dev/null
+++ b/include/net/lib80211.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * lib80211.h -- common bits for IEEE802.11 wireless drivers
+ *
+ * Copyright (c) 2008, John W. Linville <linville@tuxdriver.com>
+ *
+ * Some bits copied from old ieee80211 component, w/ original copyright
+ * notices below:
+ *
+ * Original code based on Host AP (software wireless LAN access point) driver
+ * for Intersil Prism2/2.5/3.
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <j@w1.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi>
+ *
+ * Adaptation to a generic IEEE 802.11 stack by James Ketrenos
+ * <jketreno@linux.intel.com>
+ *
+ * Copyright (c) 2004, Intel Corporation
+ *
+ */
+
+#ifndef LIB80211_H
+#define LIB80211_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/if.h>
+#include <linux/skbuff.h>
+#include <linux/ieee80211.h>
+#include <linux/timer.h>
+#include <linux/seq_file.h>
+
+#define NUM_WEP_KEYS 4
+
+enum {
+ IEEE80211_CRYPTO_TKIP_COUNTERMEASURES = (1 << 0),
+};
+
+struct module;
+
+struct lib80211_crypto_ops {
+ const char *name;
+ struct list_head list;
+
+ /* init new crypto context (e.g., allocate private data space,
+ * select IV, etc.); returns NULL on failure or pointer to allocated
+ * private data on success */
+ void *(*init) (int keyidx);
+
+ /* deinitialize crypto context and free allocated private data */
+ void (*deinit) (void *priv);
+
+ /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
+ * value from decrypt_mpdu is passed as the keyidx value for
+ * decrypt_msdu. skb must have enough head and tail room for the
+ * encryption; if not, error will be returned; these functions are
+ * called for all MPDUs (i.e., fragments).
+ */
+ int (*encrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv);
+ int (*decrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv);
+
+ /* These functions are called for full MSDUs, i.e. full frames.
+ * These can be NULL if full MSDU operations are not needed. */
+ int (*encrypt_msdu) (struct sk_buff * skb, int hdr_len, void *priv);
+ int (*decrypt_msdu) (struct sk_buff * skb, int keyidx, int hdr_len,
+ void *priv);
+
+ int (*set_key) (void *key, int len, u8 * seq, void *priv);
+ int (*get_key) (void *key, int len, u8 * seq, void *priv);
+
+ /* procfs handler for printing out key information and possible
+ * statistics */
+ void (*print_stats) (struct seq_file *m, void *priv);
+
+ /* Crypto specific flag get/set for configuration settings */
+ unsigned long (*get_flags) (void *priv);
+ unsigned long (*set_flags) (unsigned long flags, void *priv);
+
+ /* maximum number of bytes added by encryption; encrypt buf is
+ * allocated with extra_prefix_len bytes, copy of in_buf, and
+ * extra_postfix_len; encrypt need not use all this space, but
+ * the result must start at the beginning of the buffer and correct
+ * length must be returned */
+ int extra_mpdu_prefix_len, extra_mpdu_postfix_len;
+ int extra_msdu_prefix_len, extra_msdu_postfix_len;
+
+ struct module *owner;
+};
+
+struct lib80211_crypt_data {
+ struct list_head list; /* delayed deletion list */
+ struct lib80211_crypto_ops *ops;
+ void *priv;
+ atomic_t refcnt;
+};
+
+struct lib80211_crypt_info {
+ char *name;
+ /* Most clients will already have a lock,
+ so just point to that. */
+ spinlock_t *lock;
+
+ struct lib80211_crypt_data *crypt[NUM_WEP_KEYS];
+ int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
+ struct list_head crypt_deinit_list;
+ struct timer_list crypt_deinit_timer;
+ int crypt_quiesced;
+};
+
+int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name,
+ spinlock_t *lock);
+void lib80211_crypt_info_free(struct lib80211_crypt_info *info);
+int lib80211_register_crypto_ops(struct lib80211_crypto_ops *ops);
+int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops);
+struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name);
+void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info,
+ struct lib80211_crypt_data **crypt);
+
+#endif /* LIB80211_H */
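A minimal sketch of registering a crypto backend with the ops table above; the "NULL" cipher is a hypothetical stand-in that keeps no per-key state:

static void *null_crypto_init(int keyidx)
{
	return (void *)1;	/* no private state; any non-NULL token */
}

static void null_crypto_deinit(void *priv)
{
}

static struct lib80211_crypto_ops null_crypto_ops = {
	.name	= "NULL",
	.init	= null_crypto_init,
	.deinit	= null_crypto_deinit,
	.owner	= THIS_MODULE,
};

static int __init null_crypto_load(void)
{
	return lib80211_register_crypto_ops(&null_crypto_ops);
}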
diff --git a/include/net/llc.h b/include/net/llc.h
new file mode 100644
index 000000000..e250dca03
--- /dev/null
+++ b/include/net/llc.h
@@ -0,0 +1,165 @@
+#ifndef LLC_H
+#define LLC_H
+/*
+ * Copyright (c) 1997 by Procom Technology, Inc.
+ * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/rculist_nulls.h>
+#include <linux/hash.h>
+#include <linux/jhash.h>
+
+#include <linux/atomic.h>
+
+struct net_device;
+struct packet_type;
+struct sk_buff;
+
+struct llc_addr {
+ unsigned char lsap;
+ unsigned char mac[IFHWADDRLEN];
+};
+
+#define LLC_SAP_STATE_INACTIVE 1
+#define LLC_SAP_STATE_ACTIVE 2
+
+#define LLC_SK_DEV_HASH_BITS 6
+#define LLC_SK_DEV_HASH_ENTRIES (1<<LLC_SK_DEV_HASH_BITS)
+
+#define LLC_SK_LADDR_HASH_BITS 6
+#define LLC_SK_LADDR_HASH_ENTRIES (1<<LLC_SK_LADDR_HASH_BITS)
+
+/**
+ * struct llc_sap - Defines the SAP component
+ *
+ * @state - sap state
+ * @p_bit - only lowest-order bit used
+ * @f_bit - only lowest-order bit used
+ * @laddr - SAP value in this 'lsap'
+ * @node - entry in the station's sap_list
+ * @sk_laddr_hash - LLC sockets this SAP manages, hashed by local address
+ * @sk_dev_hash - LLC sockets this SAP manages, hashed by device ifindex
+ */
+struct llc_sap {
+ unsigned char state;
+ unsigned char p_bit;
+ unsigned char f_bit;
+ refcount_t refcnt;
+ int (*rcv_func)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt,
+ struct net_device *orig_dev);
+ struct llc_addr laddr;
+ struct list_head node;
+ spinlock_t sk_lock;
+ int sk_count;
+ struct hlist_nulls_head sk_laddr_hash[LLC_SK_LADDR_HASH_ENTRIES];
+ struct hlist_head sk_dev_hash[LLC_SK_DEV_HASH_ENTRIES];
+ struct rcu_head rcu;
+};
+
+static inline
+struct hlist_head *llc_sk_dev_hash(struct llc_sap *sap, int ifindex)
+{
+ u32 bucket = hash_32(ifindex, LLC_SK_DEV_HASH_BITS);
+
+ return &sap->sk_dev_hash[bucket];
+}
+
+static inline
+u32 llc_sk_laddr_hashfn(struct llc_sap *sap, const struct llc_addr *laddr)
+{
+ return hash_32(jhash(laddr->mac, sizeof(laddr->mac), 0),
+ LLC_SK_LADDR_HASH_BITS);
+}
+
+static inline
+struct hlist_nulls_head *llc_sk_laddr_hash(struct llc_sap *sap,
+ const struct llc_addr *laddr)
+{
+ return &sap->sk_laddr_hash[llc_sk_laddr_hashfn(sap, laddr)];
+}
+
+#define LLC_DEST_INVALID 0 /* Invalid LLC PDU type */
+#define LLC_DEST_SAP 1 /* Type 1 goes here */
+#define LLC_DEST_CONN 2 /* Type 2 goes here */
+
+extern struct list_head llc_sap_list;
+
+int llc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+ struct net_device *orig_dev);
+
+int llc_mac_hdr_init(struct sk_buff *skb, const unsigned char *sa,
+ const unsigned char *da);
+
+void llc_add_pack(int type,
+ void (*handler)(struct llc_sap *sap, struct sk_buff *skb));
+void llc_remove_pack(int type);
+
+void llc_set_station_handler(void (*handler)(struct sk_buff *skb));
+
+struct llc_sap *llc_sap_open(unsigned char lsap,
+ int (*rcv)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt,
+ struct net_device *orig_dev));
+static inline void llc_sap_hold(struct llc_sap *sap)
+{
+ refcount_inc(&sap->refcnt);
+}
+
+static inline bool llc_sap_hold_safe(struct llc_sap *sap)
+{
+ return refcount_inc_not_zero(&sap->refcnt);
+}
+
+void llc_sap_close(struct llc_sap *sap);
+
+static inline void llc_sap_put(struct llc_sap *sap)
+{
+ if (refcount_dec_and_test(&sap->refcnt))
+ llc_sap_close(sap);
+}
+
+struct llc_sap *llc_sap_find(unsigned char sap_value);
+
+int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
+ const unsigned char *dmac, unsigned char dsap);
+
+void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
+void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
+
+void llc_station_init(void);
+void llc_station_exit(void);
+
+#ifdef CONFIG_PROC_FS
+int llc_proc_init(void);
+void llc_proc_exit(void);
+#else
+#define llc_proc_init() (0)
+#define llc_proc_exit() do { } while(0)
+#endif /* CONFIG_PROC_FS */
+#ifdef CONFIG_SYSCTL
+int llc_sysctl_init(void);
+void llc_sysctl_exit(void);
+
+extern int sysctl_llc2_ack_timeout;
+extern int sysctl_llc2_busy_timeout;
+extern int sysctl_llc2_p_timeout;
+extern int sysctl_llc2_rej_timeout;
+#else
+#define llc_sysctl_init() (0)
+#define llc_sysctl_exit() do { } while(0)
+#endif /* CONFIG_SYSCTL */
+#endif /* LLC_H */
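A minimal sketch of claiming an LSAP with a Type 1 datagram handler; my_rcv() and the 0x42 SAP value are illustrative:

static int my_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *pt, struct net_device *orig_dev)
{
	kfree_skb(skb);		/* a real handler would consume the PDU */
	return 0;
}

static struct llc_sap *my_open_sap(void)
{
	/* NULL return means the LSAP is already owned by another user. */
	return llc_sap_open(0x42, my_rcv);
}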
diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
new file mode 100644
index 000000000..3e1f76786
--- /dev/null
+++ b/include/net/llc_c_ac.h
@@ -0,0 +1,187 @@
+#ifndef LLC_C_AC_H
+#define LLC_C_AC_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+/* Connection component state transition actions */
+/*
+ * Connection state transition actions
+ * (Fb = F bit; Pb = P bit; Xb = X bit)
+ */
+
+#include <linux/types.h>
+
+struct sk_buff;
+struct sock;
+struct timer_list;
+
+#define LLC_CONN_AC_CLR_REMOTE_BUSY 1
+#define LLC_CONN_AC_CONN_IND 2
+#define LLC_CONN_AC_CONN_CONFIRM 3
+#define LLC_CONN_AC_DATA_IND 4
+#define LLC_CONN_AC_DISC_IND 5
+#define LLC_CONN_AC_RESET_IND 6
+#define LLC_CONN_AC_RESET_CONFIRM 7
+#define LLC_CONN_AC_REPORT_STATUS 8
+#define LLC_CONN_AC_CLR_REMOTE_BUSY_IF_Fb_EQ_1 9
+#define LLC_CONN_AC_STOP_REJ_TMR_IF_DATA_FLAG_EQ_2 10
+#define LLC_CONN_AC_SEND_DISC_CMD_Pb_SET_X 11
+#define LLC_CONN_AC_SEND_DM_RSP_Fb_SET_Pb 12
+#define LLC_CONN_AC_SEND_DM_RSP_Fb_SET_1 13
+#define LLC_CONN_AC_SEND_DM_RSP_Fb_SET_F_FLAG 14
+#define LLC_CONN_AC_SEND_FRMR_RSP_Fb_SET_X 15
+#define LLC_CONN_AC_RESEND_FRMR_RSP_Fb_SET_0 16
+#define LLC_CONN_AC_RESEND_FRMR_RSP_Fb_SET_Pb 17
+#define LLC_CONN_AC_SEND_I_CMD_Pb_SET_1 18
+#define LLC_CONN_AC_RESEND_I_CMD_Pb_SET_1 19
+#define LLC_CONN_AC_RESEND_I_CMD_Pb_SET_1_OR_SEND_RR 20
+#define LLC_CONN_AC_SEND_I_XXX_Xb_SET_0 21
+#define LLC_CONN_AC_RESEND_I_XXX_Xb_SET_0 22
+#define LLC_CONN_AC_RESEND_I_XXX_Xb_SET_0_OR_SEND_RR 23
+#define LLC_CONN_AC_RESEND_I_RSP_Fb_SET_1 24
+#define LLC_CONN_AC_SEND_REJ_CMD_Pb_SET_1 25
+#define LLC_CONN_AC_SEND_REJ_RSP_Fb_SET_1 26
+#define LLC_CONN_AC_SEND_REJ_XXX_Xb_SET_0 27
+#define LLC_CONN_AC_SEND_RNR_CMD_Pb_SET_1 28
+#define LLC_CONN_AC_SEND_RNR_RSP_Fb_SET_1 29
+#define LLC_CONN_AC_SEND_RNR_XXX_Xb_SET_0 30
+#define LLC_CONN_AC_SET_REMOTE_BUSY 31
+#define LLC_CONN_AC_OPTIONAL_SEND_RNR_XXX_Xb_SET_0 32
+#define LLC_CONN_AC_SEND_RR_CMD_Pb_SET_1 33
+#define LLC_CONN_AC_SEND_ACK_CMD_Pb_SET_1 34
+#define LLC_CONN_AC_SEND_RR_RSP_Fb_SET_1 35
+#define LLC_CONN_AC_SEND_ACK_RSP_Fb_SET_1 36
+#define LLC_CONN_AC_SEND_RR_XXX_Xb_SET_0 37
+#define LLC_CONN_AC_SEND_ACK_XXX_Xb_SET_0 38
+#define LLC_CONN_AC_SEND_SABME_CMD_Pb_SET_X 39
+#define LLC_CONN_AC_SEND_UA_RSP_Fb_SET_Pb 40
+#define LLC_CONN_AC_SEND_UA_RSP_Fb_SET_F_FLAG 41
+#define LLC_CONN_AC_S_FLAG_SET_0 42
+#define LLC_CONN_AC_S_FLAG_SET_1 43
+#define LLC_CONN_AC_START_P_TMR 44
+#define LLC_CONN_AC_START_ACK_TMR 45
+#define LLC_CONN_AC_START_REJ_TMR 46
+#define LLC_CONN_AC_START_ACK_TMR_IF_NOT_RUNNING 47
+#define LLC_CONN_AC_STOP_ACK_TMR 48
+#define LLC_CONN_AC_STOP_P_TMR 49
+#define LLC_CONN_AC_STOP_REJ_TMR 50
+#define LLC_CONN_AC_STOP_ALL_TMRS 51
+#define LLC_CONN_AC_STOP_OTHER_TMRS 52
+#define LLC_CONN_AC_UPDATE_Nr_RECEIVED 53
+#define LLC_CONN_AC_UPDATE_P_FLAG 54
+#define LLC_CONN_AC_DATA_FLAG_SET_2 55
+#define LLC_CONN_AC_DATA_FLAG_SET_0 56
+#define LLC_CONN_AC_DATA_FLAG_SET_1 57
+#define LLC_CONN_AC_DATA_FLAG_SET_1_IF_DATA_FLAG_EQ_0 58
+#define LLC_CONN_AC_P_FLAG_SET_0 59
+#define LLC_CONN_AC_P_FLAG_SET_P 60
+#define LLC_CONN_AC_REMOTE_BUSY_SET_0 61
+#define LLC_CONN_AC_RETRY_CNT_SET_0 62
+#define LLC_CONN_AC_RETRY_CNT_INC_BY_1 63
+#define LLC_CONN_AC_Vr_SET_0 64
+#define LLC_CONN_AC_Vr_INC_BY_1 65
+#define LLC_CONN_AC_Vs_SET_0 66
+#define LLC_CONN_AC_Vs_SET_Nr 67
+#define LLC_CONN_AC_F_FLAG_SET_P 68
+#define LLC_CONN_AC_STOP_SENDACK_TMR 70
+#define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
+
+typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
+
+int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb);
+int llc_conn_disc(struct sock *sk, struct sk_buff *skb);
+int llc_conn_reset(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb);
+u8 llc_circular_between(u8 a, u8 b, u8 c);
+int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_rsp_as_ack(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb);
+
+void llc_conn_busy_tmr_cb(struct timer_list *t);
+void llc_conn_pf_cycle_tmr_cb(struct timer_list *t);
+void llc_conn_ack_tmr_cb(struct timer_list *t);
+void llc_conn_rej_tmr_cb(struct timer_list *t);
+
+void llc_conn_set_p_flag(struct sock *sk, u8 value);
+#endif /* LLC_C_AC_H */
diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
new file mode 100644
index 000000000..3948cf111
--- /dev/null
+++ b/include/net/llc_c_ev.h
@@ -0,0 +1,224 @@
+#ifndef LLC_C_EV_H
+#define LLC_C_EV_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+
+#include <net/sock.h>
+
+/* Connection component state transition event qualifiers */
+/* Types of events (possible values in 'ev->type') */
+#define LLC_CONN_EV_TYPE_SIMPLE 1
+#define LLC_CONN_EV_TYPE_CONDITION 2
+#define LLC_CONN_EV_TYPE_PRIM 3
+#define LLC_CONN_EV_TYPE_PDU 4 /* command/response PDU */
+#define LLC_CONN_EV_TYPE_ACK_TMR 5
+#define LLC_CONN_EV_TYPE_P_TMR 6
+#define LLC_CONN_EV_TYPE_REJ_TMR 7
+#define LLC_CONN_EV_TYPE_BUSY_TMR 8
+#define LLC_CONN_EV_TYPE_RPT_STATUS 9
+#define LLC_CONN_EV_TYPE_SENDACK_TMR 10
+
+#define NBR_CONN_EV 5
+/* Connection events which cause state transitions when fully qualified */
+
+#define LLC_CONN_EV_CONN_REQ 1
+#define LLC_CONN_EV_CONN_RESP 2
+#define LLC_CONN_EV_DATA_REQ 3
+#define LLC_CONN_EV_DISC_REQ 4
+#define LLC_CONN_EV_RESET_REQ 5
+#define LLC_CONN_EV_RESET_RESP 6
+#define LLC_CONN_EV_LOCAL_BUSY_DETECTED 7
+#define LLC_CONN_EV_LOCAL_BUSY_CLEARED 8
+#define LLC_CONN_EV_RX_BAD_PDU 9
+#define LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X 10
+#define LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X 11
+#define LLC_CONN_EV_RX_FRMR_RSP_Fbit_SET_X 12
+#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_X 13
+#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_X_UNEXPD_Ns 14
+#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_X_INVAL_Ns 15
+#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_X 16
+#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_UNEXPD_Ns 17
+#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_INVAL_Ns 18
+#define LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_X 19
+#define LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X 20
+#define LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_X 21
+#define LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_X 22
+#define LLC_CONN_EV_RX_RR_CMD_Pbit_SET_X 23
+#define LLC_CONN_EV_RX_RR_RSP_Fbit_SET_X 24
+#define LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X 25
+#define LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X 26
+#define LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_X 27
+#define LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_X 28
+#define LLC_CONN_EV_RX_XXX_YYY 29
+#define LLC_CONN_EV_RX_ZZZ_CMD_Pbit_SET_X_INVAL_Nr 30
+#define LLC_CONN_EV_RX_ZZZ_RSP_Fbit_SET_X_INVAL_Nr 31
+#define LLC_CONN_EV_P_TMR_EXP 32
+#define LLC_CONN_EV_ACK_TMR_EXP 33
+#define LLC_CONN_EV_REJ_TMR_EXP 34
+#define LLC_CONN_EV_BUSY_TMR_EXP 35
+#define LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_1 36
+#define LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_0 37
+#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns 38
+#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns 39
+#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns 40
+#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns 41
+#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 42
+#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 43
+#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 44
+#define LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 45
+#define LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 46
+#define LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 47
+#define LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 48
+#define LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 49
+#define LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 50
+#define LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 51
+#define LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 52
+#define LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 53
+#define LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 54
+#define LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 55
+#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 56
+#define LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 57
+#define LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_1 58
+#define LLC_CONN_EV_TX_BUFF_FULL 59
+
+#define LLC_CONN_EV_INIT_P_F_CYCLE 100
+/*
+ * Connection event qualifiers; for some events a certain combination of
+ * these qualifiers must be TRUE before the event is recognized as valid
+ * for the state; these constants act as indexes into the event qualifier
+ * function table.
+ */
+#define LLC_CONN_EV_QFY_DATA_FLAG_EQ_1 1
+#define LLC_CONN_EV_QFY_DATA_FLAG_EQ_0 2
+#define LLC_CONN_EV_QFY_DATA_FLAG_EQ_2 3
+#define LLC_CONN_EV_QFY_P_FLAG_EQ_1 4
+#define LLC_CONN_EV_QFY_P_FLAG_EQ_0 5
+#define LLC_CONN_EV_QFY_P_FLAG_EQ_Fbit 6
+#define LLC_CONN_EV_QFY_REMOTE_BUSY_EQ_0 7
+#define LLC_CONN_EV_QFY_RETRY_CNT_LT_N2 8
+#define LLC_CONN_EV_QFY_RETRY_CNT_GTE_N2 9
+#define LLC_CONN_EV_QFY_S_FLAG_EQ_1 10
+#define LLC_CONN_EV_QFY_S_FLAG_EQ_0 11
+#define LLC_CONN_EV_QFY_INIT_P_F_CYCLE 12
+
+struct llc_conn_state_ev {
+ u8 type;
+ u8 prim;
+ u8 prim_type;
+ u8 reason;
+ u8 status;
+ u8 ind_prim;
+ u8 cfm_prim;
+};
+
+static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
+{
+ return (struct llc_conn_state_ev *)skb->cb;
+}
+
+typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
+typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
+
+int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
+/* NOT_USED functions and their variations */
+int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb);
+
+/* Available connection action qualifiers */
+int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb);
+
+static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb)
+{
+ return atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
+ (unsigned int)sk->sk_rcvbuf;
+}
+#endif /* LLC_C_EV_H */
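A sketch of the admission pattern these helpers support: tag an incoming PDU as an event and refuse it when llc_conn_space() says the receive buffer cannot absorb it. llc_queue_pdu() is hypothetical; llc_conn_state_process() is declared in <net/llc_conn.h>:

static int llc_queue_pdu(struct sock *sk, struct sk_buff *skb)
{
	struct llc_conn_state_ev *ev = llc_conn_ev(skb);

	if (!llc_conn_space(sk, skb))
		return -ENOBUFS;		/* would overrun sk_rcvbuf */

	ev->type = LLC_CONN_EV_TYPE_PDU;	/* command/response PDU */
	return llc_conn_state_process(sk, skb);
}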
diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
new file mode 100644
index 000000000..53823d61d
--- /dev/null
+++ b/include/net/llc_c_st.h
@@ -0,0 +1,52 @@
+#ifndef LLC_C_ST_H
+#define LLC_C_ST_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+
+#include <net/llc_c_ac.h>
+#include <net/llc_c_ev.h>
+
+/* Connection component state management */
+/* connection states */
+#define LLC_CONN_OUT_OF_SVC 0 /* prior to allocation */
+
+#define LLC_CONN_STATE_ADM 1 /* disc, initial state */
+#define LLC_CONN_STATE_SETUP 2 /* disconnected state */
+#define LLC_CONN_STATE_NORMAL 3 /* connected state */
+#define LLC_CONN_STATE_BUSY 4 /* connected state */
+#define LLC_CONN_STATE_REJ 5 /* connected state */
+#define LLC_CONN_STATE_AWAIT 6 /* connected state */
+#define LLC_CONN_STATE_AWAIT_BUSY 7 /* connected state */
+#define LLC_CONN_STATE_AWAIT_REJ 8 /* connected state */
+#define LLC_CONN_STATE_D_CONN 9 /* disconnected state */
+#define LLC_CONN_STATE_RESET 10 /* disconnected state */
+#define LLC_CONN_STATE_ERROR 11 /* disconnected state */
+#define LLC_CONN_STATE_TEMP 12 /* disconnected state */
+
+#define NBR_CONN_STATES 12 /* size of state table */
+#define NO_STATE_CHANGE 100
+
+/* Connection state table structure */
+struct llc_conn_state_trans {
+ llc_conn_ev_t ev;
+ u8 next_state;
+ const llc_conn_ev_qfyr_t *ev_qualifiers;
+ const llc_conn_action_t *ev_actions;
+};
+
+struct llc_conn_state {
+ u8 current_state;
+ struct llc_conn_state_trans **transitions;
+};
+
+extern struct llc_conn_state llc_conn_state_table[];
+#endif /* LLC_C_ST_H */
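A sketch of the shape of one table entry, modeled on how net/llc/llc_c_st.c builds its tables; the demo_* names are illustrative, and the qualifier/action arrays are NULL-terminated:

static const llc_conn_ev_qfyr_t demo_qfyrs[] = {
	llc_conn_ev_qlfy_p_flag_eq_0,
	NULL,
};

static const llc_conn_action_t demo_actions[] = {
	llc_conn_ac_send_i_xxx_x_set_0,
	llc_conn_ac_start_ack_tmr_if_not_running,
	NULL,
};

static struct llc_conn_state_trans demo_trans = {
	.ev		= llc_conn_ev_data_req,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= demo_qfyrs,
	.ev_actions	= demo_actions,
};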
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
new file mode 100644
index 000000000..2c1ea3414
--- /dev/null
+++ b/include/net/llc_conn.h
@@ -0,0 +1,120 @@
+#ifndef LLC_CONN_H
+#define LLC_CONN_H
+/*
+ * Copyright (c) 1997 by Procom Technology, Inc.
+ * 2001, 2002 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+#include <linux/timer.h>
+#include <net/llc_if.h>
+#include <net/sock.h>
+#include <linux/llc.h>
+
+#define LLC_EVENT 1
+#define LLC_PACKET 2
+
+#define LLC2_P_TIME 2
+#define LLC2_ACK_TIME 1
+#define LLC2_REJ_TIME 3
+#define LLC2_BUSY_TIME 3
+
+struct llc_timer {
+ struct timer_list timer;
+ unsigned long expire; /* timer expire time */
+};
+
+struct llc_sock {
+ /* struct sock must be the first member of llc_sock */
+ struct sock sk;
+ struct sockaddr_llc addr; /* address sock is bound to */
+ u8 state; /* state of connection */
+ struct llc_sap *sap; /* pointer to parent SAP */
+ struct llc_addr laddr; /* lsap/mac pair */
+ struct llc_addr daddr; /* dsap/mac pair */
+ struct net_device *dev; /* device to send to remote */
+ netdevice_tracker dev_tracker;
+ u32 copied_seq; /* head of yet unread data */
+ u8 retry_count; /* number of retries */
+ u8 ack_must_be_send;
+ u8 first_pdu_Ns;
+ u8 npta;
+ struct llc_timer ack_timer;
+ struct llc_timer pf_cycle_timer;
+ struct llc_timer rej_sent_timer;
+ struct llc_timer busy_state_timer; /* ind busy clr at remote LLC */
+ u8 vS; /* seq# next in-seq I-PDU tx'd*/
+ u8 vR; /* seq# next in-seq I-PDU rx'd*/
+ u32 n2; /* max nbr re-tx's for timeout*/
+ u32 n1; /* max nbr octets in I PDU */
+ u8 k; /* tx window size; max = 127 */
+ u8 rw; /* rx window size; max = 127 */
+ u8 p_flag; /* state flags */
+ u8 f_flag;
+ u8 s_flag;
+ u8 data_flag;
+ u8 remote_busy_flag;
+ u8 cause_flag;
+ struct sk_buff_head pdu_unack_q; /* PDUs sent/waiting ack */
+ u16 link; /* network layer link number */
+ u8 X; /* a temporary variable */
+ u8 ack_pf; /* P-bit to use in the next
+ acknowledgement */
+ u8 failed_data_req; /* set when a previous
+ llc_data_req_handler call failed
+ (tx_buffer_full or unacceptable
+ state) */
+ u8 dec_step;
+ u8 inc_cntr;
+ u8 dec_cntr;
+ u8 connect_step;
+ u8 last_nr; /* NR of last pdu received */
+ u32 rx_pdu_hdr; /* saved header of the last received
+ PDU that caused an FRMR to be sent;
+ used when resending the FRMR */
+ u32 cmsg_flags;
+ struct hlist_node dev_hash_node;
+};
+
+static inline struct llc_sock *llc_sk(const struct sock *sk)
+{
+ return (struct llc_sock *)sk;
+}
+
+static __inline__ void llc_set_backlog_type(struct sk_buff *skb, char type)
+{
+ skb->cb[sizeof(skb->cb) - 1] = type;
+}
+
+static __inline__ char llc_backlog_type(struct sk_buff *skb)
+{
+ return skb->cb[sizeof(skb->cb) - 1];
+}
+
+struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
+ struct proto *prot, int kern);
+void llc_sk_stop_all_timers(struct sock *sk, bool sync);
+void llc_sk_free(struct sock *sk);
+
+void llc_sk_reset(struct sock *sk);
+
+/* Access to a connection */
+int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
+void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
+int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr, u16 *how_many_unacked);
+struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr,
+ struct llc_addr *laddr);
+void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
+void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
+
+u8 llc_data_accept_state(u8 state);
+void llc_build_offset_table(void);
+#endif /* LLC_CONN_H */
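A sketch of the backlog-tagging pattern that llc_set_backlog_type()/llc_backlog_type() support; demo_queue() is hypothetical, not the real llc_conn.c path:

static void demo_queue(struct sock *sk, struct sk_buff *skb)
{
	llc_set_backlog_type(skb, LLC_PACKET);	/* vs. LLC_EVENT */
	if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
		kfree_skb(skb);			/* backlog full: drop */
}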
diff --git a/include/net/llc_if.h b/include/net/llc_if.h
new file mode 100644
index 000000000..c72570a21
--- /dev/null
+++ b/include/net/llc_if.h
@@ -0,0 +1,69 @@
+#ifndef LLC_IF_H
+#define LLC_IF_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+/* Defines LLC interface to network layer */
+/* Available primitives */
+#include <linux/if.h>
+#include <linux/if_arp.h>
+#include <linux/llc.h>
+#include <linux/etherdevice.h>
+#include <net/llc.h>
+
+#define LLC_DATAUNIT_PRIM 1
+#define LLC_CONN_PRIM 2
+#define LLC_DATA_PRIM 3
+#define LLC_DISC_PRIM 4
+#define LLC_RESET_PRIM 5
+#define LLC_FLOWCONTROL_PRIM 6 /* Not supported at this time */
+#define LLC_DISABLE_PRIM 7
+#define LLC_XID_PRIM 8
+#define LLC_TEST_PRIM 9
+#define LLC_SAP_ACTIVATION 10
+#define LLC_SAP_DEACTIVATION 11
+
+#define LLC_NBR_PRIMITIVES 11
+
+#define LLC_IND 1
+#define LLC_CONFIRM 2
+
+/* Primitive type */
+#define LLC_PRIM_TYPE_REQ 1
+#define LLC_PRIM_TYPE_IND 2
+#define LLC_PRIM_TYPE_RESP 3
+#define LLC_PRIM_TYPE_CONFIRM 4
+
+/* Reset reasons, remote entity or local LLC */
+#define LLC_RESET_REASON_REMOTE 1
+#define LLC_RESET_REASON_LOCAL 2
+
+/* Disconnect reasons */
+#define LLC_DISC_REASON_RX_DM_RSP_PDU 0
+#define LLC_DISC_REASON_RX_DISC_CMD_PDU 1
+#define LLC_DISC_REASON_ACK_TMR_EXP 2
+
+/* Confirm reasons */
+#define LLC_STATUS_CONN 0 /* connect confirm & reset confirm */
+#define LLC_STATUS_DISC 1 /* connect confirm & reset confirm */
+#define LLC_STATUS_FAILED 2 /* connect confirm & reset confirm */
+#define LLC_STATUS_IMPOSSIBLE 3 /* connect confirm */
+#define LLC_STATUS_RECEIVED 4 /* data conn */
+#define LLC_STATUS_REMOTE_BUSY 5 /* data conn */
+#define LLC_STATUS_REFUSE 6 /* data conn */
+#define LLC_STATUS_CONFLICT 7 /* disconnect conn */
+#define LLC_STATUS_RESET_DONE 8 /* */
+
+int llc_establish_connection(struct sock *sk, const u8 *lmac, u8 *dmac,
+ u8 dsap);
+int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
+int llc_send_disc(struct sock *sk);
+#endif /* LLC_IF_H */
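A sketch of the call order a network-layer user of these three primitives might follow; demo_connect_and_send() and the 0x42 DSAP are illustrative:

static int demo_connect_and_send(struct sock *sk, struct net_device *dev,
				 u8 *peer_mac, struct sk_buff *skb)
{
	int rc = llc_establish_connection(sk, dev->dev_addr, peer_mac, 0x42);

	if (rc)
		return rc;			/* connect failed */
	return llc_build_and_send_pkt(sk, skb);	/* queue data as an I-PDU */
}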
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
new file mode 100644
index 000000000..581cd37aa
--- /dev/null
+++ b/include/net/llc_pdu.h
@@ -0,0 +1,442 @@
+#ifndef LLC_PDU_H
+#define LLC_PDU_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+
+#include <linux/if_ether.h>
+
+/* Lengths of frame formats */
+#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
+#define LLC_PDU_LEN_S 4
+#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
+/* header and 1 control byte and XID info */
+#define LLC_PDU_LEN_U_XID (LLC_PDU_LEN_U + sizeof(struct llc_xid_info))
+/* Known SAP addresses */
+#define LLC_GLOBAL_SAP 0xFF
+#define LLC_NULL_SAP 0x00 /* not network-layer visible */
+#define LLC_MGMT_INDIV 0x02 /* station LLC mgmt indiv addr */
+#define LLC_MGMT_GRP 0x03 /* station LLC mgmt group addr */
+#define LLC_RDE_SAP 0xA6 /* route determination entity */
+
+/* SAP field bit masks */
+#define LLC_ISO_RESERVED_SAP 0x02
+#define LLC_SAP_GROUP_DSAP 0x01
+#define LLC_SAP_RESP_SSAP 0x01
+
+/* Group/individual DSAP indicator is DSAP field */
+#define LLC_PDU_GROUP_DSAP_MASK 0x01
+#define LLC_PDU_IS_GROUP_DSAP(pdu) \
+ ((pdu->dsap & LLC_PDU_GROUP_DSAP_MASK) ? 0 : 1)
+#define LLC_PDU_IS_INDIV_DSAP(pdu) \
+ (!(pdu->dsap & LLC_PDU_GROUP_DSAP_MASK) ? 0 : 1)
+
+/* Command/response PDU indicator in SSAP field */
+#define LLC_PDU_CMD_RSP_MASK 0x01
+#define LLC_PDU_CMD 0
+#define LLC_PDU_RSP 1
+#define LLC_PDU_IS_CMD(pdu) ((pdu->ssap & LLC_PDU_RSP) ? 0 : 1)
+#define LLC_PDU_IS_RSP(pdu) ((pdu->ssap & LLC_PDU_RSP) ? 1 : 0)
+
+/* Get PDU type from 2 lowest-order bits of control field first byte */
+#define LLC_PDU_TYPE_I_MASK 0x01 /* 16-bit control field */
+#define LLC_PDU_TYPE_S_MASK 0x03
+#define LLC_PDU_TYPE_U_MASK 0x03 /* 8-bit control field */
+#define LLC_PDU_TYPE_MASK 0x03
+
+#define LLC_PDU_TYPE_I 0 /* first bit */
+#define LLC_PDU_TYPE_S 1 /* first two bits */
+#define LLC_PDU_TYPE_U 3 /* first two bits */
+#define LLC_PDU_TYPE_U_XID 4 /* private type for detecting XID commands */
+
+#define LLC_PDU_TYPE_IS_I(pdu) \
+ ((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0)
+
+#define LLC_PDU_TYPE_IS_U(pdu) \
+ (((pdu->ctrl_1 & LLC_PDU_TYPE_U_MASK) == LLC_PDU_TYPE_U) ? 1 : 0)
+
+#define LLC_PDU_TYPE_IS_S(pdu) \
+ (((pdu->ctrl_1 & LLC_PDU_TYPE_S_MASK) == LLC_PDU_TYPE_S) ? 1 : 0)
+
+/* U-format PDU control field masks */
+#define LLC_U_PF_BIT_MASK 0x10 /* P/F bit mask */
+#define LLC_U_PF_IS_1(pdu) ((pdu->ctrl_1 & LLC_U_PF_BIT_MASK) ? 1 : 0)
+#define LLC_U_PF_IS_0(pdu) ((!(pdu->ctrl_1 & LLC_U_PF_BIT_MASK)) ? 1 : 0)
+
+#define LLC_U_PDU_CMD_MASK 0xEC /* cmd/rsp mask */
+#define LLC_U_PDU_CMD(pdu) (pdu->ctrl_1 & LLC_U_PDU_CMD_MASK)
+#define LLC_U_PDU_RSP(pdu) (pdu->ctrl_1 & LLC_U_PDU_CMD_MASK)
+
+#define LLC_1_PDU_CMD_UI 0x00 /* Type 1 cmds/rsps */
+#define LLC_1_PDU_CMD_XID 0xAC
+#define LLC_1_PDU_CMD_TEST 0xE0
+
+#define LLC_2_PDU_CMD_SABME 0x6C /* Type 2 cmds/rsps */
+#define LLC_2_PDU_CMD_DISC 0x40
+#define LLC_2_PDU_RSP_UA 0x60
+#define LLC_2_PDU_RSP_DM 0x0C
+#define LLC_2_PDU_RSP_FRMR 0x84
+
+/* Type 1 operations */
+
+/* XID information field bit masks */
+
+/* LLC format identifier (byte 1) */
+#define LLC_XID_FMT_ID 0x81 /* first byte must be this */
+
+/* LLC types/classes identifier (byte 2) */
+#define LLC_XID_CLASS_ZEROS_MASK 0xE0 /* these must be zeros */
+#define LLC_XID_CLASS_MASK 0x1F /* AND with byte to get below */
+
+#define LLC_XID_NULL_CLASS_1 0x01 /* if NULL LSAP...use these */
+#define LLC_XID_NULL_CLASS_2 0x03
+#define LLC_XID_NULL_CLASS_3 0x05
+#define LLC_XID_NULL_CLASS_4 0x07
+
+#define LLC_XID_NNULL_TYPE_1 0x01 /* if non-NULL LSAP...use these */
+#define LLC_XID_NNULL_TYPE_2 0x02
+#define LLC_XID_NNULL_TYPE_3 0x04
+#define LLC_XID_NNULL_TYPE_1_2 0x03
+#define LLC_XID_NNULL_TYPE_1_3 0x05
+#define LLC_XID_NNULL_TYPE_2_3 0x06
+#define LLC_XID_NNULL_ALL 0x07
+
+/* Sender Receive Window (byte 3) */
+#define LLC_XID_RW_MASK 0xFE /* AND with value to get below */
+
+#define LLC_XID_MIN_RW 0x02 /* lowest-order bit always zero */
+
+/* Type 2 operations */
+
+#define LLC_2_SEQ_NBR_MODULO ((u8) 128)
+
+/* I-PDU masks ('ctrl' is I-PDU control word) */
+#define LLC_I_GET_NS(pdu) (u8)((pdu->ctrl_1 & 0xFE) >> 1)
+#define LLC_I_GET_NR(pdu) (u8)((pdu->ctrl_2 & 0xFE) >> 1)
+
+#define LLC_I_PF_BIT_MASK 0x01
+
+#define LLC_I_PF_IS_0(pdu) ((!(pdu->ctrl_2 & LLC_I_PF_BIT_MASK)) ? 1 : 0)
+#define LLC_I_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_I_PF_BIT_MASK) ? 1 : 0)
+
+/* S-PDU supervisory commands and responses */
+
+#define LLC_S_PDU_CMD_MASK 0x0C
+#define LLC_S_PDU_CMD(pdu) (pdu->ctrl_1 & LLC_S_PDU_CMD_MASK)
+#define LLC_S_PDU_RSP(pdu) (pdu->ctrl_1 & LLC_S_PDU_CMD_MASK)
+
+#define LLC_2_PDU_CMD_RR 0x00 /* rx ready cmd */
+#define LLC_2_PDU_RSP_RR 0x00 /* rx ready rsp */
+#define LLC_2_PDU_CMD_REJ 0x08 /* reject PDU cmd */
+#define LLC_2_PDU_RSP_REJ 0x08 /* reject PDU rsp */
+#define LLC_2_PDU_CMD_RNR 0x04 /* rx not ready cmd */
+#define LLC_2_PDU_RSP_RNR 0x04 /* rx not ready rsp */
+
+#define LLC_S_PF_BIT_MASK 0x01
+#define LLC_S_PF_IS_0(pdu) ((!(pdu->ctrl_2 & LLC_S_PF_BIT_MASK)) ? 1 : 0)
+#define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0)
+
+#define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1)
+#define PDU_GET_NEXT_Vr(sn) (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO)
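
Since the sequence-number space is modulo 128, PDU_GET_NEXT_Vr() wraps the
receive state variable back to zero after 127; a quick illustrative check:

    u8 vr = 127;
    vr = PDU_GET_NEXT_Vr(vr);       /* (127 + 1) & ~128 == 0 */
    vr = PDU_GET_NEXT_Vr(vr);       /* == 1 */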
+
+/* FRMR information field macros */
+
+#define FRMR_INFO_LENGTH 5 /* 5 bytes of information */
+
+/*
+ * info is pointer to FRMR info field structure; 'rej_ctrl' is byte pointer
+ * (if U-PDU) or word pointer to rejected PDU control field
+ */
+#define FRMR_INFO_SET_REJ_CNTRL(info,rej_ctrl) \
+ info->rej_pdu_ctrl = ((*((u8 *) rej_ctrl) & \
+ LLC_PDU_TYPE_U) != LLC_PDU_TYPE_U ? \
+ (u16)*((u16 *) rej_ctrl) : \
+ (((u16) *((u8 *) rej_ctrl)) & 0x00FF))
+
+/*
+ * Info is a pointer to the FRMR info field structure; 'vs' is a byte
+ * containing the send state variable value in its low-order 7 bits
+ * (ensure the lowest-order bit remains zero (0))
+ */
+#define FRMR_INFO_SET_Vs(info,vs) (info->curr_ssv = (((u8) vs) << 1))
+#define FRMR_INFO_SET_Vr(info,vr) (info->curr_rsv = (((u8) vr) << 1))
+
+/*
+ * Info is a pointer to the FRMR info field structure; 'cr' is a byte
+ * containing the C/R bit value in its low-order bit
+ */
+#define FRMR_INFO_SET_C_R_BIT(info, cr) (info->curr_rsv |= (((u8) cr) & 0x01))
+
+/*
+ * In the remaining five macros, 'info' is a pointer to the FRMR info field
+ * structure; 'ind' is a byte carrying the indicator bit value, already
+ * placed in the bit position selected by each macro's mask
+ */
+#define FRMR_INFO_SET_INVALID_PDU_CTRL_IND(info, ind) \
+ (info->ind_bits = ((info->ind_bits & 0xFE) | (((u8) ind) & 0x01)))
+
+#define FRMR_INFO_SET_INVALID_PDU_INFO_IND(info, ind) \
+	(info->ind_bits = ((info->ind_bits & 0xFD) | (((u8) ind) & 0x02)))
+
+#define FRMR_INFO_SET_PDU_INFO_2LONG_IND(info, ind) \
+	(info->ind_bits = ((info->ind_bits & 0xFB) | (((u8) ind) & 0x04)))
+
+#define FRMR_INFO_SET_PDU_INVALID_Nr_IND(info, ind) \
+	(info->ind_bits = ((info->ind_bits & 0xF7) | (((u8) ind) & 0x08)))
+
+#define FRMR_INFO_SET_PDU_INVALID_Ns_IND(info, ind) \
+	(info->ind_bits = ((info->ind_bits & 0xEF) | (((u8) ind) & 0x10)))
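
Putting the FRMR helpers together, a hedged sketch (the function name is
hypothetical) of filling the 5-byte llc_frmr_info area, whose struct is
defined later in this header:

    /* Hypothetical example: populate an FRMR info field for a rejected
     * PDU whose control field starts at &rej->ctrl_1.
     */
    static void frmr_fill(struct llc_frmr_info *info,
                          struct llc_pdu_sn *rej, u8 vs, u8 vr)
    {
            FRMR_INFO_SET_REJ_CNTRL(info, &rej->ctrl_1);
            FRMR_INFO_SET_Vs(info, vs);     /* curr_ssv = vs << 1 */
            FRMR_INFO_SET_Vr(info, vr);     /* curr_rsv = vr << 1 */
            FRMR_INFO_SET_C_R_BIT(info, 0); /* rejected a command PDU */
            FRMR_INFO_SET_INVALID_PDU_CTRL_IND(info, 1);
    }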
+
+/* Sequence-numbered PDU format (4 bytes in length) */
+struct llc_pdu_sn {
+ u8 dsap;
+ u8 ssap;
+ u8 ctrl_1;
+ u8 ctrl_2;
+} __packed;
+
+static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb)
+{
+ return (struct llc_pdu_sn *)skb_network_header(skb);
+}
+
+/* Un-numbered PDU format (3 bytes in length) */
+struct llc_pdu_un {
+ u8 dsap;
+ u8 ssap;
+ u8 ctrl_1;
+} __packed;
+
+static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
+{
+ return (struct llc_pdu_un *)skb_network_header(skb);
+}
+
+/**
+ * llc_pdu_header_init - initializes pdu header
+ * @skb: input skb that the header is built into.
+ * @type: type of PDU (U, I or S).
+ * @ssap: source sap.
+ * @dsap: destination sap.
+ * @cr: command/response bit (0 or 1).
+ *
+ * This function sets the DSAP, SSAP and command/response bit in the LLC
+ * header.
+ */
+static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
+ u8 ssap, u8 dsap, u8 cr)
+{
+ int hlen = 4; /* default value for I and S types */
+ struct llc_pdu_un *pdu;
+
+ switch (type) {
+ case LLC_PDU_TYPE_U:
+ hlen = 3;
+ break;
+ case LLC_PDU_TYPE_U_XID:
+ hlen = 6;
+ break;
+ }
+
+ skb_push(skb, hlen);
+ skb_reset_network_header(skb);
+ pdu = llc_pdu_un_hdr(skb);
+ pdu->dsap = dsap;
+ pdu->ssap = ssap;
+ pdu->ssap |= cr;
+}
+
+/**
+ * llc_pdu_decode_sa - extracts source address (MAC) of input frame
+ * @skb: input skb that the source address is extracted from.
+ * @sa: pointer to source address (6 byte array).
+ *
+ * This function extracts the source address (MAC) of the input frame.
+ */
+static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
+{
+ memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
+}
+
+/**
+ * llc_pdu_decode_da - extracts dest address of input frame
+ * @skb: input skb that the destination address is extracted from.
+ * @da: pointer to destination address (6 byte array).
+ *
+ * This function extracts the destination address (MAC) of the input frame.
+ */
+static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
+{
+ memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
+}
+
+/**
+ * llc_pdu_decode_ssap - extracts source SAP of input frame
+ * @skb: input skb that the source SAP is extracted from.
+ * @ssap: source SAP (output argument).
+ *
+ * This function extracts the source SAP of the input frame. The low-order
+ * bit of the SSAP is the command/response bit.
+ */
+static inline void llc_pdu_decode_ssap(struct sk_buff *skb, u8 *ssap)
+{
+ *ssap = llc_pdu_un_hdr(skb)->ssap & 0xFE;
+}
+
+/**
+ * llc_pdu_decode_dsap - extracts dest SAP of input frame
+ * @skb: input skb that the destination SAP is extracted from.
+ * @dsap: destination SAP (output argument).
+ *
+ * This function extracts the destination SAP of the input frame. The
+ * low-order bit of the DSAP designates an individual/group SAP.
+ */
+static inline void llc_pdu_decode_dsap(struct sk_buff *skb, u8 *dsap)
+{
+ *dsap = llc_pdu_un_hdr(skb)->dsap & 0xFE;
+}
+
+/**
+ * llc_pdu_init_as_ui_cmd - sets LLC header as UI PDU
+ * @skb: input skb that the header is built into.
+ *
+ * This function sets the third byte of the LLC header as a UI PDU.
+ */
+static inline void llc_pdu_init_as_ui_cmd(struct sk_buff *skb)
+{
+ struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
+
+ pdu->ctrl_1 = LLC_PDU_TYPE_U;
+ pdu->ctrl_1 |= LLC_1_PDU_CMD_UI;
+}
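
A hedged end-to-end sketch of how llc_pdu_header_init() and
llc_pdu_init_as_ui_cmd() combine when building an outgoing UI command
(build_ui is hypothetical; the skb is assumed to have enough headroom,
e.g. from llc_alloc_frame(), and LLC_PDU_CMD is the command value of the
C/R bit defined alongside LLC_PDU_RSP above):

    /* Hypothetical example: build the 3-byte LLC header of a UI command. */
    static void build_ui(struct sk_buff *skb, u8 ssap, u8 dsap)
    {
            /* push 3 bytes, fill DSAP/SSAP, C/R bit = command (0) */
            llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ssap, dsap, LLC_PDU_CMD);
            /* ctrl_1 = U format | UI command */
            llc_pdu_init_as_ui_cmd(skb);
    }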
+
+/**
+ * llc_pdu_init_as_test_cmd - sets PDU as TEST
+ * @skb: Address of the skb to build
+ *
+ * Sets a PDU as TEST
+ */
+static inline void llc_pdu_init_as_test_cmd(struct sk_buff *skb)
+{
+ struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
+
+ pdu->ctrl_1 = LLC_PDU_TYPE_U;
+ pdu->ctrl_1 |= LLC_1_PDU_CMD_TEST;
+ pdu->ctrl_1 |= LLC_U_PF_BIT_MASK;
+}
+
+/**
+ * llc_pdu_init_as_test_rsp - build TEST response PDU
+ * @skb: Address of the skb to build
+ * @ev_skb: The received TEST command PDU frame
+ *
+ * Builds a pdu frame as a TEST response.
+ */
+static inline void llc_pdu_init_as_test_rsp(struct sk_buff *skb,
+ struct sk_buff *ev_skb)
+{
+ struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
+
+ pdu->ctrl_1 = LLC_PDU_TYPE_U;
+ pdu->ctrl_1 |= LLC_1_PDU_CMD_TEST;
+ pdu->ctrl_1 |= LLC_U_PF_BIT_MASK;
+ if (ev_skb->protocol == htons(ETH_P_802_2)) {
+ struct llc_pdu_un *ev_pdu = llc_pdu_un_hdr(ev_skb);
+ int dsize;
+
+ dsize = ntohs(eth_hdr(ev_skb)->h_proto) - 3;
+ memcpy(((u8 *)pdu) + 3, ((u8 *)ev_pdu) + 3, dsize);
+ skb_put(skb, dsize);
+ }
+}
+
+/* LLC Type 1 XID command/response information fields format */
+struct llc_xid_info {
+ u8 fmt_id; /* always 0x81 for LLC */
+ u8 type; /* different if NULL/non-NULL LSAP */
+ u8 rw; /* sender receive window */
+} __packed;
+
+/**
+ * llc_pdu_init_as_xid_cmd - sets bytes 3 through 6 of LLC header as XID
+ * @skb: input skb that the header is built into.
+ * @svcs_supported: The class of the LLC (I or II)
+ * @rx_window: The size of the receive window of the LLC
+ *
+ * This function sets the third, fourth, fifth and sixth bytes of the LLC
+ * header as an XID PDU.
+ */
+static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb,
+ u8 svcs_supported, u8 rx_window)
+{
+ struct llc_xid_info *xid_info;
+ struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
+
+ pdu->ctrl_1 = LLC_PDU_TYPE_U;
+ pdu->ctrl_1 |= LLC_1_PDU_CMD_XID;
+ pdu->ctrl_1 |= LLC_U_PF_BIT_MASK;
+ xid_info = (struct llc_xid_info *)(((u8 *)&pdu->ctrl_1) + 1);
+ xid_info->fmt_id = LLC_XID_FMT_ID; /* 0x81 */
+ xid_info->type = svcs_supported;
+ xid_info->rw = rx_window << 1; /* size of receive window */
+
+ /* no need to push/put since llc_pdu_header_init() has already
+ * pushed 3 + 3 bytes
+ */
+}
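
Note the left shift of @rx_window: the lowest-order bit of the RW byte is
reserved-zero (cf. LLC_XID_RW_MASK above). A hedged usage sketch with
hypothetical argument values:

    /* Advertise Type 2 service with a receive window of 7; byte 3 of
     * the XID info field becomes 7 << 1 == 0x0E.
     */
    llc_pdu_init_as_xid_cmd(skb, LLC_XID_NNULL_TYPE_2, 7);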
+
+/**
+ * llc_pdu_init_as_xid_rsp - builds XID response PDU
+ * @skb: Address of the skb to build
+ * @svcs_supported: The class of the LLC (I or II)
+ * @rx_window: The size of the receive window of the LLC
+ *
+ * Builds a pdu frame as an XID response.
+ */
+static inline void llc_pdu_init_as_xid_rsp(struct sk_buff *skb,
+ u8 svcs_supported, u8 rx_window)
+{
+ struct llc_xid_info *xid_info;
+ struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
+
+ pdu->ctrl_1 = LLC_PDU_TYPE_U;
+ pdu->ctrl_1 |= LLC_1_PDU_CMD_XID;
+ pdu->ctrl_1 |= LLC_U_PF_BIT_MASK;
+
+ xid_info = (struct llc_xid_info *)(((u8 *)&pdu->ctrl_1) + 1);
+ xid_info->fmt_id = LLC_XID_FMT_ID;
+ xid_info->type = svcs_supported;
+ xid_info->rw = rx_window << 1;
+ skb_put(skb, sizeof(struct llc_xid_info));
+}
+
+/* LLC Type 2 FRMR response information field format */
+struct llc_frmr_info {
+ u16 rej_pdu_ctrl; /* bits 1-8 if U-PDU */
+ u8 curr_ssv; /* current send state variable val */
+ u8 curr_rsv; /* current receive state variable */
+ u8 ind_bits; /* indicator bits set with macro */
+} __packed;
+
+void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
+void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
+void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit);
+void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit);
+void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr);
+void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit);
+void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit);
+void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, struct llc_pdu_sn *prev_pdu,
+ u8 f_bit, u8 vs, u8 vr, u8 vzyxw);
+void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit);
+#endif /* LLC_PDU_H */
diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
new file mode 100644
index 000000000..f71790305
--- /dev/null
+++ b/include/net/llc_s_ac.h
@@ -0,0 +1,41 @@
+#ifndef LLC_S_AC_H
+#define LLC_S_AC_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+
+struct llc_sap;
+struct sk_buff;
+
+/* SAP component actions */
+#define SAP_ACT_UNITDATA_IND 1
+#define SAP_ACT_SEND_UI 2
+#define SAP_ACT_SEND_XID_C 3
+#define SAP_ACT_SEND_XID_R 4
+#define SAP_ACT_SEND_TEST_C 5
+#define SAP_ACT_SEND_TEST_R 6
+#define SAP_ACT_REPORT_STATUS 7
+#define SAP_ACT_XID_IND 8
+#define SAP_ACT_TEST_IND 9
+
+/* All action functions must look like this */
+typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
+
+int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_report_status(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb);
+#endif /* LLC_S_AC_H */
diff --git a/include/net/llc_s_ev.h b/include/net/llc_s_ev.h
new file mode 100644
index 000000000..fb7df1d70
--- /dev/null
+++ b/include/net/llc_s_ev.h
@@ -0,0 +1,67 @@
+#ifndef LLC_S_EV_H
+#define LLC_S_EV_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+
+#include <linux/skbuff.h>
+#include <net/llc.h>
+
+/* Defines SAP component events */
+/* Types of events (possible values in 'ev->type') */
+#define LLC_SAP_EV_TYPE_SIMPLE 1
+#define LLC_SAP_EV_TYPE_CONDITION 2
+#define LLC_SAP_EV_TYPE_PRIM 3
+#define LLC_SAP_EV_TYPE_PDU 4 /* command/response PDU */
+#define LLC_SAP_EV_TYPE_ACK_TMR 5
+#define LLC_SAP_EV_TYPE_RPT_STATUS 6
+
+#define LLC_SAP_EV_ACTIVATION_REQ 1
+#define LLC_SAP_EV_RX_UI 2
+#define LLC_SAP_EV_UNITDATA_REQ 3
+#define LLC_SAP_EV_XID_REQ 4
+#define LLC_SAP_EV_RX_XID_C 5
+#define LLC_SAP_EV_RX_XID_R 6
+#define LLC_SAP_EV_TEST_REQ 7
+#define LLC_SAP_EV_RX_TEST_C 8
+#define LLC_SAP_EV_RX_TEST_R 9
+#define LLC_SAP_EV_DEACTIVATION_REQ 10
+
+struct llc_sap_state_ev {
+ u8 prim;
+ u8 prim_type;
+ u8 type;
+ u8 reason;
+ u8 ind_cfm_flag;
+ struct llc_addr saddr;
+ struct llc_addr daddr;
+};
+
+static __inline__ struct llc_sap_state_ev *llc_sap_ev(struct sk_buff *skb)
+{
+ return (struct llc_sap_state_ev *)skb->cb;
+}
+
+struct llc_sap;
+
+typedef int (*llc_sap_ev_t)(struct llc_sap *sap, struct sk_buff *skb);
+
+int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_deactivation_req(struct llc_sap *sap, struct sk_buff *skb);
+#endif /* LLC_S_EV_H */
diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
new file mode 100644
index 000000000..ed5b2fa40
--- /dev/null
+++ b/include/net/llc_s_st.h
@@ -0,0 +1,38 @@
+#ifndef LLC_S_ST_H
+#define LLC_S_ST_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <net/llc_s_ac.h>
+#include <net/llc_s_ev.h>
+
+struct llc_sap_state_trans;
+
+#define LLC_NR_SAP_STATES 2 /* size of state table */
+
+/* structures and types */
+/* SAP state table structure */
+struct llc_sap_state_trans {
+ llc_sap_ev_t ev;
+ u8 next_state;
+ const llc_sap_action_t *ev_actions;
+};
+
+struct llc_sap_state {
+ u8 curr_state;
+ struct llc_sap_state_trans **transitions;
+};
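
For orientation, a hedged sketch (all names hypothetical) of a single entry
in such a table, in the style used by net/llc/llc_s_st.c:

    /* Hypothetical transition: on a unitdata request, send a UI frame
     * and remain in the same (active) state.
     */
    static const llc_sap_action_t demo_unitdata_actions[] = {
            llc_sap_action_send_ui,
            NULL,                           /* action list terminator */
    };

    static struct llc_sap_state_trans demo_unitdata_trans = {
            .ev             = llc_sap_ev_unitdata_req,
            .next_state     = 1,            /* hypothetical state index */
            .ev_actions     = demo_unitdata_actions,
    };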
+
+/* only access to SAP state table */
+extern struct llc_sap_state llc_sap_state_table[LLC_NR_SAP_STATES];
+#endif /* LLC_S_ST_H */
diff --git a/include/net/llc_sap.h b/include/net/llc_sap.h
new file mode 100644
index 000000000..1e4df9fd9
--- /dev/null
+++ b/include/net/llc_sap.h
@@ -0,0 +1,32 @@
+#ifndef LLC_SAP_H
+#define LLC_SAP_H
+
+#include <asm/types.h>
+
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+struct llc_sap;
+struct net_device;
+struct sk_buff;
+struct sock;
+
+void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb);
+void llc_save_primitive(struct sock *sk, struct sk_buff *skb,
+ unsigned char prim);
+struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
+ u8 type, u32 data_size);
+
+void llc_build_and_send_test_pkt(struct llc_sap *sap, struct sk_buff *skb,
+ unsigned char *dmac, unsigned char dsap);
+void llc_build_and_send_xid_pkt(struct llc_sap *sap, struct sk_buff *skb,
+ unsigned char *dmac, unsigned char dsap);
+#endif /* LLC_SAP_H */
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
new file mode 100644
index 000000000..53bd2d02a
--- /dev/null
+++ b/include/net/lwtunnel.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_LWTUNNEL_H
+#define __NET_LWTUNNEL_H 1
+
+#include <linux/lwtunnel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/route.h>
+
+#define LWTUNNEL_HASH_BITS 7
+#define LWTUNNEL_HASH_SIZE (1 << LWTUNNEL_HASH_BITS)
+
+/* lw tunnel state flags */
+#define LWTUNNEL_STATE_OUTPUT_REDIRECT BIT(0)
+#define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1)
+#define LWTUNNEL_STATE_XMIT_REDIRECT BIT(2)
+
+/* LWTUNNEL_XMIT_CONTINUE should be distinguishable from dst_output return
+ * values (NET_XMIT_xxx and NETDEV_TX_xxx in linux/netdevice.h) for safety.
+ */
+enum {
+ LWTUNNEL_XMIT_DONE,
+ LWTUNNEL_XMIT_CONTINUE = 0x100,
+};
+
+
+struct lwtunnel_state {
+ __u16 type;
+ __u16 flags;
+ __u16 headroom;
+ atomic_t refcnt;
+ int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb);
+ int (*orig_input)(struct sk_buff *);
+ struct rcu_head rcu;
+ __u8 data[];
+};
+
+struct lwtunnel_encap_ops {
+ int (*build_state)(struct net *net, struct nlattr *encap,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **ts,
+ struct netlink_ext_ack *extack);
+ void (*destroy_state)(struct lwtunnel_state *lws);
+ int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
+ int (*input)(struct sk_buff *skb);
+ int (*fill_encap)(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate);
+ int (*get_encap_size)(struct lwtunnel_state *lwtstate);
+ int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
+ int (*xmit)(struct sk_buff *skb);
+
+ struct module *owner;
+};
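
For orientation, a hedged sketch of how an encapsulation module wires these
ops up; the my_* callbacks are placeholders, and LWTUNNEL_ENCAP_IP is one of
the encap type numbers from linux/lwtunnel.h:

    /* Hypothetical registration of lightweight-tunnel encap ops. */
    static const struct lwtunnel_encap_ops demo_encap_ops = {
            .build_state    = my_build_state,       /* placeholder */
            .output         = my_output,            /* placeholder */
            .fill_encap     = my_fill_encap,        /* placeholder */
            .get_encap_size = my_get_encap_size,    /* placeholder */
            .cmp_encap      = my_cmp_encap,         /* placeholder */
            .owner          = THIS_MODULE,
    };

    static int __init demo_lwt_init(void)
    {
            return lwtunnel_encap_add_ops(&demo_encap_ops, LWTUNNEL_ENCAP_IP);
    }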
+
+#ifdef CONFIG_LWTUNNEL
+
+DECLARE_STATIC_KEY_FALSE(nf_hooks_lwtunnel_enabled);
+
+void lwtstate_free(struct lwtunnel_state *lws);
+
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+ if (lws)
+ atomic_inc(&lws->refcnt);
+
+ return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+ if (!lws)
+ return;
+
+ if (atomic_dec_and_test(&lws->refcnt))
+ lwtstate_free(lws);
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+ if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_OUTPUT_REDIRECT))
+ return true;
+
+ return false;
+}
+
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+ if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_INPUT_REDIRECT))
+ return true;
+
+ return false;
+}
+
+static inline bool lwtunnel_xmit_redirect(struct lwtunnel_state *lwtstate)
+{
+ if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_XMIT_REDIRECT))
+ return true;
+
+ return false;
+}
+
+static inline unsigned int lwtunnel_headroom(struct lwtunnel_state *lwtstate,
+ unsigned int mtu)
+{
+ if ((lwtunnel_xmit_redirect(lwtstate) ||
+ lwtunnel_output_redirect(lwtstate)) && lwtstate->headroom < mtu)
+ return lwtstate->headroom;
+
+ return 0;
+}
+
+int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num);
+int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num);
+int lwtunnel_valid_encap_type(u16 encap_type,
+ struct netlink_ext_ack *extack);
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len,
+ struct netlink_ext_ack *extack);
+int lwtunnel_build_state(struct net *net, u16 encap_type,
+ struct nlattr *encap,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **lws,
+ struct netlink_ext_ack *extack);
+int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate,
+ int encap_attr, int encap_type_attr);
+int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
+struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
+int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
+int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb);
+int lwtunnel_input(struct sk_buff *skb);
+int lwtunnel_xmit(struct sk_buff *skb);
+int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
+ bool ingress);
+
+static inline void lwtunnel_set_redirect(struct dst_entry *dst)
+{
+ if (lwtunnel_output_redirect(dst->lwtstate)) {
+ dst->lwtstate->orig_output = dst->output;
+ dst->output = lwtunnel_output;
+ }
+ if (lwtunnel_input_redirect(dst->lwtstate)) {
+ dst->lwtstate->orig_input = dst->input;
+ dst->input = lwtunnel_input;
+ }
+}
+#else
+
+static inline void lwtstate_free(struct lwtunnel_state *lws)
+{
+}
+
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+ return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+ return false;
+}
+
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+ return false;
+}
+
+static inline bool lwtunnel_xmit_redirect(struct lwtunnel_state *lwtstate)
+{
+ return false;
+}
+
+static inline void lwtunnel_set_redirect(struct dst_entry *dst)
+{
+}
+
+static inline unsigned int lwtunnel_headroom(struct lwtunnel_state *lwtstate,
+ unsigned int mtu)
+{
+ return 0;
+}
+
+static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_valid_encap_type(u16 encap_type,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG(extack, "CONFIG_LWTUNNEL is not enabled in this kernel");
+ return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len,
+ struct netlink_ext_ack *extack)
+{
+ /* return 0 since we are not walking attr looking for
+ * RTA_ENCAP_TYPE attribute on nexthops.
+ */
+ return 0;
+}
+
+static inline int lwtunnel_build_state(struct net *net, u16 encap_type,
+ struct nlattr *encap,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **lws,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_fill_encap(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate,
+ int encap_attr, int encap_type_attr)
+{
+ return 0;
+}
+
+static inline int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
+{
+ return 0;
+}
+
+static inline struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len)
+{
+ return NULL;
+}
+
+static inline int lwtunnel_cmp_encap(struct lwtunnel_state *a,
+ struct lwtunnel_state *b)
+{
+ return 0;
+}
+
+static inline int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_input(struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_xmit(struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_LWTUNNEL */
+
+#define MODULE_ALIAS_RTNL_LWT(encap_type) MODULE_ALIAS("rtnl-lwt-" __stringify(encap_type))
+
+#endif /* __NET_LWTUNNEL_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
new file mode 100644
index 000000000..43173204d
--- /dev/null
+++ b/include/net/mac80211.h
@@ -0,0 +1,7227 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * mac80211 <-> driver interface
+ *
+ * Copyright 2002-2005, Devicescape Software, Inc.
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 - 2022 Intel Corporation
+ */
+
+#ifndef MAC80211_H
+#define MAC80211_H
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/ieee80211.h>
+#include <linux/lockdep.h>
+#include <net/cfg80211.h>
+#include <net/codel.h>
+#include <net/ieee80211_radiotap.h>
+#include <asm/unaligned.h>
+
+/**
+ * DOC: Introduction
+ *
+ * mac80211 is the Linux stack for 802.11 hardware that implements
+ * only partial functionality in hard- or firmware. This document
+ * defines the interface between mac80211 and low-level hardware
+ * drivers.
+ */
+
+/**
+ * DOC: Calling mac80211 from interrupts
+ *
+ * Only ieee80211_tx_status_irqsafe() and ieee80211_rx_irqsafe() can be
+ * called in hardware interrupt context. The low-level driver must not call any
+ * other functions in hardware interrupt context. If there is a need for such
+ * call, the low-level driver should first ACK the interrupt and perform the
+ * IEEE 802.11 code call after this, e.g. from a scheduled workqueue or even
+ * tasklet function.
+ *
+ * NOTE: If the driver opts to use the _irqsafe() functions, it may not also
+ * use the non-IRQ-safe functions!
+ */
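
A hedged sketch of the rule above (demo_isr and demo_fetch_rx_frame are
hypothetical): acknowledge the interrupt, then hand the frame to mac80211
with the _irqsafe() variant, which defers the real processing to a tasklet:

    static irqreturn_t demo_isr(int irq, void *dev_id)
    {
            struct ieee80211_hw *hw = dev_id;
            struct sk_buff *skb = demo_fetch_rx_frame(hw);  /* placeholder */

            if (skb)
                    ieee80211_rx_irqsafe(hw, skb);
            return IRQ_HANDLED;
    }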
+
+/**
+ * DOC: Warning
+ *
+ * If you're reading this document and not the header file itself, it will
+ * be incomplete because not all documentation has been converted yet.
+ */
+
+/**
+ * DOC: Frame format
+ *
+ * As a general rule, when frames are passed between mac80211 and the driver,
+ * they start with the IEEE 802.11 header and include the same octets that are
+ * sent over the air except for the FCS which should be calculated by the
+ * hardware.
+ *
+ * There are, however, various exceptions to this rule for advanced features:
+ *
+ * The first exception is for hardware encryption and decryption offload
+ * where the IV/ICV may or may not be generated in hardware.
+ *
+ * Secondly, when the hardware handles fragmentation, the frame handed to
+ * the driver from mac80211 is the MSDU, not the MPDU.
+ */
+
+/**
+ * DOC: mac80211 workqueue
+ *
+ * mac80211 provides its own workqueue for drivers and internal mac80211 use.
+ * The workqueue is a single threaded workqueue and can only be accessed by
+ * helpers for sanity checking. Drivers must ensure all work added onto the
+ * mac80211 workqueue should be cancelled on the driver stop() callback.
+ *
+ * mac80211 will flushed the workqueue upon interface removal and during
+ * suspend.
+ *
+ * All work performed on the mac80211 workqueue must not acquire the RTNL lock.
+ *
+ */
+
+/**
+ * DOC: mac80211 software tx queueing
+ *
+ * mac80211 provides an optional intermediate queueing implementation designed
+ * to allow the driver to keep hardware queues short and provide some fairness
+ * between different stations/interfaces.
+ * In this model, the driver pulls data frames from the mac80211 queue instead
+ * of letting mac80211 push them via drv_tx().
+ * Other frames (e.g. control or management) are still pushed using drv_tx().
+ *
+ * Drivers indicate that they use this model by implementing the .wake_tx_queue
+ * driver operation.
+ *
+ * Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with
+ * another per-sta for non-data/non-mgmt and bufferable management frames, and
+ * a single per-vif queue for multicast data frames.
+ *
+ * The driver is expected to initialize its private per-queue data for stations
+ * and interfaces in the .add_interface and .sta_add ops.
+ *
+ * The driver can't access the queue directly. To dequeue a frame from a
+ * txq, it calls ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a
+ * queue, it calls the .wake_tx_queue driver op.
+ *
+ * Drivers can optionally delegate responsibility for scheduling queues to
+ * mac80211, to take advantage of airtime fairness accounting. In this case, to
+ * obtain the next queue to pull frames from, the driver calls
+ * ieee80211_next_txq(). The driver is then expected to return the txq using
+ * ieee80211_return_txq().
+ *
+ * For AP powersave TIM handling, the driver only needs to indicate if it has
+ * buffered packets in the driver specific data structures by calling
+ * ieee80211_sta_set_buffered(). For frames buffered in the ieee80211_txq
+ * struct, mac80211 sets the appropriate TIM PVB bits and calls
+ * .release_buffered_frames().
+ * In that callback the driver is therefore expected to release its own
+ * buffered frames and afterwards also frames from the ieee80211_txq (obtained
+ * via the usual ieee80211_tx_dequeue).
+ */
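
A hedged sketch of the pull model described above (the demo_* names are
placeholders); the schedule_start/end bracketing uses mac80211's airtime
scheduling API:

    /* Drain frames mac80211 has queued for one access category. */
    static void demo_pull_ac(struct ieee80211_hw *hw, u8 ac)
    {
            struct ieee80211_txq *txq;

            ieee80211_txq_schedule_start(hw, ac);
            while ((txq = ieee80211_next_txq(hw, ac))) {
                    struct sk_buff *skb;

                    while ((skb = ieee80211_tx_dequeue(hw, txq)))
                            demo_hw_enqueue(hw, skb);   /* placeholder */
                    ieee80211_return_txq(hw, txq, false);
            }
            ieee80211_txq_schedule_end(hw, ac);
    }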
+
+/**
+ * DOC: HW timestamping
+ *
+ * Timing Measurement and Fine Timing Measurement require accurate timestamps
+ * of the action frames TX/RX and their respective acks.
+ *
+ * To report hardware timestamps for Timing Measurement or Fine Timing
+ * Measurement frame RX, the low level driver should set the SKB's hwtstamp
+ * field to the frame RX timestamp and report the ack TX timestamp in the
+ * ieee80211_rx_status struct.
+ *
+ * Similarly, to report hardware timestamps for Timing Measurement or Fine
+ * Timing Measurement frame TX, the driver should set the SKB's hwtstamp field
+ * to the frame TX timestamp and report the ack RX timestamp in the
+ * ieee80211_tx_status struct.
+ */
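
A hedged one-line sketch of the RX side (demo_read_mac_clock is a
placeholder for however the device exposes its clock):

    /* Stamp a Timing Measurement frame with the device RX time. */
    skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(demo_read_mac_clock(hw));
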
+struct device;
+
+/**
+ * enum ieee80211_max_queues - maximum number of queues
+ *
+ * @IEEE80211_MAX_QUEUES: Maximum number of regular device queues.
+ * @IEEE80211_MAX_QUEUE_MAP: bitmap with maximum queues set
+ */
+enum ieee80211_max_queues {
+ IEEE80211_MAX_QUEUES = 16,
+ IEEE80211_MAX_QUEUE_MAP = BIT(IEEE80211_MAX_QUEUES) - 1,
+};
+
+#define IEEE80211_INVAL_HW_QUEUE 0xff
+
+/**
+ * enum ieee80211_ac_numbers - AC numbers as used in mac80211
+ * @IEEE80211_AC_VO: voice
+ * @IEEE80211_AC_VI: video
+ * @IEEE80211_AC_BE: best effort
+ * @IEEE80211_AC_BK: background
+ */
+enum ieee80211_ac_numbers {
+ IEEE80211_AC_VO = 0,
+ IEEE80211_AC_VI = 1,
+ IEEE80211_AC_BE = 2,
+ IEEE80211_AC_BK = 3,
+};
+
+/**
+ * struct ieee80211_tx_queue_params - transmit queue configuration
+ *
+ * The information provided in this structure is required for QoS
+ * transmit queue configuration. Cf. IEEE 802.11 7.3.2.29.
+ *
+ * @aifs: arbitration interframe space [0..255]
+ * @cw_min: minimum contention window [a value of the form
+ * 2^n-1 in the range 1..32767]
+ * @cw_max: maximum contention window [like @cw_min]
+ * @txop: maximum burst time in units of 32 usecs, 0 meaning disabled
+ * @acm: is mandatory admission control required for the access category
+ * @uapsd: is U-APSD mode enabled for the queue
+ * @mu_edca: is the MU EDCA configured
+ * @mu_edca_param_rec: MU EDCA Parameter Record for HE
+ */
+struct ieee80211_tx_queue_params {
+ u16 txop;
+ u16 cw_min;
+ u16 cw_max;
+ u8 aifs;
+ bool acm;
+ bool uapsd;
+ bool mu_edca;
+ struct ieee80211_he_mu_edca_param_ac_rec mu_edca_param_rec;
+};
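
As a hedged example, the 802.11 default EDCA parameter set for the
best-effort AC would be expressed as:

    static const struct ieee80211_tx_queue_params demo_be_params = {
            .aifs   = 3,    /* AIFSN for AC_BE */
            .cw_min = 15,   /* aCWmin */
            .cw_max = 1023, /* aCWmax */
            .txop   = 0,    /* TXOP disabled */
    };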
+
+struct ieee80211_low_level_stats {
+ unsigned int dot11ACKFailureCount;
+ unsigned int dot11RTSFailureCount;
+ unsigned int dot11FCSErrorCount;
+ unsigned int dot11RTSSuccessCount;
+};
+
+/**
+ * enum ieee80211_chanctx_change - change flag for channel context
+ * @IEEE80211_CHANCTX_CHANGE_WIDTH: The channel width changed
+ * @IEEE80211_CHANCTX_CHANGE_RX_CHAINS: The number of RX chains changed
+ * @IEEE80211_CHANCTX_CHANGE_RADAR: radar detection flag changed
+ * @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel,
+ * this is used only with channel switching with CSA
+ * @IEEE80211_CHANCTX_CHANGE_MIN_WIDTH: The min required channel width changed
+ */
+enum ieee80211_chanctx_change {
+ IEEE80211_CHANCTX_CHANGE_WIDTH = BIT(0),
+ IEEE80211_CHANCTX_CHANGE_RX_CHAINS = BIT(1),
+ IEEE80211_CHANCTX_CHANGE_RADAR = BIT(2),
+ IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(3),
+ IEEE80211_CHANCTX_CHANGE_MIN_WIDTH = BIT(4),
+};
+
+/**
+ * struct ieee80211_chanctx_conf - channel context that vifs may be tuned to
+ *
+ * This is the driver-visible part. The ieee80211_chanctx
+ * that contains it is visible in mac80211 only.
+ *
+ * @def: the channel definition
+ * @min_def: the minimum channel definition currently required.
+ * @rx_chains_static: The number of RX chains that must always be
+ * active on the channel to receive MIMO transmissions
+ * @rx_chains_dynamic: The number of RX chains that must be enabled
+ * after RTS/CTS handshake to receive SMPS MIMO transmissions;
+ * this will always be >= @rx_chains_static.
+ * @radar_enabled: whether radar detection is enabled on this channel.
+ * @drv_priv: data area for driver use, will always be aligned to
+ * sizeof(void *), size is determined in hw information.
+ */
+struct ieee80211_chanctx_conf {
+ struct cfg80211_chan_def def;
+ struct cfg80211_chan_def min_def;
+
+ u8 rx_chains_static, rx_chains_dynamic;
+
+ bool radar_enabled;
+
+ u8 drv_priv[] __aligned(sizeof(void *));
+};
+
+/**
+ * enum ieee80211_chanctx_switch_mode - channel context switch mode
+ * @CHANCTX_SWMODE_REASSIGN_VIF: Both old and new contexts already
+ * exist (and will continue to exist), but the virtual interface
+ * needs to be switched from one to the other.
+ * @CHANCTX_SWMODE_SWAP_CONTEXTS: The old context exists but will cease
+ * to exist with this call, the new context doesn't exist but
+ * will be active after this call, the virtual interface switches
+ * from the old to the new (note that the driver may of course
+ * implement this as an on-the-fly chandef switch of the existing
+ * hardware context, but the mac80211 pointer for the old context
+ * will cease to exist and only the new one will later be used
+ * for changes/removal.)
+ */
+enum ieee80211_chanctx_switch_mode {
+ CHANCTX_SWMODE_REASSIGN_VIF,
+ CHANCTX_SWMODE_SWAP_CONTEXTS,
+};
+
+/**
+ * struct ieee80211_vif_chanctx_switch - vif chanctx switch information
+ *
+ * This structure is used to pass information about a vif that
+ * needs to switch from one chanctx to another. The
+ * &ieee80211_chanctx_switch_mode defines how the switch should be
+ * done.
+ *
+ * @vif: the vif that should be switched from old_ctx to new_ctx
+ * @link_conf: the link conf that's switching
+ * @old_ctx: the old context to which the vif was assigned
+ * @new_ctx: the new context to which the vif must be assigned
+ */
+struct ieee80211_vif_chanctx_switch {
+ struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_chanctx_conf *old_ctx;
+ struct ieee80211_chanctx_conf *new_ctx;
+};
+
+/**
+ * enum ieee80211_bss_change - BSS change notification flags
+ *
+ * These flags are used with the bss_info_changed(), link_info_changed()
+ * and vif_cfg_changed() callbacks to indicate which parameter(s) changed.
+ *
+ * @BSS_CHANGED_ASSOC: association status changed (associated/disassociated),
+ * also implies a change in the AID.
+ * @BSS_CHANGED_ERP_CTS_PROT: CTS protection changed
+ * @BSS_CHANGED_ERP_PREAMBLE: preamble changed
+ * @BSS_CHANGED_ERP_SLOT: slot timing changed
+ * @BSS_CHANGED_HT: 802.11n parameters changed
+ * @BSS_CHANGED_BASIC_RATES: Basic rateset changed
+ * @BSS_CHANGED_BEACON_INT: Beacon interval changed
+ * @BSS_CHANGED_BSSID: BSSID changed, for whatever
+ * reason (IBSS and managed mode)
+ * @BSS_CHANGED_BEACON: Beacon data changed, retrieve
+ * new beacon (beaconing modes)
+ * @BSS_CHANGED_BEACON_ENABLED: Beaconing should be
+ * enabled/disabled (beaconing modes)
+ * @BSS_CHANGED_CQM: Connection quality monitor config changed
+ * @BSS_CHANGED_IBSS: IBSS join status changed
+ * @BSS_CHANGED_ARP_FILTER: Hardware ARP filter address list or state changed.
+ * @BSS_CHANGED_QOS: QoS for this association was enabled/disabled. Note
+ * that it is only ever disabled for station mode.
+ * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
+ * @BSS_CHANGED_SSID: SSID changed for this BSS (AP and IBSS mode)
+ * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode)
+ * @BSS_CHANGED_PS: PS changed for this BSS (STA mode)
+ * @BSS_CHANGED_TXPOWER: TX power setting changed for this interface
+ * @BSS_CHANGED_P2P_PS: P2P powersave settings (CTWindow, opportunistic PS)
+ * changed
+ * @BSS_CHANGED_BEACON_INFO: Data from the AP's beacon became available:
+ * currently only the dtim_period is reported.
+ * @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed,
+ * note that this is only called when it changes after the channel
+ * context had been assigned.
+ * @BSS_CHANGED_OCB: OCB join status changed
+ * @BSS_CHANGED_MU_GROUPS: VHT MU-MIMO group id or user position changed
+ * @BSS_CHANGED_KEEP_ALIVE: keep alive options (idle period or protected
+ * keep alive) changed.
+ * @BSS_CHANGED_MCAST_RATE: Multicast Rate setting changed for this interface
+ * @BSS_CHANGED_FTM_RESPONDER: fine timing measurement request responder
+ * functionality changed for this BSS (AP mode).
+ * @BSS_CHANGED_TWT: TWT status changed
+ * @BSS_CHANGED_HE_OBSS_PD: OBSS Packet Detection status changed.
+ * @BSS_CHANGED_HE_BSS_COLOR: BSS Color has changed
+ * @BSS_CHANGED_FILS_DISCOVERY: FILS discovery status changed.
+ * @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response
+ * status changed.
+ *
+ */
+enum ieee80211_bss_change {
+ BSS_CHANGED_ASSOC = 1<<0,
+ BSS_CHANGED_ERP_CTS_PROT = 1<<1,
+ BSS_CHANGED_ERP_PREAMBLE = 1<<2,
+ BSS_CHANGED_ERP_SLOT = 1<<3,
+ BSS_CHANGED_HT = 1<<4,
+ BSS_CHANGED_BASIC_RATES = 1<<5,
+ BSS_CHANGED_BEACON_INT = 1<<6,
+ BSS_CHANGED_BSSID = 1<<7,
+ BSS_CHANGED_BEACON = 1<<8,
+ BSS_CHANGED_BEACON_ENABLED = 1<<9,
+ BSS_CHANGED_CQM = 1<<10,
+ BSS_CHANGED_IBSS = 1<<11,
+ BSS_CHANGED_ARP_FILTER = 1<<12,
+ BSS_CHANGED_QOS = 1<<13,
+ BSS_CHANGED_IDLE = 1<<14,
+ BSS_CHANGED_SSID = 1<<15,
+ BSS_CHANGED_AP_PROBE_RESP = 1<<16,
+ BSS_CHANGED_PS = 1<<17,
+ BSS_CHANGED_TXPOWER = 1<<18,
+ BSS_CHANGED_P2P_PS = 1<<19,
+ BSS_CHANGED_BEACON_INFO = 1<<20,
+ BSS_CHANGED_BANDWIDTH = 1<<21,
+ BSS_CHANGED_OCB = 1<<22,
+ BSS_CHANGED_MU_GROUPS = 1<<23,
+ BSS_CHANGED_KEEP_ALIVE = 1<<24,
+ BSS_CHANGED_MCAST_RATE = 1<<25,
+ BSS_CHANGED_FTM_RESPONDER = 1<<26,
+ BSS_CHANGED_TWT = 1<<27,
+ BSS_CHANGED_HE_OBSS_PD = 1<<28,
+ BSS_CHANGED_HE_BSS_COLOR = 1<<29,
+ BSS_CHANGED_FILS_DISCOVERY = 1<<30,
+ BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
+
+ /* when adding here, make sure to change ieee80211_reconfig */
+};
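
A hedged sketch of how a driver typically consumes these bits in its
bss_info_changed() callback (the demo_* names are placeholders; the exact
callback prototype is defined by struct ieee80211_ops):

    static void demo_bss_info_changed(struct ieee80211_hw *hw,
                                      struct ieee80211_vif *vif,
                                      struct ieee80211_bss_conf *info,
                                      u64 changed)
    {
            if (changed & BSS_CHANGED_ERP_CTS_PROT)
                    demo_set_cts_prot(hw, info->use_cts_prot);
            if (changed & BSS_CHANGED_BEACON_INT)
                    demo_set_beacon_int(hw, info->beacon_int);
    }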
+
+/*
+ * The maximum number of IPv4 addresses listed for ARP filtering. If the number
+ * of addresses for an interface increases beyond this value, hardware ARP
+ * filtering will be disabled.
+ */
+#define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4
+
+/**
+ * enum ieee80211_event_type - event to be notified to the low level driver
+ * @RSSI_EVENT: AP's rssi crossed a threshold set by the driver.
+ * @MLME_EVENT: event related to MLME
+ * @BAR_RX_EVENT: a BAR was received
+ * @BA_FRAME_TIMEOUT: Frames were released from the reordering buffer because
+ * they timed out. This won't be called for each frame released, but only
+ * once each time the timeout triggers.
+ */
+enum ieee80211_event_type {
+ RSSI_EVENT,
+ MLME_EVENT,
+ BAR_RX_EVENT,
+ BA_FRAME_TIMEOUT,
+};
+
+/**
+ * enum ieee80211_rssi_event_data - relevant when event type is %RSSI_EVENT
+ * @RSSI_EVENT_HIGH: AP's rssi went above the threshold set by the driver.
+ * @RSSI_EVENT_LOW: AP's rssi went below the threshold set by the driver.
+ */
+enum ieee80211_rssi_event_data {
+ RSSI_EVENT_HIGH,
+ RSSI_EVENT_LOW,
+};
+
+/**
+ * struct ieee80211_rssi_event - data attached to an %RSSI_EVENT
+ * @data: See &enum ieee80211_rssi_event_data
+ */
+struct ieee80211_rssi_event {
+ enum ieee80211_rssi_event_data data;
+};
+
+/**
+ * enum ieee80211_mlme_event_data - relevant when event type is %MLME_EVENT
+ * @AUTH_EVENT: the MLME operation is authentication
+ * @ASSOC_EVENT: the MLME operation is association
+ * @DEAUTH_RX_EVENT: deauth received.
+ * @DEAUTH_TX_EVENT: deauth sent.
+ */
+enum ieee80211_mlme_event_data {
+ AUTH_EVENT,
+ ASSOC_EVENT,
+ DEAUTH_RX_EVENT,
+ DEAUTH_TX_EVENT,
+};
+
+/**
+ * enum ieee80211_mlme_event_status - relevant when event type is %MLME_EVENT
+ * @MLME_SUCCESS: the MLME operation completed successfully.
+ * @MLME_DENIED: the MLME operation was denied by the peer.
+ * @MLME_TIMEOUT: the MLME operation timed out.
+ */
+enum ieee80211_mlme_event_status {
+ MLME_SUCCESS,
+ MLME_DENIED,
+ MLME_TIMEOUT,
+};
+
+/**
+ * struct ieee80211_mlme_event - data attached to an %MLME_EVENT
+ * @data: See &enum ieee80211_mlme_event_data
+ * @status: See &enum ieee80211_mlme_event_status
+ * @reason: the reason code if applicable
+ */
+struct ieee80211_mlme_event {
+ enum ieee80211_mlme_event_data data;
+ enum ieee80211_mlme_event_status status;
+ u16 reason;
+};
+
+/**
+ * struct ieee80211_ba_event - data attached for BlockAck related events
+ * @sta: pointer to the &ieee80211_sta to which this event relates
+ * @tid: the tid
+ * @ssn: the starting sequence number (for %BAR_RX_EVENT)
+ */
+struct ieee80211_ba_event {
+ struct ieee80211_sta *sta;
+ u16 tid;
+ u16 ssn;
+};
+
+/**
+ * struct ieee80211_event - event to be sent to the driver
+ * @type: The event itself. See &enum ieee80211_event_type.
+ * @rssi: relevant if &type is %RSSI_EVENT
+ * @mlme: relevant if &type is %MLME_EVENT
+ * @ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT
+ * @u: union holding the fields above
+ */
+struct ieee80211_event {
+ enum ieee80211_event_type type;
+ union {
+ struct ieee80211_rssi_event rssi;
+ struct ieee80211_mlme_event mlme;
+ struct ieee80211_ba_event ba;
+ } u;
+};
+
+/**
+ * struct ieee80211_mu_group_data - STA's VHT MU-MIMO group data
+ *
+ * This structure describes the group id data of VHT MU-MIMO
+ *
+ * @membership: 64 bits array - a bit is set if station is member of the group
+ * @position: 2 bits per group id indicating the position in the group
+ */
+struct ieee80211_mu_group_data {
+ u8 membership[WLAN_MEMBERSHIP_LEN];
+ u8 position[WLAN_USER_POSITION_LEN];
+};
+
+/**
+ * struct ieee80211_ftm_responder_params - FTM responder parameters
+ *
+ * @lci: LCI subelement content
+ * @civicloc: CIVIC location subelement content
+ * @lci_len: LCI data length
+ * @civicloc_len: Civic data length
+ */
+struct ieee80211_ftm_responder_params {
+ const u8 *lci;
+ const u8 *civicloc;
+ size_t lci_len;
+ size_t civicloc_len;
+};
+
+/**
+ * struct ieee80211_fils_discovery - FILS discovery parameters from
+ * IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
+ *
+ * @min_interval: Minimum packet interval in TUs (0 - 10000)
+ * @max_interval: Maximum packet interval in TUs (0 - 10000)
+ */
+struct ieee80211_fils_discovery {
+ u32 min_interval;
+ u32 max_interval;
+};
+
+/**
+ * struct ieee80211_bss_conf - holds the BSS's changing parameters
+ *
+ * This structure keeps information about a BSS (and an association
+ * to that BSS) that can change during the lifetime of the BSS.
+ *
+ * @addr: (link) address used locally
+ * @link_id: link ID, or 0 for non-MLO
+ * @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
+ * @uora_exists: is the UORA element advertised by AP
+ * @ack_enabled: indicates support to receive a multi-TID that solicits either
+ * ACK, BACK or both
+ * @uora_ocw_range: UORA element's OCW Range field
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @he_support: does this BSS support HE
+ * @twt_requester: does this BSS support TWT requester (relevant for managed
+ * mode only, set if the AP advertises TWT responder role)
+ * @twt_responder: does this BSS support TWT responder (relevant for managed
+ * mode only, set if the AP advertises TWT responder role)
+ * @twt_protected: does this BSS support protected TWT frames
+ * @twt_broadcast: does this BSS support broadcast TWT
+ * @use_cts_prot: use CTS protection
+ * @use_short_preamble: use 802.11b short preamble
+ * @use_short_slot: use short slot time (only relevant for ERP)
+ * @dtim_period: num of beacons before the next DTIM, for beaconing,
+ * valid in station mode only if after the driver was notified
+ * with the %BSS_CHANGED_BEACON_INFO flag, will be non-zero then.
+ * @sync_tsf: last beacon's/probe response's TSF timestamp (could be old
+ * as it may have been received during scanning long ago). If the
+ * HW flag %IEEE80211_HW_TIMING_BEACON_ONLY is set, then this can
+ * only come from a beacon, but might not become valid until after
+ * association when a beacon is received (which is notified with the
+ * %BSS_CHANGED_DTIM flag.). See also sync_dtim_count important notice.
+ * @sync_device_ts: the device timestamp corresponding to the sync_tsf,
+ * the driver/device can use this to calculate synchronisation
+ * (see @sync_tsf). See also sync_dtim_count important notice.
+ * @sync_dtim_count: Only valid when %IEEE80211_HW_TIMING_BEACON_ONLY
+ * is requested, see @sync_tsf/@sync_device_ts.
+ * IMPORTANT: These three sync_* parameters may be out of sync
+ * by the time the driver uses them. The synchronized view is currently
+ * guaranteed only in certain callbacks.
+ * Note also that this is not used with MLD associations, mac80211 doesn't
+ * know how to track beacons for all of the links for this.
+ * @beacon_int: beacon interval
+ * @assoc_capability: capabilities taken from assoc resp
+ * @basic_rates: bitmap of basic rates, each bit stands for an
+ * index into the rate table configured by the driver in
+ * the current band.
+ * @beacon_rate: associated AP's beacon TX rate
+ * @mcast_rate: per-band multicast rate index + 1 (0: disabled)
+ * @bssid: The BSSID for this BSS
+ * @enable_beacon: whether beaconing should be enabled or not
+ * @chandef: Channel definition for this BSS -- the hardware might be
+ * configured for a higher bandwidth than this BSS uses, for example.
+ * @mu_group: VHT MU-MIMO group membership data
+ * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
+ * This field is only valid when the channel is a wide HT/VHT channel.
+ * Note that with TDLS this can be the case (channel is HT, protection must
+ * be used from this field) even when the BSS association isn't using HT.
+ * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
+ * implies disabled. As with the cfg80211 callback, a change here should
+ * cause an event to be sent indicating where the current value is in
+ * relation to the newly configured threshold.
+ * @cqm_rssi_low: Connection quality monitor RSSI lower threshold, a zero value
+ * implies disabled. This is an alternative mechanism to the single
+ * threshold event and can't be enabled simultaneously with it.
+ * @cqm_rssi_high: Connection quality monitor RSSI upper threshold.
+ * @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
+ * @qos: This is a QoS-enabled BSS.
+ * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
+ * @txpower: TX power in dBm. INT_MIN means not configured.
+ * @txpower_type: TX power adjustment used to control per packet Transmit
+ * Power Control (TPC) in lower driver for the current vif. In particular
+ * TPC is enabled if value passed in %txpower_type is
+ * NL80211_TX_POWER_LIMITED (allow using less than specified from
+ * userspace), whereas TPC is disabled if %txpower_type is set to
+ * NL80211_TX_POWER_FIXED (use value configured from userspace)
+ * @p2p_noa_attr: P2P NoA attribute for P2P powersave
+ * @allow_p2p_go_ps: indication for AP or P2P GO interface, whether it's allowed
+ * to use P2P PS mechanism or not. AP/P2P GO is not allowed to use P2P PS
+ * if it has associated clients without P2P PS support.
+ * @max_idle_period: the time period during which the station can refrain from
+ * transmitting frames to its associated AP without being disassociated.
+ * In units of 1000 TUs. Zero value indicates that the AP did not include
+ * a (valid) BSS Max Idle Period Element.
+ * @protected_keep_alive: if set, indicates that the station should send an RSN
+ * protected frame to the AP to reset the idle timer at the AP for the
+ * station.
+ * @ftm_responder: whether to enable or disable fine timing measurement FTM
+ * responder functionality.
+ * @ftmr_params: configurable lci/civic parameter when enabling FTM responder.
+ * @nontransmitted: this BSS is a nontransmitted BSS profile
+ * @transmitter_bssid: the address of transmitter AP
+ * @bssid_index: index inside the multiple BSSID set
+ * @bssid_indicator: 2^bssid_indicator is the maximum number of APs in set
+ * @ema_ap: AP supports enhancements of discovery and advertisement of
+ * nontransmitted BSSIDs
+ * @profile_periodicity: the least number of beacon frames that need to be
+ * received in order to discover all the nontransmitted BSSIDs in the set.
+ * @he_oper: HE operation information of the BSS (AP/Mesh) or of the AP we are
+ * connected to (STA)
+ * @he_obss_pd: OBSS Packet Detection parameters.
+ * @he_bss_color: BSS coloring settings, if BSS supports HE
+ * @fils_discovery: FILS discovery configuration
+ * @unsol_bcast_probe_resp_interval: Unsolicited broadcast probe response
+ * interval.
+ * @beacon_tx_rate: The configured beacon transmit rate that needs to be passed
+ * to driver when rate control is offloaded to firmware.
+ * @power_type: power type of BSS for 6 GHz
+ * @tx_pwr_env: transmit power envelope array of BSS.
+ * @tx_pwr_env_num: number of @tx_pwr_env.
+ * @pwr_reduction: power constraint of BSS.
+ * @eht_support: does this BSS support EHT
+ * @csa_active: marks whether a channel switch is going on. Internally it is
+ * write-protected by sdata_lock and local->mtx so holding either is fine
+ * for read access.
+ * @mu_mimo_owner: indicates interface owns MU-MIMO capability
+ * @chanctx_conf: The channel context this interface is assigned to, or %NULL
+ * when it is not assigned. This pointer is RCU-protected due to the TX
+ * path needing to access it; even though the netdev carrier will always
+ * be off when it is %NULL there can still be races and packets could be
+ * processed after it switches back to %NULL.
+ * @color_change_active: marks whether a color change is ongoing. Internally it is
+ * write-protected by sdata_lock and local->mtx so holding either is fine
+ * for read access.
+ * @color_change_color: the bss color that will be used after the change.
+ */
+struct ieee80211_bss_conf {
+ const u8 *bssid;
+ unsigned int link_id;
+ u8 addr[ETH_ALEN] __aligned(2);
+ u8 htc_trig_based_pkt_ext;
+ bool uora_exists;
+ u8 uora_ocw_range;
+ u16 frame_time_rts_th;
+ bool he_support;
+ bool twt_requester;
+ bool twt_responder;
+ bool twt_protected;
+ bool twt_broadcast;
+ /* erp related data */
+ bool use_cts_prot;
+ bool use_short_preamble;
+ bool use_short_slot;
+ bool enable_beacon;
+ u8 dtim_period;
+ u16 beacon_int;
+ u16 assoc_capability;
+ u64 sync_tsf;
+ u32 sync_device_ts;
+ u8 sync_dtim_count;
+ u32 basic_rates;
+ struct ieee80211_rate *beacon_rate;
+ int mcast_rate[NUM_NL80211_BANDS];
+ u16 ht_operation_mode;
+ s32 cqm_rssi_thold;
+ u32 cqm_rssi_hyst;
+ s32 cqm_rssi_low;
+ s32 cqm_rssi_high;
+ struct cfg80211_chan_def chandef;
+ struct ieee80211_mu_group_data mu_group;
+ bool qos;
+ bool hidden_ssid;
+ int txpower;
+ enum nl80211_tx_power_setting txpower_type;
+ struct ieee80211_p2p_noa_attr p2p_noa_attr;
+ bool allow_p2p_go_ps;
+ u16 max_idle_period;
+ bool protected_keep_alive;
+ bool ftm_responder;
+ struct ieee80211_ftm_responder_params *ftmr_params;
+ /* Multiple BSSID data */
+ bool nontransmitted;
+ u8 transmitter_bssid[ETH_ALEN];
+ u8 bssid_index;
+ u8 bssid_indicator;
+ bool ema_ap;
+ u8 profile_periodicity;
+ struct {
+ u32 params;
+ u16 nss_set;
+ } he_oper;
+ struct ieee80211_he_obss_pd he_obss_pd;
+ struct cfg80211_he_bss_color he_bss_color;
+ struct ieee80211_fils_discovery fils_discovery;
+ u32 unsol_bcast_probe_resp_interval;
+ struct cfg80211_bitrate_mask beacon_tx_rate;
+ enum ieee80211_ap_reg_power power_type;
+ struct ieee80211_tx_pwr_env tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT];
+ u8 tx_pwr_env_num;
+ u8 pwr_reduction;
+ bool eht_support;
+
+ bool csa_active;
+ bool mu_mimo_owner;
+ struct ieee80211_chanctx_conf __rcu *chanctx_conf;
+
+ bool color_change_active;
+ u8 color_change_color;
+};
+
+/**
+ * enum mac80211_tx_info_flags - flags to describe transmission information/status
+ *
+ * These flags are used with the @flags member of &ieee80211_tx_info.
+ *
+ * @IEEE80211_TX_CTL_REQ_TX_STATUS: require TX status callback for this frame.
+ * @IEEE80211_TX_CTL_ASSIGN_SEQ: The driver has to assign a sequence
+ * number to this frame, taking care of not overwriting the fragment
+ * number and increasing the sequence number only when the
+ * IEEE80211_TX_CTL_FIRST_FRAGMENT flag is set. mac80211 will properly
+ * assign sequence numbers to QoS-data frames but cannot do so correctly
+ * for non-QoS-data and management frames because beacons need them from
+ * that counter as well and mac80211 cannot guarantee proper sequencing.
+ * If this flag is set, the driver should instruct the hardware to
+ * assign a sequence number to the frame or assign one itself. Cf. IEEE
+ * 802.11-2007 7.1.3.4.1 paragraph 3. This flag will always be set for
+ * beacons and always be clear for frames without a sequence number field.
+ * @IEEE80211_TX_CTL_NO_ACK: tell the low level not to wait for an ack
+ * @IEEE80211_TX_CTL_CLEAR_PS_FILT: clear powersave filter for destination
+ * station
+ * @IEEE80211_TX_CTL_FIRST_FRAGMENT: this is a first fragment of the frame
+ * @IEEE80211_TX_CTL_SEND_AFTER_DTIM: send this frame after DTIM beacon
+ * @IEEE80211_TX_CTL_AMPDU: this frame should be sent as part of an A-MPDU
+ * @IEEE80211_TX_CTL_INJECTED: Frame was injected, internal to mac80211.
+ * @IEEE80211_TX_STAT_TX_FILTERED: The frame was not transmitted
+ * because the destination STA was in powersave mode. Note that to
+ * avoid race conditions, the filter must be set by the hardware or
+ * firmware upon receiving a frame that indicates that the station
+ * went to sleep (must be done on device to filter frames already on
+ * the queue) and may only be unset after mac80211 gives the OK for
+ * that by setting the IEEE80211_TX_CTL_CLEAR_PS_FILT (see above),
+ * since only then is it guaranteed that no more frames are in the
+ * hardware queue.
+ * @IEEE80211_TX_STAT_ACK: Frame was acknowledged
+ * @IEEE80211_TX_STAT_AMPDU: The frame was aggregated, so status
+ * is for the whole aggregation.
+ * @IEEE80211_TX_STAT_AMPDU_NO_BACK: no block ack was returned,
+ * so consider using block ack request (BAR).
+ * @IEEE80211_TX_CTL_RATE_CTRL_PROBE: internal to mac80211, can be
+ * set by rate control algorithms to indicate probe rate, will
+ * be cleared for fragmented frames (except on the last fragment)
+ * @IEEE80211_TX_INTFL_OFFCHAN_TX_OK: Internal to mac80211. Used to indicate
+ * that a frame can be transmitted while the queues are stopped for
+ * off-channel operation.
+ * @IEEE80211_TX_CTL_HW_80211_ENCAP: This frame uses hardware encapsulation
+ * (header conversion)
+ * @IEEE80211_TX_INTFL_RETRIED: completely internal to mac80211,
+ * used to indicate that a frame was already retried due to PS
+ * @IEEE80211_TX_INTFL_DONT_ENCRYPT: completely internal to mac80211,
+ * used to indicate frame should not be encrypted
+ * @IEEE80211_TX_CTL_NO_PS_BUFFER: This frame is a response to a poll
+ * frame (PS-Poll or uAPSD) or a non-bufferable MMPDU and must
+ * be sent although the station is in powersave mode.
+ * @IEEE80211_TX_CTL_MORE_FRAMES: More frames will be passed to the
+ * transmit function after the current frame, this can be used
+ * by drivers to kick the DMA queue only if unset or when the
+ * queue gets full.
+ * @IEEE80211_TX_INTFL_RETRANSMISSION: This frame is being retransmitted
+ * after TX status because the destination was asleep, it must not
+ * be modified again (no seqno assignment, crypto, etc.)
+ * @IEEE80211_TX_INTFL_MLME_CONN_TX: This frame was transmitted by the MLME
+ * code for connection establishment, this indicates that its status
+ * should kick the MLME state machine.
+ * @IEEE80211_TX_INTFL_NL80211_FRAME_TX: Frame was requested through nl80211
+ * MLME command (internal to mac80211 to figure out whether to send TX
+ * status to user space)
+ * @IEEE80211_TX_CTL_LDPC: tells the driver to use LDPC for this frame
+ * @IEEE80211_TX_CTL_STBC: Enables Space-Time Block Coding (STBC) for this
+ * frame and selects the maximum number of streams that it can use.
+ * @IEEE80211_TX_CTL_TX_OFFCHAN: Marks this packet to be transmitted on
+ * the off-channel channel when a remain-on-channel offload is done
+ * in hardware -- normal packets still flow and are expected to be
+ * handled properly by the device.
+ * @IEEE80211_TX_INTFL_TKIP_MIC_FAILURE: Marks this packet to be used for TKIP
+ * testing. It will be sent out with incorrect Michael MIC key to allow
+ * TKIP countermeasures to be tested.
+ * @IEEE80211_TX_CTL_NO_CCK_RATE: This frame will be sent at non CCK rate.
+ * This flag is mainly used for management frames, especially P2P
+ * frames that should not be sent at a CCK rate in the 2 GHz band.
+ * @IEEE80211_TX_STATUS_EOSP: This packet marks the end of service period,
+ * when its status is reported the service period ends. For frames in
+ * an SP that mac80211 transmits, it is already set; for driver frames
+ * the driver may set this flag. It is also used to do the same for
+ * PS-Poll responses.
+ * @IEEE80211_TX_CTL_USE_MINRATE: This frame will be sent at the lowest rate.
+ * This flag is used to send nullfunc frames at the minimum rate when
+ * the nullfunc is used for connection monitoring purposes.
+ * @IEEE80211_TX_CTL_DONTFRAG: Don't fragment this packet even if it
+ * would be fragmented by size (this is optional, only used for
+ * monitor injection).
+ * @IEEE80211_TX_STAT_NOACK_TRANSMITTED: A frame that was marked with
+ * IEEE80211_TX_CTL_NO_ACK has been successfully transmitted without
+ * any errors (like issues specific to the driver/HW).
+ * This flag must not be set for frames that don't request no-ack
+ * behaviour with IEEE80211_TX_CTL_NO_ACK.
+ *
+ * Note: If you have to add new flags to the enumeration, then don't
+ * forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
+ */
+enum mac80211_tx_info_flags {
+ IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0),
+ IEEE80211_TX_CTL_ASSIGN_SEQ = BIT(1),
+ IEEE80211_TX_CTL_NO_ACK = BIT(2),
+ IEEE80211_TX_CTL_CLEAR_PS_FILT = BIT(3),
+ IEEE80211_TX_CTL_FIRST_FRAGMENT = BIT(4),
+ IEEE80211_TX_CTL_SEND_AFTER_DTIM = BIT(5),
+ IEEE80211_TX_CTL_AMPDU = BIT(6),
+ IEEE80211_TX_CTL_INJECTED = BIT(7),
+ IEEE80211_TX_STAT_TX_FILTERED = BIT(8),
+ IEEE80211_TX_STAT_ACK = BIT(9),
+ IEEE80211_TX_STAT_AMPDU = BIT(10),
+ IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11),
+ IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12),
+ IEEE80211_TX_INTFL_OFFCHAN_TX_OK = BIT(13),
+ IEEE80211_TX_CTL_HW_80211_ENCAP = BIT(14),
+ IEEE80211_TX_INTFL_RETRIED = BIT(15),
+ IEEE80211_TX_INTFL_DONT_ENCRYPT = BIT(16),
+ IEEE80211_TX_CTL_NO_PS_BUFFER = BIT(17),
+ IEEE80211_TX_CTL_MORE_FRAMES = BIT(18),
+ IEEE80211_TX_INTFL_RETRANSMISSION = BIT(19),
+ IEEE80211_TX_INTFL_MLME_CONN_TX = BIT(20),
+ IEEE80211_TX_INTFL_NL80211_FRAME_TX = BIT(21),
+ IEEE80211_TX_CTL_LDPC = BIT(22),
+ IEEE80211_TX_CTL_STBC = BIT(23) | BIT(24),
+ IEEE80211_TX_CTL_TX_OFFCHAN = BIT(25),
+ IEEE80211_TX_INTFL_TKIP_MIC_FAILURE = BIT(26),
+ IEEE80211_TX_CTL_NO_CCK_RATE = BIT(27),
+ IEEE80211_TX_STATUS_EOSP = BIT(28),
+ IEEE80211_TX_CTL_USE_MINRATE = BIT(29),
+ IEEE80211_TX_CTL_DONTFRAG = BIT(30),
+ IEEE80211_TX_STAT_NOACK_TRANSMITTED = BIT(31),
+};
+
+#define IEEE80211_TX_CTL_STBC_SHIFT 23
+
+#define IEEE80211_TX_RC_S1G_MCS IEEE80211_TX_RC_VHT_MCS
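+
+/*
+ * Illustrative example (not part of this header): decoding the two-bit
+ * STBC field from a tx info flags word using the mask and shift above;
+ * 0 means STBC is not used, 1-3 give the stream count.
+ */
+static inline u8 example_tx_ctl_stbc_streams(u32 tx_info_flags)
+{
+	return (tx_info_flags & IEEE80211_TX_CTL_STBC) >>
+	       IEEE80211_TX_CTL_STBC_SHIFT;
+}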
+
+/**
+ * enum mac80211_tx_control_flags - flags to describe transmit control
+ *
+ * @IEEE80211_TX_CTRL_PORT_CTRL_PROTO: this frame is a port control
+ * protocol frame (e.g. EAP)
+ * @IEEE80211_TX_CTRL_PS_RESPONSE: This frame is a response to a poll
+ * frame (PS-Poll or uAPSD).
+ * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
+ * @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
+ * @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
+ * @IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP: This frame skips mesh path lookup
+ * @IEEE80211_TX_INTCFL_NEED_TXPROCESSING: completely internal to mac80211,
+ * used to indicate that a pending frame requires TX processing before
+ * it can be sent out.
+ * @IEEE80211_TX_CTRL_NO_SEQNO: Do not overwrite the sequence number that
+ * has already been assigned to this frame.
+ * @IEEE80211_TX_CTRL_DONT_REORDER: This frame should not be reordered
+ * relative to other frames that have this flag set, independent
+ * of their QoS TID or other priority field values.
+ * @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally
+ * for sequence number assignment
+ * @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this
+ * frame should be transmitted on the specific link. This really is
+ * only relevant for frames that do not have data present, and is
+ * also not used for 802.3 format frames. Note that even if the frame
+ * is on a specific link, address translation might still apply if
+ * it's intended for an MLD.
+ *
+ * These flags are used in tx_info->control.flags.
+ */
+enum mac80211_tx_control_flags {
+ IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0),
+ IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
+ IEEE80211_TX_CTRL_RATE_INJECT = BIT(2),
+ IEEE80211_TX_CTRL_AMSDU = BIT(3),
+ IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
+ IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP = BIT(5),
+ IEEE80211_TX_INTCFL_NEED_TXPROCESSING = BIT(6),
+ IEEE80211_TX_CTRL_NO_SEQNO = BIT(7),
+ IEEE80211_TX_CTRL_DONT_REORDER = BIT(8),
+ IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = BIT(9),
+ IEEE80211_TX_CTRL_MLO_LINK = 0xf0000000,
+};
+
+#define IEEE80211_LINK_UNSPECIFIED 0xf
+#define IEEE80211_TX_CTRL_MLO_LINK_UNSPEC \
+ u32_encode_bits(IEEE80211_LINK_UNSPECIFIED, \
+ IEEE80211_TX_CTRL_MLO_LINK)
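+
+/*
+ * Illustrative example (hypothetical helper, not part of mac80211):
+ * reading the MLO link ID back out of info->control.flags; compare the
+ * result against IEEE80211_LINK_UNSPECIFIED to detect "no specific link".
+ */
+static inline u8 example_tx_ctrl_mlo_link_id(u32 control_flags)
+{
+	return u32_get_bits(control_flags, IEEE80211_TX_CTRL_MLO_LINK);
+}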
+
+/**
+ * enum mac80211_tx_status_flags - flags to describe transmit status
+ *
+ * @IEEE80211_TX_STATUS_ACK_SIGNAL_VALID: ACK signal is valid
+ *
+ * These flags are used in tx_info->status.flags.
+ */
+enum mac80211_tx_status_flags {
+ IEEE80211_TX_STATUS_ACK_SIGNAL_VALID = BIT(0),
+};
+
+/*
+ * This definition is used as a mask to clear all temporary flags, which are
+ * set by the tx handlers for each transmission attempt by the mac80211 stack.
+ */
+#define IEEE80211_TX_TEMPORARY_FLAGS (IEEE80211_TX_CTL_NO_ACK | \
+ IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_FIRST_FRAGMENT | \
+ IEEE80211_TX_CTL_SEND_AFTER_DTIM | IEEE80211_TX_CTL_AMPDU | \
+ IEEE80211_TX_STAT_TX_FILTERED | IEEE80211_TX_STAT_ACK | \
+ IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_STAT_AMPDU_NO_BACK | \
+ IEEE80211_TX_CTL_RATE_CTRL_PROBE | IEEE80211_TX_CTL_NO_PS_BUFFER | \
+ IEEE80211_TX_CTL_MORE_FRAMES | IEEE80211_TX_CTL_LDPC | \
+ IEEE80211_TX_CTL_STBC | IEEE80211_TX_STATUS_EOSP)
+
+/**
+ * enum mac80211_rate_control_flags - per-rate flags set by the
+ * Rate Control algorithm.
+ *
+ * These flags are set by the Rate control algorithm for each rate during tx,
+ * in the @flags member of struct ieee80211_tx_rate.
+ *
+ * @IEEE80211_TX_RC_USE_RTS_CTS: Use RTS/CTS exchange for this rate.
+ * @IEEE80211_TX_RC_USE_CTS_PROTECT: CTS-to-self protection is required.
+ * This is set if the current BSS requires ERP protection.
+ * @IEEE80211_TX_RC_USE_SHORT_PREAMBLE: Use short preamble.
+ * @IEEE80211_TX_RC_MCS: HT rate.
+ * @IEEE80211_TX_RC_VHT_MCS: VHT MCS rate, in this case the idx field is split
+ * into a higher 4 bits (Nss) and lower 4 bits (MCS number)
+ * @IEEE80211_TX_RC_GREEN_FIELD: Indicates whether this rate should be used in
+ * Greenfield mode.
+ * @IEEE80211_TX_RC_40_MHZ_WIDTH: Indicates if the Channel Width should be 40 MHz.
+ * @IEEE80211_TX_RC_80_MHZ_WIDTH: Indicates 80 MHz transmission
+ * @IEEE80211_TX_RC_160_MHZ_WIDTH: Indicates 160 MHz transmission
+ * (80+80 isn't supported yet)
+ * @IEEE80211_TX_RC_DUP_DATA: The frame should be transmitted on both of the
+ * adjacent 20 MHz channels, if the current channel type is
+ * NL80211_CHAN_HT40MINUS or NL80211_CHAN_HT40PLUS.
+ * @IEEE80211_TX_RC_SHORT_GI: Short Guard interval should be used for this rate.
+ */
+enum mac80211_rate_control_flags {
+ IEEE80211_TX_RC_USE_RTS_CTS = BIT(0),
+ IEEE80211_TX_RC_USE_CTS_PROTECT = BIT(1),
+ IEEE80211_TX_RC_USE_SHORT_PREAMBLE = BIT(2),
+
+ /* rate index is an HT/VHT MCS instead of an index */
+ IEEE80211_TX_RC_MCS = BIT(3),
+ IEEE80211_TX_RC_GREEN_FIELD = BIT(4),
+ IEEE80211_TX_RC_40_MHZ_WIDTH = BIT(5),
+ IEEE80211_TX_RC_DUP_DATA = BIT(6),
+ IEEE80211_TX_RC_SHORT_GI = BIT(7),
+ IEEE80211_TX_RC_VHT_MCS = BIT(8),
+ IEEE80211_TX_RC_80_MHZ_WIDTH = BIT(9),
+ IEEE80211_TX_RC_160_MHZ_WIDTH = BIT(10),
+};
+
+
+/* there are 40 bytes if you don't need the rateset to be kept */
+#define IEEE80211_TX_INFO_DRIVER_DATA_SIZE 40
+
+/* if you do need the rateset, then you have less space */
+#define IEEE80211_TX_INFO_RATE_DRIVER_DATA_SIZE 24
+
+/* maximum number of rate stages */
+#define IEEE80211_TX_MAX_RATES 4
+
+/* maximum number of rate table entries */
+#define IEEE80211_TX_RATE_TABLE_SIZE 4
+
+/**
+ * struct ieee80211_tx_rate - rate selection/status
+ *
+ * @idx: rate index to attempt to send with
+ * @flags: rate control flags (&enum mac80211_rate_control_flags)
+ * @count: number of tries in this rate before going to the next rate
+ *
+ * A value of -1 for @idx indicates an invalid rate and, if used
+ * in an array of retry rates, that no more rates should be tried.
+ *
+ * When used for transmit status reporting, the driver should
+ * always report the rate along with the flags it used.
+ *
+ * &struct ieee80211_tx_info contains an array of these structs
+ * in the control information, and it will be filled by the rate
+ * control algorithm according to what should be sent. For example,
+ * if this array contains, in the format { <idx>, <count> } the
+ * information::
+ *
+ * { 3, 2 }, { 2, 2 }, { 1, 4 }, { -1, 0 }, { -1, 0 }
+ *
+ * then this means that the frame should be transmitted
+ * up to twice at rate 3, up to twice at rate 2, and up to four
+ * times at rate 1 if it doesn't get acknowledged. Say it gets
+ * acknowledged by the peer after the fifth attempt, the status
+ * information should then contain::
+ *
+ * { 3, 2 }, { 2, 2 }, { 1, 1 }, { -1, 0 } ...
+ *
+ * since it was transmitted twice at rate 3, twice at rate 2
+ * and once at rate 1 after which we received an acknowledgement.
+ */
+struct ieee80211_tx_rate {
+ s8 idx;
+ u16 count:5,
+ flags:11;
+} __packed;
+
+#define IEEE80211_MAX_TX_RETRY 31
+
+static inline void ieee80211_rate_set_vht(struct ieee80211_tx_rate *rate,
+ u8 mcs, u8 nss)
+{
+ WARN_ON(mcs & ~0xF);
+ WARN_ON((nss - 1) & ~0x7);
+ rate->idx = ((nss - 1) << 4) | mcs;
+}
+
+static inline u8
+ieee80211_rate_get_vht_mcs(const struct ieee80211_tx_rate *rate)
+{
+ return rate->idx & 0xF;
+}
+
+static inline u8
+ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
+{
+ return (rate->idx >> 4) + 1;
+}
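+
+/*
+ * Illustrative usage of the VHT rate helpers above (hypothetical
+ * function): encode MCS 7 with 2 spatial streams and read it back.
+ */
+static inline void example_vht_rate_roundtrip(struct ieee80211_tx_rate *rate)
+{
+	rate->flags |= IEEE80211_TX_RC_VHT_MCS;
+	ieee80211_rate_set_vht(rate, 7, 2);
+	/* now ieee80211_rate_get_vht_mcs(rate) == 7 */
+	/* and ieee80211_rate_get_vht_nss(rate) == 2 */
+}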
+
+/**
+ * struct ieee80211_tx_info - skb transmit information
+ *
+ * This structure is placed in skb->cb for three uses:
+ * (1) mac80211 TX control - mac80211 tells the driver what to do
+ * (2) driver internal use (if applicable)
+ * (3) TX status information - driver tells mac80211 what happened
+ *
+ * @flags: transmit info flags, defined above
+ * @band: the band to transmit on (use e.g. for checking for races),
+ * not valid if the interface is an MLD since we won't know which
+ * link the frame will be transmitted on
+ * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
+ * @ack_frame_id: internal frame ID for TX status, used internally
+ * @tx_time_est: TX time estimate in units of 4us, used internally
+ * @control: union part for control data
+ * @control.rates: TX rates array to try
+ * @control.rts_cts_rate_idx: rate for RTS or CTS
+ * @control.use_rts: use RTS
+ * @control.use_cts_prot: use CTS protection
+ * @control.short_preamble: use short preamble (CCK only)
+ * @control.skip_table: skip externally configured rate table
+ * @control.jiffies: timestamp for expiry on powersave clients
+ * @control.vif: virtual interface (may be NULL)
+ * @control.hw_key: key to encrypt with (may be NULL)
+ * @control.flags: control flags, see &enum mac80211_tx_control_flags
+ * @control.enqueue_time: enqueue time (for iTXQs)
+ * @driver_rates: alias to @control.rates to reserve space
+ * @pad: padding
+ * @rate_driver_data: driver use area if driver needs @control.rates
+ * @status: union part for status data
+ * @status.rates: attempted rates
+ * @status.ack_signal: ACK signal
+ * @status.ampdu_ack_len: AMPDU ack length
+ * @status.ampdu_len: AMPDU length
+ * @status.antenna: (legacy, kept only for iwlegacy)
+ * @status.tx_time: airtime consumed for transmission; note this is only
+ * used for WMM AC, not for airtime fairness
+ * @status.flags: status flags, see &enum mac80211_tx_status_flags
+ * @status.status_driver_data: driver use area
+ * @ack: union part for pure ACK data
+ * @ack.cookie: cookie for the ACK
+ * @driver_data: array of driver_data pointers
+ * @ampdu_ack_len: number of acked aggregated frames.
+ * relevant only if IEEE80211_TX_STAT_AMPDU was set.
+ * @ampdu_len: number of aggregated frames.
+ * relevant only if IEEE80211_TX_STAT_AMPDU was set.
+ * @ack_signal: signal strength of the ACK frame
+ */
+struct ieee80211_tx_info {
+ /* common information */
+ u32 flags;
+ u32 band:3,
+ ack_frame_id:13,
+ hw_queue:4,
+ tx_time_est:10;
+ /* 2 free bits */
+
+ union {
+ struct {
+ union {
+ /* rate control */
+ struct {
+ struct ieee80211_tx_rate rates[
+ IEEE80211_TX_MAX_RATES];
+ s8 rts_cts_rate_idx;
+ u8 use_rts:1;
+ u8 use_cts_prot:1;
+ u8 short_preamble:1;
+ u8 skip_table:1;
+ /* 2 bytes free */
+ };
+ /* only needed before rate control */
+ unsigned long jiffies;
+ };
+ /* NB: vif can be NULL for injected frames */
+ struct ieee80211_vif *vif;
+ struct ieee80211_key_conf *hw_key;
+ u32 flags;
+ codel_time_t enqueue_time;
+ } control;
+ struct {
+ u64 cookie;
+ } ack;
+ struct {
+ struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
+ s32 ack_signal;
+ u8 ampdu_ack_len;
+ u8 ampdu_len;
+ u8 antenna;
+ u8 pad;
+ u16 tx_time;
+ u8 flags;
+ u8 pad2;
+ void *status_driver_data[16 / sizeof(void *)];
+ } status;
+ struct {
+ struct ieee80211_tx_rate driver_rates[
+ IEEE80211_TX_MAX_RATES];
+ u8 pad[4];
+
+ void *rate_driver_data[
+ IEEE80211_TX_INFO_RATE_DRIVER_DATA_SIZE / sizeof(void *)];
+ };
+ void *driver_data[
+ IEEE80211_TX_INFO_DRIVER_DATA_SIZE / sizeof(void *)];
+ };
+};
+
+static inline u16
+ieee80211_info_set_tx_time_est(struct ieee80211_tx_info *info, u16 tx_time_est)
+{
+ /* We only have 10 bits in tx_time_est, so store airtime
+ * in increments of 4us and clamp the maximum to 2**12-1
+ */
+ info->tx_time_est = min_t(u16, tx_time_est, 4095) >> 2;
+ return info->tx_time_est << 2;
+}
+
+static inline u16
+ieee80211_info_get_tx_time_est(struct ieee80211_tx_info *info)
+{
+ return info->tx_time_est << 2;
+}
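+
+/*
+ * Example: ieee80211_info_set_tx_time_est(info, 1500) stores 375 (in 4us
+ * units) in the 10-bit field and returns 1500; an input of 1501 rounds
+ * down to 1500, and inputs above 4095 are clamped, yielding 4092.
+ */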
+
+/**
+ * struct ieee80211_rate_status - mrr stage for status path
+ *
+ * This struct is used in struct ieee80211_tx_status to provide drivers a
+ * dynamic way to report about used rates and power levels per packet.
+ *
+ * @rate_idx: The rate that was actually used.
+ * @try_count: How many times the rate was tried.
+ * @tx_power_idx: An index into the ieee80211_hw->tx_power_levels list of the
+ * corresponding wifi hardware. The index shall point to the power level
+ * that was used when sending the packet.
+ */
+struct ieee80211_rate_status {
+ struct rate_info rate_idx;
+ u8 try_count;
+ u8 tx_power_idx;
+};
+
+/**
+ * struct ieee80211_tx_status - extended tx status info for rate control
+ *
+ * @sta: Station that the packet was transmitted for
+ * @info: Basic tx status information
+ * @skb: Packet skb (can be NULL if not provided by the driver)
+ * @rates: Mrr stages that were used when sending the packet
+ * @n_rates: Number of mrr stages (count of instances for @rates)
+ * @free_list: list where processed skbs are stored to be free'd by the driver
+ * @ack_hwtstamp: Hardware timestamp of the received ack in nanoseconds
+ * Only needed for Timing measurement and Fine timing measurement action
+ * frames. Only reported by devices that have timestamping enabled.
+ */
+struct ieee80211_tx_status {
+ struct ieee80211_sta *sta;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+ struct ieee80211_rate_status *rates;
+ ktime_t ack_hwtstamp;
+ u8 n_rates;
+
+ struct list_head *free_list;
+};
+
+/**
+ * struct ieee80211_scan_ies - descriptors for different blocks of IEs
+ *
+ * This structure is used to point to different blocks of IEs in HW scan
+ * and scheduled scan. These blocks contain the IEs passed by userspace
+ * and the ones generated by mac80211.
+ *
+ * @ies: pointers to band specific IEs.
+ * @len: lengths of band_specific IEs.
+ * @common_ies: IEs for all bands (especially vendor specific ones)
+ * @common_ie_len: length of the common_ies
+ */
+struct ieee80211_scan_ies {
+ const u8 *ies[NUM_NL80211_BANDS];
+ size_t len[NUM_NL80211_BANDS];
+ const u8 *common_ies;
+ size_t common_ie_len;
+};
+
+
+static inline struct ieee80211_tx_info *IEEE80211_SKB_CB(struct sk_buff *skb)
+{
+ return (struct ieee80211_tx_info *)skb->cb;
+}
+
+static inline struct ieee80211_rx_status *IEEE80211_SKB_RXCB(struct sk_buff *skb)
+{
+ return (struct ieee80211_rx_status *)skb->cb;
+}
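+
+/*
+ * Illustrative driver TX path sketch (hypothetical function, not part of
+ * mac80211): the cast helpers above are how a driver reaches the control
+ * information that mac80211 placed in skb->cb.
+ */
+static inline bool example_skb_wants_tx_status(struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	return info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS;
+}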
+
+/**
+ * ieee80211_tx_info_clear_status - clear TX status
+ *
+ * @info: The &struct ieee80211_tx_info to be cleared.
+ *
+ * When the driver passes an skb back to mac80211, it must report
+ * a number of things in TX status. This function clears everything
+ * in the TX status but the rate control information (it does clear
+ * the count since you need to fill that in anyway).
+ *
+ * NOTE: While the rates array is kept intact, this will wipe all of the
+ * driver_data fields in info, so it's up to the driver to restore
+ * any fields it needs after calling this helper.
+ */
+static inline void
+ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
+{
+ int i;
+
+ BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, status.rates) !=
+ offsetof(struct ieee80211_tx_info, control.rates));
+ BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, status.rates) !=
+ offsetof(struct ieee80211_tx_info, driver_rates));
+ BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, status.rates) != 8);
+ /* clear the rate counts */
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++)
+ info->status.rates[i].count = 0;
+ memset_after(&info->status, 0, rates);
+}
+
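+/*
+ * Illustrative status-reporting sketch (hypothetical driver function),
+ * assuming the frame was transmitted once at the first rate: clear the
+ * status area, then fill in what actually happened.
+ */
+static inline void example_fill_tx_status(struct sk_buff *skb, bool acked)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	ieee80211_tx_info_clear_status(info);
+	info->status.rates[0].count = 1;
+	if (acked)
+		info->flags |= IEEE80211_TX_STAT_ACK;
+	/* ... then hand the skb back, e.g. via ieee80211_tx_status() */
+}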
+
+/**
+ * enum mac80211_rx_flags - receive flags
+ *
+ * These flags are used with the @flag member of &struct ieee80211_rx_status.
+ * @RX_FLAG_MMIC_ERROR: Michael MIC error was reported on this frame.
+ * Use together with %RX_FLAG_MMIC_STRIPPED.
+ * @RX_FLAG_DECRYPTED: This frame was decrypted in hardware.
+ * @RX_FLAG_MMIC_STRIPPED: the Michael MIC is stripped off this frame,
+ * verification has been done by the hardware.
+ * @RX_FLAG_IV_STRIPPED: The IV and ICV are stripped from this frame.
+ * If this flag is set, the stack cannot do any replay detection
+ * hence the driver or hardware will have to do that.
+ * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this
+ * flag indicates that the PN was verified for replay protection.
+ * Note that this flag is also currently only supported when a frame
+ * is also decrypted (ie. @RX_FLAG_DECRYPTED must be set)
+ * @RX_FLAG_DUP_VALIDATED: The driver should set this flag if it did
+ * de-duplication by itself.
+ * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
+ * the frame.
+ * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PLCP check failed on
+ * the frame.
+ * @RX_FLAG_MACTIME_START: The timestamp passed in the RX status (@mactime
+ * field) is valid and contains the time the first symbol of the MPDU
+ * was received. This is useful in monitor mode and for proper IBSS
+ * merging.
+ * @RX_FLAG_MACTIME_END: The timestamp passed in the RX status (@mactime
+ * field) is valid and contains the time the last symbol of the MPDU
+ * (including FCS) was received.
+ * @RX_FLAG_MACTIME_PLCP_START: The timestamp passed in the RX status (@mactime
+ * field) is valid and contains the time the SYNC preamble was received.
+ * @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present.
+ * Valid only for data frames (mainly A-MPDU)
+ * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
+ * number (@ampdu_reference) must be populated and be a distinct number for
+ * each A-MPDU
+ * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
+ * subframes of a single A-MPDU
+ * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
+ * @RX_FLAG_AMPDU_DELIM_CRC_ERROR: A delimiter CRC error has been detected
+ * on this subframe
+ * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
+ * is stored in the @ampdu_delimiter_crc field)
+ * @RX_FLAG_MIC_STRIPPED: The MIC was stripped off this packet. Decryption
+ * was done by the hardware.
+ * @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
+ * processing it in any regular way.
+ * This is useful if drivers offload some frames but still want to report
+ * them for sniffing purposes.
+ * @RX_FLAG_SKIP_MONITOR: Process and report frame to all interfaces except
+ * monitor interfaces.
+ * This is useful if drivers offload some frames but still want to report
+ * them for sniffing purposes.
+ * @RX_FLAG_AMSDU_MORE: Some drivers may prefer to report separate A-MSDU
+ * subframes instead of one huge frame for performance reasons.
+ * All but the last MSDU from an A-MSDU should have this flag set. E.g.
+ * if an A-MSDU has 3 frames, the first 2 must have the flag set, while
+ * the 3rd (last) one must not have this flag set. The flag is used to
+ * deal with retransmission/duplication recovery properly since A-MSDU
+ * subframes share the same sequence number. Reported subframes can be
+ * either regular MSDUs or single A-MSDUs. Subframes must not be
+ * interleaved with other frames.
+ * @RX_FLAG_RADIOTAP_VENDOR_DATA: This frame contains vendor-specific
+ * radiotap data in the skb->data (before the frame) as described by
+ * the &struct ieee80211_vendor_radiotap.
+ * @RX_FLAG_ALLOW_SAME_PN: Allow the same PN as same packet before.
+ * This is used for AMSDU subframes which can have the same PN as
+ * the first subframe.
+ * @RX_FLAG_ICV_STRIPPED: The ICV is stripped from this frame. CRC checking must
+ * be done in the hardware.
+ * @RX_FLAG_AMPDU_EOF_BIT: Value of the EOF bit in the A-MPDU delimiter for this
+ * frame
+ * @RX_FLAG_AMPDU_EOF_BIT_KNOWN: The EOF value is known
+ * @RX_FLAG_RADIOTAP_HE: HE radiotap data is present
+ * (&struct ieee80211_radiotap_he, mac80211 will fill in
+ *
+ * - DATA3_DATA_MCS
+ * - DATA3_DATA_DCM
+ * - DATA3_CODING
+ * - DATA5_GI
+ * - DATA5_DATA_BW_RU_ALLOC
+ * - DATA6_NSTS
+ * - DATA3_STBC
+ *
+ * from the RX info data, so leave those zeroed when building this data)
+ * @RX_FLAG_RADIOTAP_HE_MU: HE MU radiotap data is present
+ * (&struct ieee80211_radiotap_he_mu)
+ * @RX_FLAG_RADIOTAP_LSIG: L-SIG radiotap data is present
+ * @RX_FLAG_NO_PSDU: use the frame only for radiotap reporting, with
+ * the "0-length PSDU" field included there. The value for it is
+ * in &struct ieee80211_rx_status. Note that if this value isn't
+ * known the frame shouldn't be reported.
+ * @RX_FLAG_8023: the frame has an 802.3 header (decap offload performed by
+ * hardware or driver)
+ */
+enum mac80211_rx_flags {
+ RX_FLAG_MMIC_ERROR = BIT(0),
+ RX_FLAG_DECRYPTED = BIT(1),
+ RX_FLAG_MACTIME_PLCP_START = BIT(2),
+ RX_FLAG_MMIC_STRIPPED = BIT(3),
+ RX_FLAG_IV_STRIPPED = BIT(4),
+ RX_FLAG_FAILED_FCS_CRC = BIT(5),
+ RX_FLAG_FAILED_PLCP_CRC = BIT(6),
+ RX_FLAG_MACTIME_START = BIT(7),
+ RX_FLAG_NO_SIGNAL_VAL = BIT(8),
+ RX_FLAG_AMPDU_DETAILS = BIT(9),
+ RX_FLAG_PN_VALIDATED = BIT(10),
+ RX_FLAG_DUP_VALIDATED = BIT(11),
+ RX_FLAG_AMPDU_LAST_KNOWN = BIT(12),
+ RX_FLAG_AMPDU_IS_LAST = BIT(13),
+ RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(14),
+ RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(15),
+ RX_FLAG_MACTIME_END = BIT(16),
+ RX_FLAG_ONLY_MONITOR = BIT(17),
+ RX_FLAG_SKIP_MONITOR = BIT(18),
+ RX_FLAG_AMSDU_MORE = BIT(19),
+ RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(20),
+ RX_FLAG_MIC_STRIPPED = BIT(21),
+ RX_FLAG_ALLOW_SAME_PN = BIT(22),
+ RX_FLAG_ICV_STRIPPED = BIT(23),
+ RX_FLAG_AMPDU_EOF_BIT = BIT(24),
+ RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(25),
+ RX_FLAG_RADIOTAP_HE = BIT(26),
+ RX_FLAG_RADIOTAP_HE_MU = BIT(27),
+ RX_FLAG_RADIOTAP_LSIG = BIT(28),
+ RX_FLAG_NO_PSDU = BIT(29),
+ RX_FLAG_8023 = BIT(30),
+};
+
+/**
+ * enum mac80211_rx_encoding_flags - MCS & bandwidth flags
+ *
+ * @RX_ENC_FLAG_SHORTPRE: Short preamble was used for this frame
+ * @RX_ENC_FLAG_SHORT_GI: Short guard interval was used
+ * @RX_ENC_FLAG_HT_GF: This frame was received in an HT-greenfield transmission,
+ * if the driver fills this value it should add
+ * %IEEE80211_RADIOTAP_MCS_HAVE_FMT
+ * to @hw.radiotap_mcs_details to advertise that fact.
+ * @RX_ENC_FLAG_LDPC: LDPC was used
+ * @RX_ENC_FLAG_STBC_MASK: 2-bit STBC bitmask: 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
+ * @RX_ENC_FLAG_BF: packet was beamformed
+ */
+enum mac80211_rx_encoding_flags {
+ RX_ENC_FLAG_SHORTPRE = BIT(0),
+ RX_ENC_FLAG_SHORT_GI = BIT(2),
+ RX_ENC_FLAG_HT_GF = BIT(3),
+ RX_ENC_FLAG_STBC_MASK = BIT(4) | BIT(5),
+ RX_ENC_FLAG_LDPC = BIT(6),
+ RX_ENC_FLAG_BF = BIT(7),
+};
+
+#define RX_ENC_FLAG_STBC_SHIFT 4
+
+enum mac80211_rx_encoding {
+ RX_ENC_LEGACY = 0,
+ RX_ENC_HT,
+ RX_ENC_VHT,
+ RX_ENC_HE,
+};
+
+/**
+ * struct ieee80211_rx_status - receive status
+ *
+ * The low-level driver should provide this information (the subset
+ * supported by hardware) to the 802.11 code with each received
+ * frame, in the skb's control buffer (cb).
+ *
+ * @mactime: value in microseconds of the 64-bit Time Synchronization Function
+ * (TSF) timer when the first data symbol (MPDU) arrived at the hardware.
+ * @boottime_ns: CLOCK_BOOTTIME timestamp the frame was received at, this is
+ * needed only for beacons and probe responses that update the scan cache.
+ * @ack_tx_hwtstamp: Hardware timestamp for the ack TX in nanoseconds. Only
+ * needed for Timing measurement and Fine timing measurement action frames.
+ * Only reported by devices that have timestamping enabled.
+ * @device_timestamp: arbitrary timestamp for the device, mac80211 doesn't use
+ * it but can store it and pass it back to the driver for synchronisation
+ * @band: the active band when this frame was received
+ * @freq: frequency the radio was tuned to when receiving this frame, in MHz
+ * This field must be set for management frames, but isn't strictly needed
+ * for data (other) frames - for those it only affects radiotap reporting.
+ * @freq_offset: @freq has a positive offset of 500 kHz.
+ * @signal: signal strength when receiving this frame, either in dBm, in dB or
+ * unspecified depending on the hardware capabilities flags
+ * @IEEE80211_HW_SIGNAL_*
+ * @chains: bitmask of receive chains for which separate signal strength
+ * values were filled.
+ * @chain_signal: per-chain signal strength, in dBm (unlike @signal, doesn't
+ * support dB or unspecified units)
+ * @antenna: antenna used
+ * @rate_idx: index of data rate into band's supported rates or MCS index if
+ * HT or VHT is used (see @encoding)
+ * @nss: number of streams (VHT and HE only)
+ * @flag: %RX_FLAG_\*
+ * @encoding: &enum mac80211_rx_encoding
+ * @bw: &enum rate_info_bw
+ * @enc_flags: uses bits from &enum mac80211_rx_encoding_flags
+ * @he_ru: HE RU, from &enum nl80211_he_ru_alloc
+ * @he_gi: HE GI, from &enum nl80211_he_gi
+ * @he_dcm: HE DCM value
+ * @rx_flags: internal RX flags for mac80211
+ * @ampdu_reference: A-MPDU reference number, must be a different value for
+ * each A-MPDU but the same for each subframe within one A-MPDU
+ * @ampdu_delimiter_crc: A-MPDU delimiter CRC
+ * @zero_length_psdu_type: radiotap type of the 0-length PSDU
+ * @link_valid: if the link which is identified by @link_id is valid. This flag
+ * is set only when the connection is MLO.
+ * @link_id: id of the link used to receive the packet. This is used along with
+ * @link_valid.
+ */
+struct ieee80211_rx_status {
+ u64 mactime;
+ union {
+ u64 boottime_ns;
+ ktime_t ack_tx_hwtstamp;
+ };
+ u32 device_timestamp;
+ u32 ampdu_reference;
+ u32 flag;
+ u16 freq: 13, freq_offset: 1;
+ u8 enc_flags;
+ u8 encoding:2, bw:3, he_ru:3;
+ u8 he_gi:2, he_dcm:1;
+ u8 rate_idx;
+ u8 nss;
+ u8 rx_flags;
+ u8 band;
+ u8 antenna;
+ s8 signal;
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+ u8 ampdu_delimiter_crc;
+ u8 zero_length_psdu_type;
+ u8 link_valid:1, link_id:4;
+};
+
+static inline u32
+ieee80211_rx_status_to_khz(struct ieee80211_rx_status *rx_status)
+{
+ return MHZ_TO_KHZ(rx_status->freq) +
+ (rx_status->freq_offset ? 500 : 0);
+}
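+
+/*
+ * Example: an S1G frame received at 902.5 MHz is reported with
+ * freq == 902 and freq_offset == 1, so this helper returns 902500 kHz.
+ */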
+
+/**
+ * struct ieee80211_vendor_radiotap - vendor radiotap data information
+ * @present: presence bitmap for this vendor namespace
+ * (this could be extended in the future if any vendor needs more
+ * bits, the radiotap spec does allow for that)
+ * @align: radiotap vendor namespace alignment. This defines the needed
+ * alignment for the @data field below, not for the vendor namespace
+ * description itself (which has a fixed 2-byte alignment).
+ * Must be a power of two, and be set to at least 1!
+ * @oui: radiotap vendor namespace OUI
+ * @subns: radiotap vendor sub namespace
+ * @len: radiotap vendor sub namespace skip length, if alignment is done
+ * then that's added to this, i.e. this is only the length of the
+ * @data field.
+ * @pad: number of bytes of padding after the @data, this exists so that
+ * the skb data alignment can be preserved even if the data has odd
+ * length
+ * @data: the actual vendor namespace data
+ *
+ * This struct, including the vendor data, goes into the skb->data before
+ * the 802.11 header. It's split up in mac80211 using the align/oui/subns
+ * data.
+ */
+struct ieee80211_vendor_radiotap {
+ u32 present;
+ u8 align;
+ u8 oui[3];
+ u8 subns;
+ u8 pad;
+ u16 len;
+ u8 data[];
+} __packed;
+
+/**
+ * enum ieee80211_conf_flags - configuration flags
+ *
+ * Flags to define PHY configuration options
+ *
+ * @IEEE80211_CONF_MONITOR: there's a monitor interface present -- use this
+ * to determine for example whether to calculate timestamps for packets
+ * or not, do not use instead of filter flags!
+ * @IEEE80211_CONF_PS: Enable 802.11 power save mode (managed mode only).
+ * This is the power save mode defined by IEEE 802.11-2007 section 11.2,
+ * meaning that the hardware still wakes up for beacons, is able to
+ * transmit frames and receive the possible acknowledgment frames.
+ * Not to be confused with hardware specific wakeup/sleep states,
+ * driver is responsible for that. See the section "Powersave support"
+ * for more.
+ * @IEEE80211_CONF_IDLE: The device is running, but idle; if the flag is set
+ * the driver should be prepared to handle configuration requests but
+ * may turn the device off as much as possible. Typically, this flag will
+ * be set when an interface is set UP but not associated or scanning, but
+ * it can also be unset in that case when monitor interfaces are active.
+ * @IEEE80211_CONF_OFFCHANNEL: The device is currently not on its main
+ * operating channel.
+ */
+enum ieee80211_conf_flags {
+ IEEE80211_CONF_MONITOR = (1<<0),
+ IEEE80211_CONF_PS = (1<<1),
+ IEEE80211_CONF_IDLE = (1<<2),
+ IEEE80211_CONF_OFFCHANNEL = (1<<3),
+};
+
+
+/**
+ * enum ieee80211_conf_changed - denotes which configuration changed
+ *
+ * @IEEE80211_CONF_CHANGE_LISTEN_INTERVAL: the listen interval changed
+ * @IEEE80211_CONF_CHANGE_MONITOR: the monitor flag changed
+ * @IEEE80211_CONF_CHANGE_PS: the PS flag or dynamic PS timeout changed
+ * @IEEE80211_CONF_CHANGE_POWER: the TX power changed
+ * @IEEE80211_CONF_CHANGE_CHANNEL: the channel/channel_type changed
+ * @IEEE80211_CONF_CHANGE_RETRY_LIMITS: retry limits changed
+ * @IEEE80211_CONF_CHANGE_IDLE: Idle flag changed
+ * @IEEE80211_CONF_CHANGE_SMPS: Spatial multiplexing powersave mode changed
+ * Note that this is only valid if channel contexts are not used,
+ * otherwise each channel context has the number of chains listed.
+ */
+enum ieee80211_conf_changed {
+ IEEE80211_CONF_CHANGE_SMPS = BIT(1),
+ IEEE80211_CONF_CHANGE_LISTEN_INTERVAL = BIT(2),
+ IEEE80211_CONF_CHANGE_MONITOR = BIT(3),
+ IEEE80211_CONF_CHANGE_PS = BIT(4),
+ IEEE80211_CONF_CHANGE_POWER = BIT(5),
+ IEEE80211_CONF_CHANGE_CHANNEL = BIT(6),
+ IEEE80211_CONF_CHANGE_RETRY_LIMITS = BIT(7),
+ IEEE80211_CONF_CHANGE_IDLE = BIT(8),
+};
+
+/**
+ * enum ieee80211_smps_mode - spatial multiplexing power save mode
+ *
+ * @IEEE80211_SMPS_AUTOMATIC: automatic
+ * @IEEE80211_SMPS_OFF: off
+ * @IEEE80211_SMPS_STATIC: static
+ * @IEEE80211_SMPS_DYNAMIC: dynamic
+ * @IEEE80211_SMPS_NUM_MODES: internal, don't use
+ */
+enum ieee80211_smps_mode {
+ IEEE80211_SMPS_AUTOMATIC,
+ IEEE80211_SMPS_OFF,
+ IEEE80211_SMPS_STATIC,
+ IEEE80211_SMPS_DYNAMIC,
+
+ /* keep last */
+ IEEE80211_SMPS_NUM_MODES,
+};
+
+/**
+ * struct ieee80211_conf - configuration of the device
+ *
+ * This struct indicates how the driver shall configure the hardware.
+ *
+ * @flags: configuration flags defined above
+ *
+ * @listen_interval: listen interval in units of beacon interval
+ * @ps_dtim_period: The DTIM period of the AP we're connected to, for use
+ * in power saving. Power saving will not be enabled until a beacon
+ * has been received and the DTIM period is known.
+ * @dynamic_ps_timeout: The dynamic powersave timeout (in ms), see the
+ * powersave documentation below. This variable is valid only when
+ * the CONF_PS flag is set.
+ *
+ * @power_level: requested transmit power (in dBm), backward compatibility
+ * value only that is set to the minimum of all interfaces
+ *
+ * @chandef: the channel definition to tune to
+ * @radar_enabled: whether radar detection is enabled
+ *
+ * @long_frame_max_tx_count: Maximum number of transmissions for a "long" frame
+ * (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11,
+ * but actually means the number of transmissions not the number of retries
+ * @short_frame_max_tx_count: Maximum number of transmissions for a "short"
+ * frame, called "dot11ShortRetryLimit" in 802.11, but actually means the
+ * number of transmissions not the number of retries
+ *
+ * @smps_mode: spatial multiplexing powersave mode; note that
+ * %IEEE80211_SMPS_STATIC is used when the device is not
+ * configured for an HT channel.
+ * Note that this is only valid if channel contexts are not used,
+ * otherwise each channel context has the number of chains listed.
+ */
+struct ieee80211_conf {
+ u32 flags;
+ int power_level, dynamic_ps_timeout;
+
+ u16 listen_interval;
+ u8 ps_dtim_period;
+
+ u8 long_frame_max_tx_count, short_frame_max_tx_count;
+
+ struct cfg80211_chan_def chandef;
+ bool radar_enabled;
+ enum ieee80211_smps_mode smps_mode;
+};
+
+/**
+ * struct ieee80211_channel_switch - holds the channel switch data
+ *
+ * The information provided in this structure is required for channel switch
+ * operation.
+ *
+ * @timestamp: value in microseconds of the 64-bit Time Synchronization
+ * Function (TSF) timer when the frame containing the channel switch
+ * announcement was received. This is simply the rx.mactime parameter
+ * the driver passed into mac80211.
+ * @device_timestamp: arbitrary timestamp for the device, this is the
+ * rx.device_timestamp parameter the driver passed to mac80211.
+ * @block_tx: Indicates whether transmission must be blocked before the
+ * scheduled channel switch, as indicated by the AP.
+ * @chandef: the new channel to switch to
+ * @count: the number of TBTT's until the channel switch event
+ * @delay: maximum delay between the time the AP transmitted the last beacon
+ * in the current channel and the expected time of the first beacon in the
+ * new channel, expressed in TU.
+ */
+struct ieee80211_channel_switch {
+ u64 timestamp;
+ u32 device_timestamp;
+ bool block_tx;
+ struct cfg80211_chan_def chandef;
+ u8 count;
+ u32 delay;
+};
+
+/**
+ * enum ieee80211_vif_flags - virtual interface flags
+ *
+ * @IEEE80211_VIF_BEACON_FILTER: the device performs beacon filtering
+ * on this virtual interface to avoid unnecessary CPU wakeups
+ * @IEEE80211_VIF_SUPPORTS_CQM_RSSI: the device can do connection quality
+ * monitoring on this virtual interface -- i.e. it can monitor
+ * connection quality related parameters, such as the RSSI level and
+ * provide notifications if configured trigger levels are reached.
+ * @IEEE80211_VIF_SUPPORTS_UAPSD: The device can do U-APSD for this
+ * interface. This flag should be set during interface addition,
+ * but may be set/cleared as late as authentication to an AP. It is
+ * only valid for managed/station mode interfaces.
+ * @IEEE80211_VIF_GET_NOA_UPDATE: request to handle NOA attributes
+ * and send a P2P_PS notification to the driver if the NOA changed,
+ * even if this is not a pure P2P vif.
+ */
+enum ieee80211_vif_flags {
+ IEEE80211_VIF_BEACON_FILTER = BIT(0),
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI = BIT(1),
+ IEEE80211_VIF_SUPPORTS_UAPSD = BIT(2),
+ IEEE80211_VIF_GET_NOA_UPDATE = BIT(3),
+};
+
+
+/**
+ * enum ieee80211_offload_flags - virtual interface offload flags
+ *
+ * @IEEE80211_OFFLOAD_ENCAP_ENABLED: tx encapsulation offload is enabled
+ * The driver supports sending frames passed as 802.3 frames by mac80211.
+ * It must also support sending 802.11 packets for the same interface.
+ * @IEEE80211_OFFLOAD_ENCAP_4ADDR: support 4-address mode encapsulation offload
+ * @IEEE80211_OFFLOAD_DECAP_ENABLED: rx encapsulation offload is enabled
+ * The driver supports passing received 802.11 frames as 802.3 frames to
+ * mac80211.
+ */
+enum ieee80211_offload_flags {
+ IEEE80211_OFFLOAD_ENCAP_ENABLED = BIT(0),
+ IEEE80211_OFFLOAD_ENCAP_4ADDR = BIT(1),
+ IEEE80211_OFFLOAD_DECAP_ENABLED = BIT(2),
+};
+
+/**
+ * struct ieee80211_vif_cfg - interface configuration
+ * @assoc: association status
+ * @ibss_joined: indicates whether this station is part of an IBSS or not
+ * @ibss_creator: indicates if a new IBSS network is being created
+ * @ps: power-save mode (STA only). This flag is NOT affected by
+ * offchannel/dynamic_ps operations.
+ * @aid: association ID number, valid only when @assoc is true
+ * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
+ * hardware may filter ARP queries targeted for addresses other than
+ * those listed here. The driver must allow ARP queries targeted for
+ * all addresses listed here to pass through. An empty list implies no
+ * ARP queries need to pass.
+ * @arp_addr_cnt: Number of addresses currently on the list. Note that this
+ * may be larger than %IEEE80211_BSS_ARP_ADDR_LIST_LEN (the arp_addr_list
+ * array size), it's up to the driver what to do in that case.
+ * @ssid: The SSID of the current vif. Valid in AP and IBSS mode.
+ * @ssid_len: Length of SSID given in @ssid.
+ * @s1g: BSS is S1G BSS (affects Association Request format).
+ * @idle: This interface is idle. There's also a global idle flag in the
+ * hardware config which may be more appropriate depending on what
+ * your driver/device needs to do.
+ * @ap_addr: AP MLD address, or BSSID for non-MLO connections
+ * (station mode only)
+ */
+struct ieee80211_vif_cfg {
+ /* association related data */
+ bool assoc, ibss_joined;
+ bool ibss_creator;
+ bool ps;
+ u16 aid;
+
+ __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
+ int arp_addr_cnt;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ size_t ssid_len;
+ bool s1g;
+ bool idle;
+ u8 ap_addr[ETH_ALEN] __aligned(2);
+};
+
+/**
+ * struct ieee80211_vif - per-interface data
+ *
+ * Data in this structure is continually present for driver
+ * use during the life of a virtual interface.
+ *
+ * @type: type of this virtual interface
+ * @cfg: vif configuration, see &struct ieee80211_vif_cfg
+ * @bss_conf: BSS configuration for this interface, either our own
+ * or the BSS we're associated to
+ * @link_conf: in case of MLD, the per-link BSS configuration,
+ * indexed by link ID
+ * @valid_links: bitmap of valid links, or 0 for non-MLO.
+ * @active_links: The bitmap of active links, or 0 for non-MLO.
+ * The driver shouldn't change this directly, but use the
+ * API calls meant for that purpose.
+ * @addr: address of this interface
+ * @p2p: indicates whether this AP or STA interface is a p2p
+ * interface, i.e. a GO or p2p-sta respectively
+ * @driver_flags: flags/capabilities the driver has for this interface,
+ * these need to be set (or cleared) when the interface is added
+ * or, if supported by the driver, the interface type is changed
+ * at runtime, mac80211 will never touch this field
+ * @offload_flags: hardware 802.3 <-> 802.11 encapsulation offload
+ * capabilities/flags for this interface, see &enum ieee80211_offload_flags.
+ * These are initialized by mac80211 before calling .add_interface,
+ * .change_interface or .update_vif_offload and updated by the driver
+ * within these ops, based on supported features or runtime change
+ * restrictions.
+ * @hw_queue: hardware queue for each AC
+ * @cab_queue: content-after-beacon (DTIM beacon really) queue, AP mode only
+ * @debugfs_dir: debugfs dentry, can be used by drivers to create own per
+ * interface debug files. Note that it will be NULL for the virtual
+ * monitor interface (if that is requested.)
+ * @probe_req_reg: probe requests should be reported to mac80211 for this
+ * interface.
+ * @rx_mcast_action_reg: multicast Action frames should be reported to mac80211
+ * for this interface.
+ * @drv_priv: data area for driver use, will always be aligned to
+ * sizeof(void \*).
+ * @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
+ * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled.
+ */
+struct ieee80211_vif {
+ enum nl80211_iftype type;
+ struct ieee80211_vif_cfg cfg;
+ struct ieee80211_bss_conf bss_conf;
+ struct ieee80211_bss_conf __rcu *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
+ u16 valid_links, active_links;
+ u8 addr[ETH_ALEN] __aligned(2);
+ bool p2p;
+
+ u8 cab_queue;
+ u8 hw_queue[IEEE80211_NUM_ACS];
+
+ struct ieee80211_txq *txq;
+
+ u32 driver_flags;
+ u32 offload_flags;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *debugfs_dir;
+#endif
+
+ bool probe_req_reg;
+ bool rx_mcast_action_reg;
+
+ struct ieee80211_vif *mbssid_tx_vif;
+
+ /* must be last */
+ u8 drv_priv[] __aligned(sizeof(void *));
+};
+
+#define for_each_vif_active_link(vif, link, link_id) \
+ for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) \
+ if ((!(vif)->active_links || \
+ (vif)->active_links & BIT(link_id)) && \
+ (link = rcu_dereference((vif)->link_conf[link_id])))
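+
+/*
+ * Illustrative iteration (hypothetical function, not part of mac80211);
+ * the link_conf[] pointers are RCU-protected, so a read-side lock is
+ * held around the walk.
+ */
+static inline int example_count_active_links(struct ieee80211_vif *vif)
+{
+	struct ieee80211_bss_conf *link;
+	unsigned int link_id;
+	int n = 0;
+
+	rcu_read_lock();
+	for_each_vif_active_link(vif, link, link_id)
+		n++;
+	rcu_read_unlock();
+	return n;
+}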
+
+static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
+{
+#ifdef CONFIG_MAC80211_MESH
+ return vif->type == NL80211_IFTYPE_MESH_POINT;
+#endif
+ return false;
+}
+
+/**
+ * wdev_to_ieee80211_vif - return a vif struct from a wdev
+ * @wdev: the wdev to get the vif for
+ *
+ * This can be used by mac80211 drivers with direct cfg80211 APIs
+ * (like the vendor commands) that get a wdev.
+ *
+ * Note that this function may return %NULL if the given wdev isn't
+ * associated with a vif that the driver knows about (e.g. monitor
+ * or AP_VLAN interfaces.)
+ */
+struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
+
+/**
+ * ieee80211_vif_to_wdev - return a wdev struct from a vif
+ * @vif: the vif to get the wdev for
+ *
+ * This can be used by mac80211 drivers with direct cfg80211 APIs
+ * (like the vendor commands) that needs to get the wdev for a vif.
+ * This can also be useful to get the netdev associated to a vif.
+ */
+struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
+
+/**
+ * lockdep_vif_mutex_held - for lockdep checks on link pointers
+ * @vif: the interface to check
+ */
+static inline bool lockdep_vif_mutex_held(struct ieee80211_vif *vif)
+{
+ return lockdep_is_held(&ieee80211_vif_to_wdev(vif)->mtx);
+}
+
+#define link_conf_dereference_protected(vif, link_id) \
+ rcu_dereference_protected((vif)->link_conf[link_id], \
+ lockdep_vif_mutex_held(vif))
+
+/**
+ * enum ieee80211_key_flags - key flags
+ *
+ * These flags are used for communication about keys between the driver
+ * and mac80211, with the @flags parameter of &struct ieee80211_key_conf.
+ *
+ * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
+ * driver to indicate that it requires IV generation for this
+ * particular key. Setting this flag does not necessarily mean that SKBs
+ * will have sufficient tailroom for ICV or MIC.
+ * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
+ * the driver for a TKIP key if it requires Michael MIC
+ * generation in software.
+ * @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates
+ * that the key is pairwise rather than a shared key.
+ * @IEEE80211_KEY_FLAG_SW_MGMT_TX: This flag should be set by the driver for a
+ * CCMP/GCMP key if it requires CCMP/GCMP encryption of management frames
+ * (MFP) to be done in software.
+ * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
+ * if space should be prepared for the IV, but the IV
+ * itself should not be generated. Do not set together with
+ * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does
+ * not necessarily mean that SKBs will have sufficient tailroom for ICV or
+ * MIC.
+ * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
+ * management frames. The flag can help drivers that have a hardware
+ * crypto implementation that doesn't deal with management frames
+ * properly by allowing them to not upload the keys to hardware and
+ * fall back to software crypto. Note that this flag deals only with
+ * RX, if your crypto engine can't deal with TX you can also set the
+ * %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
+ * @IEEE80211_KEY_FLAG_GENERATE_IV_MGMT: This flag should be set by the
+ * driver for a CCMP/GCMP key to indicate that it requires IV generation
+ * only for management frames (MFP).
+ * @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the
+ * driver for a key to indicate that sufficient tailroom must always
+ * be reserved for ICV or MIC, even when HW encryption is enabled.
+ * @IEEE80211_KEY_FLAG_PUT_MIC_SPACE: This flag should be set by the driver for
+ * a TKIP key if it only requires MIC space. Do not set together with
+ * @IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key.
+ * @IEEE80211_KEY_FLAG_NO_AUTO_TX: Key needs explicit Tx activation.
+ * @IEEE80211_KEY_FLAG_GENERATE_MMIE: This flag should be set by the driver
+ * for an AES_CMAC key to indicate that it requires sequence number
+ * generation only.
+ */
+enum ieee80211_key_flags {
+ IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0),
+ IEEE80211_KEY_FLAG_GENERATE_IV = BIT(1),
+ IEEE80211_KEY_FLAG_GENERATE_MMIC = BIT(2),
+ IEEE80211_KEY_FLAG_PAIRWISE = BIT(3),
+ IEEE80211_KEY_FLAG_SW_MGMT_TX = BIT(4),
+ IEEE80211_KEY_FLAG_PUT_IV_SPACE = BIT(5),
+ IEEE80211_KEY_FLAG_RX_MGMT = BIT(6),
+ IEEE80211_KEY_FLAG_RESERVE_TAILROOM = BIT(7),
+ IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(8),
+ IEEE80211_KEY_FLAG_NO_AUTO_TX = BIT(9),
+ IEEE80211_KEY_FLAG_GENERATE_MMIE = BIT(10),
+};
+
+/**
+ * struct ieee80211_key_conf - key information
+ *
+ * This key information is given by mac80211 to the driver by
+ * the set_key() callback in &struct ieee80211_ops.
+ *
+ * @hw_key_idx: To be set by the driver, this is the key index the driver
+ * wants to be given when a frame is transmitted and needs to be
+ * encrypted in hardware.
+ * @cipher: The key's cipher suite selector.
+ * @tx_pn: PN used for TX keys, may be used by the driver as well if it
+ * needs to do software PN assignment by itself (e.g. due to TSO)
+ * @flags: key flags, see &enum ieee80211_key_flags.
+ * @keyidx: the key index (0-3)
+ * @keylen: key material length
+ * @key: key material. For ALG_TKIP the key is encoded as a 256-bit (32 byte)
+ * data block:
+ * - Temporal Encryption Key (128 bits)
+ * - Temporal Authenticator Tx MIC Key (64 bits)
+ * - Temporal Authenticator Rx MIC Key (64 bits)
+ * @icv_len: The ICV length for this key type
+ * @iv_len: The IV length for this key type
+ * @link_id: the link ID for MLO, or -1 for non-MLO or pairwise keys
+ */
+struct ieee80211_key_conf {
+ atomic64_t tx_pn;
+ u32 cipher;
+ u8 icv_len;
+ u8 iv_len;
+ u8 hw_key_idx;
+ s8 keyidx;
+ u16 flags;
+ s8 link_id;
+ u8 keylen;
+ u8 key[];
+};
+
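+/*
+ * Illustrative example (hypothetical helper): software PN assignment as
+ * the @tx_pn documentation above describes, e.g. for a driver doing TSO.
+ */
+static inline u64 example_next_tx_pn(struct ieee80211_key_conf *key)
+{
+	return atomic64_inc_return(&key->tx_pn);
+}
+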
+#define IEEE80211_MAX_PN_LEN 16
+
+#define TKIP_PN_TO_IV16(pn) ((u16)(pn & 0xffff))
+#define TKIP_PN_TO_IV32(pn) ((u32)((pn >> 16) & 0xffffffff))
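+
+/*
+ * Illustrative example (hypothetical helper): splitting a 48-bit TKIP PN
+ * into the IV16/IV32 values with the macros above, e.g. for programming
+ * hardware sequence counters.
+ */
+static inline void example_tkip_pn_split(u64 pn, u16 *iv16, u32 *iv32)
+{
+	*iv16 = TKIP_PN_TO_IV16(pn);
+	*iv32 = TKIP_PN_TO_IV32(pn);
+}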
+
+/**
+ * struct ieee80211_key_seq - key sequence counter
+ *
+ * @tkip: TKIP data, containing IV32 and IV16 in host byte order
+ * @ccmp: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @aes_cmac: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @aes_gmac: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @gcmp: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @hw: data for HW-only (e.g. cipher scheme) keys
+ */
+struct ieee80211_key_seq {
+ union {
+ struct {
+ u32 iv32;
+ u16 iv16;
+ } tkip;
+ struct {
+ u8 pn[6];
+ } ccmp;
+ struct {
+ u8 pn[6];
+ } aes_cmac;
+ struct {
+ u8 pn[6];
+ } aes_gmac;
+ struct {
+ u8 pn[6];
+ } gcmp;
+ struct {
+ u8 seq[IEEE80211_MAX_PN_LEN];
+ u8 seq_len;
+ } hw;
+ };
+};
+
+/**
+ * enum set_key_cmd - key command
+ *
+ * Used with the set_key() callback in &struct ieee80211_ops, this
+ * indicates whether a key is being removed or added.
+ *
+ * @SET_KEY: a key is set
+ * @DISABLE_KEY: a key must be disabled
+ */
+enum set_key_cmd {
+ SET_KEY, DISABLE_KEY,
+};
+
+/**
+ * enum ieee80211_sta_state - station state
+ *
+ * @IEEE80211_STA_NOTEXIST: station doesn't exist at all,
+ * this is a special state for add/remove transitions
+ * @IEEE80211_STA_NONE: station exists without special state
+ * @IEEE80211_STA_AUTH: station is authenticated
+ * @IEEE80211_STA_ASSOC: station is associated
+ * @IEEE80211_STA_AUTHORIZED: station is authorized (802.1X)
+ */
+enum ieee80211_sta_state {
+ /* NOTE: These need to be ordered correctly! */
+ IEEE80211_STA_NOTEXIST,
+ IEEE80211_STA_NONE,
+ IEEE80211_STA_AUTH,
+ IEEE80211_STA_ASSOC,
+ IEEE80211_STA_AUTHORIZED,
+};
+
+/**
+ * enum ieee80211_sta_rx_bandwidth - station RX bandwidth
+ * @IEEE80211_STA_RX_BW_20: station can only receive 20 MHz
+ * @IEEE80211_STA_RX_BW_40: station can receive up to 40 MHz
+ * @IEEE80211_STA_RX_BW_80: station can receive up to 80 MHz
+ * @IEEE80211_STA_RX_BW_160: station can receive up to 160 MHz
+ * (including 80+80 MHz)
+ * @IEEE80211_STA_RX_BW_320: station can receive up to 320 MHz
+ *
+ * Implementation note: 20 must be zero so the field is initialized
+ * correctly; the values must be sorted.
+ */
+enum ieee80211_sta_rx_bandwidth {
+ IEEE80211_STA_RX_BW_20 = 0,
+ IEEE80211_STA_RX_BW_40,
+ IEEE80211_STA_RX_BW_80,
+ IEEE80211_STA_RX_BW_160,
+ IEEE80211_STA_RX_BW_320,
+};
+
+/**
+ * struct ieee80211_sta_rates - station rate selection table
+ *
+ * @rcu_head: RCU head used for freeing the table on update
+ * @rate: transmit rates/flags to be used by default.
+ * Overriding entries per-packet is possible by using cb tx control.
+ */
+struct ieee80211_sta_rates {
+ struct rcu_head rcu_head;
+ struct {
+ s8 idx;
+ u8 count;
+ u8 count_cts;
+ u8 count_rts;
+ u16 flags;
+ } rate[IEEE80211_TX_RATE_TABLE_SIZE];
+};
+
+/**
+ * struct ieee80211_sta_txpwr - station txpower configuration
+ *
+ * Used to configure txpower for station.
+ *
+ * @power: indicates the tx power, in dBm, to be used when sending data frames
+ * to the STA.
+ * @type: In particular, if TPC %type is NL80211_TX_POWER_LIMITED then the
+ * tx power will be less than or equal to the value specified from
+ * userspace, whereas if TPC %type is NL80211_TX_POWER_AUTOMATIC it
+ * indicates the default tx power. NL80211_TX_POWER_FIXED is not a valid
+ * configuration option for per peer TPC.
+ */
+struct ieee80211_sta_txpwr {
+ s16 power;
+ enum nl80211_tx_power_setting type;
+};
+
+/**
+ * struct ieee80211_sta_aggregates - info that is aggregated from active links
+ *
+ * Used for any per-link data that needs to be aggregated and updated in the
+ * main &struct ieee80211_sta when updated or the active links change.
+ *
+ * @max_amsdu_len: indicates the maximal length of an A-MSDU in bytes.
+ * This field is always valid for packets with a VHT preamble.
+ * For packets with a HT preamble, additional limits apply:
+ *
+ * * If the skb is transmitted as part of a BA agreement, the
+ * A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
+ * * If the skb is not part of a BA agreement, the A-MSDU maximal
+ * size is min(max_amsdu_len, 7935) bytes.
+ *
+ * Both additional HT limits must be enforced by the low level
+ * driver. This is defined by the spec (IEEE 802.11-2012 section
+ * 8.3.2.2 NOTE 2).
+ * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control.
+ * @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID
+ */
+struct ieee80211_sta_aggregates {
+ u16 max_amsdu_len;
+
+ u16 max_rc_amsdu_len;
+ u16 max_tid_amsdu_len[IEEE80211_NUM_TIDS];
+};
+
+/**
+ * struct ieee80211_link_sta - station link-specific info
+ * All link-specific info for a STA link of a non-MLD STA (single entry)
+ * or an MLD STA (multiple entries) is stored here.
+ *
+ * @addr: MAC address of the Link STA. For a non-MLO STA this is the same
+ * as the addr in ieee80211_sta. For an MLO Link STA this addr can be the
+ * same as or different from the addr in ieee80211_sta (which represents
+ * the MLD STA addr)
+ * @link_id: the link ID for this link STA (0 for deflink)
+ * @smps_mode: current SMPS mode (off, static or dynamic)
+ * @supp_rates: Bitmap of supported rates
+ * @ht_cap: HT capabilities of this STA; restricted to our own capabilities
+ * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
+ * @he_cap: HE capabilities of this STA
+ * @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities
+ * @eht_cap: EHT capabilities of this STA
+ * @bandwidth: current bandwidth the station can receive with
+ * @rx_nss: in HT/VHT, the maximum number of spatial streams the
+ * station can receive at the moment, changed by operating mode
+ * notifications and capabilities. The value is only valid after
+ * the station moves to associated state.
+ * @txpwr: the station tx power configuration
+ *
+ */
+struct ieee80211_link_sta {
+ u8 addr[ETH_ALEN];
+ u8 link_id;
+ enum ieee80211_smps_mode smps_mode;
+
+ u32 supp_rates[NUM_NL80211_BANDS];
+ struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
+ struct ieee80211_sta_he_cap he_cap;
+ struct ieee80211_he_6ghz_capa he_6ghz_capa;
+ struct ieee80211_sta_eht_cap eht_cap;
+
+ struct ieee80211_sta_aggregates agg;
+
+ u8 rx_nss;
+ enum ieee80211_sta_rx_bandwidth bandwidth;
+ struct ieee80211_sta_txpwr txpwr;
+};
+
+/**
+ * struct ieee80211_sta - station table entry
+ *
+ * A station table entry represents a station we are possibly
+ * communicating with. Since stations are RCU-managed in
+ * mac80211, any ieee80211_sta pointer you get access to must
+ * either be protected by rcu_read_lock() explicitly or implicitly,
+ * or you must take good care to not use such a pointer after a
+ * call to your sta_remove callback that removed it.
+ * This structure also represents the MLD STA in case of an MLO
+ * association and holds pointers to the various link STAs.
+ *
+ * @addr: MAC address
+ * @aid: AID we assigned to the station if we're an AP
+ * @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU
+ * that this station is allowed to transmit to us.
+ * Can be modified by driver.
+ * @wme: indicates whether the STA supports QoS/WME (if the local device
+ * does, otherwise always false)
+ * @drv_priv: data area for driver use, will always be aligned to
+ * sizeof(void \*), size is determined in hw information.
+ * @uapsd_queues: bitmap of queues configured for uapsd. Only valid
+ * if wme is supported. The bits order is like in
+ * IEEE80211_WMM_IE_STA_QOSINFO_AC_*.
+ * @max_sp: max Service Period. Only valid if wme is supported.
+ * @rates: rate control selection table
+ * @tdls: indicates whether the STA is a TDLS peer
+ * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
+ * valid if the STA is a TDLS peer in the first place.
+ * @mfp: indicates whether the STA uses management frame protection or not.
+ * @mlo: indicates whether the STA is an MLO station.
+ * @max_amsdu_subframes: indicates the maximal number of MSDUs in a single
+ * A-MSDU. Taken from the Extended Capabilities element. 0 means
+ * unlimited.
+ * @cur: currently valid data as aggregated from the active links.
+ * For a non-MLO STA it will point to the deflink data. For an MLO STA
+ * ieee80211_sta_recalc_aggregates() must be called to update it.
+ * @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not.
+ * @txq: per-TID data TX queues (if driver uses the TXQ abstraction); note that
+ * the last entry (%IEEE80211_NUM_TIDS) is used for non-data frames
+ * @deflink: This holds the default link STA information; for a non-MLO STA
+ * all link-specific STA information is accessed through @deflink or
+ * through link[0], which points to the address of @deflink. For an MLO
+ * STA the first added link STA will point to @deflink.
+ * @link: references to the Link STA entries. For a non-MLO STA all links
+ * except the first, i.e. link[0], are NULL by default, and link
+ * information is accessed via @deflink or link[0]. For an MLO STA the
+ * first link STA added will point its link pointer to the @deflink
+ * address; the remaining ones are allocated, and their addresses are
+ * assigned to link[link_id], where link_id is the ID assigned by
+ * the AP.
+ * @valid_links: bitmap of valid links, or 0 for non-MLO
+ */
+struct ieee80211_sta {
+ u8 addr[ETH_ALEN];
+ u16 aid;
+ u16 max_rx_aggregation_subframes;
+ bool wme;
+ u8 uapsd_queues;
+ u8 max_sp;
+ struct ieee80211_sta_rates __rcu *rates;
+ bool tdls;
+ bool tdls_initiator;
+ bool mfp;
+ bool mlo;
+ u8 max_amsdu_subframes;
+
+ struct ieee80211_sta_aggregates *cur;
+
+ bool support_p2p_ps;
+
+ struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1];
+
+ u16 valid_links;
+ struct ieee80211_link_sta deflink;
+ struct ieee80211_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
+ /* must be last */
+ u8 drv_priv[] __aligned(sizeof(void *));
+};
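+
+/*
+ * A minimal illustrative sketch (not part of this header) of how a driver
+ * might size and use the trailing @drv_priv area; struct foo_sta_priv and
+ * the foo_* functions are hypothetical names:
+ */
+struct foo_sta_priv {			/* hypothetical per-station state */
+	u32 tx_pkts;
+};
+
+static void foo_setup_hw(struct ieee80211_hw *hw)
+{
+	/* reserve the per-station drv_priv area before ieee80211_register_hw() */
+	hw->sta_data_size = sizeof(struct foo_sta_priv);
+}
+
+static void foo_count_tx(struct ieee80211_sta *sta)
+{
+	struct foo_sta_priv *sp = (void *)sta->drv_priv;
+
+	sp->tx_pkts++;
+}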
+
+#ifdef CONFIG_LOCKDEP
+bool lockdep_sta_mutex_held(struct ieee80211_sta *pubsta);
+#else
+static inline bool lockdep_sta_mutex_held(struct ieee80211_sta *pubsta)
+{
+ return true;
+}
+#endif
+
+#define link_sta_dereference_protected(sta, link_id) \
+ rcu_dereference_protected((sta)->link[link_id], \
+ lockdep_sta_mutex_held(sta))
+
+#define for_each_sta_active_link(vif, sta, link_sta, link_id) \
+ for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) \
+ if ((!(vif)->active_links || \
+ (vif)->active_links & BIT(link_id)) && \
+ ((link_sta) = link_sta_dereference_protected(sta, link_id)))
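+
+/*
+ * A small usage sketch (not part of this header): counting the active link
+ * STAs of a (possibly MLO) station with the iterator above; the caller is
+ * assumed to hold the sta mutex, cf. lockdep_sta_mutex_held():
+ */
+static int foo_count_active_links(struct ieee80211_vif *vif,
+				  struct ieee80211_sta *sta)
+{
+	struct ieee80211_link_sta *link_sta;
+	unsigned int link_id;
+	int n = 0;
+
+	for_each_sta_active_link(vif, sta, link_sta, link_id)
+		n++;
+	return n;
+}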
+
+/**
+ * enum sta_notify_cmd - sta notify command
+ *
+ * Used with the sta_notify() callback in &struct ieee80211_ops, this
+ * indicates if an associated station made a power state transition.
+ *
+ * @STA_NOTIFY_SLEEP: a station is now sleeping
+ * @STA_NOTIFY_AWAKE: a sleeping station woke up
+ */
+enum sta_notify_cmd {
+ STA_NOTIFY_SLEEP, STA_NOTIFY_AWAKE,
+};
+
+/**
+ * struct ieee80211_tx_control - TX control data
+ *
+ * @sta: station table entry; this station pointer may be NULL, and
+ * due to RCU it must not be copied.
+ */
+struct ieee80211_tx_control {
+ struct ieee80211_sta *sta;
+};
+
+/**
+ * struct ieee80211_txq - Software intermediate tx queue
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @sta: station table entry, %NULL for per-vif queue
+ * @tid: the TID for this queue (unused for per-vif queue),
+ * %IEEE80211_NUM_TIDS for non-data (if enabled)
+ * @ac: the AC for this queue
+ * @drv_priv: driver private area, sized by hw->txq_data_size
+ *
+ * The driver can obtain packets from this queue by calling
+ * ieee80211_tx_dequeue().
+ */
+struct ieee80211_txq {
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ u8 tid;
+ u8 ac;
+
+ /* must be last */
+ u8 drv_priv[] __aligned(sizeof(void *));
+};
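+
+/*
+ * A minimal sketch (not part of this header) of a wake_tx_queue-style
+ * handler pulling frames with ieee80211_tx_dequeue(); foo_hw_xmit() is a
+ * hypothetical stand-in for handing the skb to the hardware:
+ */
+static void foo_hw_xmit(struct sk_buff *skb) { }	/* hypothetical stub */
+
+static void foo_pull_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+	struct sk_buff *skb;
+
+	while ((skb = ieee80211_tx_dequeue(hw, txq)))
+		foo_hw_xmit(skb);
+}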
+
+/**
+ * enum ieee80211_hw_flags - hardware flags
+ *
+ * These flags are used to indicate hardware capabilities to
+ * the stack. Generally, flags here should have their meaning
+ * done in a way that the simplest hardware doesn't need setting
+ * any particular flags. There are some exceptions to this rule,
+ * however, so you are advised to review these flags carefully.
+ *
+ * @IEEE80211_HW_HAS_RATE_CONTROL:
+ * The hardware or firmware includes rate control, and cannot be
+ * controlled by the stack. As such, no rate control algorithm
+ * should be instantiated, and the TX rate reported to userspace
+ * will be taken from the TX status instead of the rate control
+ * algorithm.
+ * Note that this requires the driver to implement a number of
+ * callbacks so it has the correct information: it needs to have
+ * the @set_rts_threshold callback, and must look at the BSS config
+ * @use_cts_prot for G/N protection, @use_short_slot for slot
+ * timing in 2.4 GHz and @use_short_preamble for preambles for
+ * CCK frames.
+ *
+ * @IEEE80211_HW_RX_INCLUDES_FCS:
+ * Indicates that received frames passed to the stack include
+ * the FCS at the end.
+ *
+ * @IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING:
+ * Some wireless LAN chipsets buffer broadcast/multicast frames
+ * for power saving stations in the hardware/firmware and others
+ * rely on the host system for such buffering. This option is used
+ * to configure the IEEE 802.11 upper layer to buffer broadcast and
+ * multicast frames when there are power saving stations so that
+ * the driver can fetch them with ieee80211_get_buffered_bc().
+ *
+ * @IEEE80211_HW_SIGNAL_UNSPEC:
+ * Hardware can provide signal values but we don't know their units. We
+ * expect values between 0 and @max_signal.
+ * If possible please provide dB or dBm instead.
+ *
+ * @IEEE80211_HW_SIGNAL_DBM:
+ * Hardware gives signal values in dBm, decibel difference from
+ * one milliwatt. This is the preferred method since it is standardized
+ * between different devices. @max_signal does not need to be set.
+ *
+ * @IEEE80211_HW_SPECTRUM_MGMT:
+ * Hardware supports spectrum management defined in 802.11h:
+ * Measurement, Channel Switch, Quieting and TPC.
+ *
+ * @IEEE80211_HW_AMPDU_AGGREGATION:
+ * Hardware supports 11n A-MPDU aggregation.
+ *
+ * @IEEE80211_HW_SUPPORTS_PS:
+ * Hardware has power save support (i.e. can go to sleep).
+ *
+ * @IEEE80211_HW_PS_NULLFUNC_STACK:
+ * Hardware requires nullfunc frame handling in stack, implies
+ * stack support for dynamic PS.
+ *
+ * @IEEE80211_HW_SUPPORTS_DYNAMIC_PS:
+ * Hardware has support for dynamic PS.
+ *
+ * @IEEE80211_HW_MFP_CAPABLE:
+ * Hardware supports management frame protection (MFP, IEEE 802.11w).
+ *
+ * @IEEE80211_HW_REPORTS_TX_ACK_STATUS:
+ * Hardware can provide ack status reports of Tx frames to
+ * the stack.
+ *
+ * @IEEE80211_HW_CONNECTION_MONITOR:
+ * The hardware performs its own connection monitoring, including
+ * periodic keep-alives to the AP and probing the AP on beacon loss.
+ *
+ * @IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC:
+ * This device needs to get data from beacon before association (i.e.
+ * dtim_period).
+ *
+ * @IEEE80211_HW_SUPPORTS_PER_STA_GTK: The device's crypto engine supports
+ * per-station GTKs as used by IBSS RSN or during fast transition. If
+ * the device doesn't support per-station GTKs, but can be asked not
+ * to decrypt group addressed frames, then IBSS RSN support is still
+ * possible but software crypto will be used. Advertise the wiphy flag
+ * only in that case.
+ *
+ * @IEEE80211_HW_AP_LINK_PS: When operating in AP mode the device
+ * autonomously manages the PS status of connected stations. When
+ * this flag is set mac80211 will not trigger PS mode for connected
+ * stations based on the PM bit of incoming frames.
+ * Use ieee80211_start_ps()/ieee80211_end_ps() to manually configure
+ * the PS mode of connected stations.
+ *
+ * @IEEE80211_HW_TX_AMPDU_SETUP_IN_HW: The device handles TX A-MPDU session
+ * setup strictly in HW. mac80211 should not attempt to do this in
+ * software.
+ *
+ * @IEEE80211_HW_WANT_MONITOR_VIF: The driver would like to be informed of
+ * a virtual monitor interface when monitor interfaces are the only
+ * active interfaces.
+ *
+ * @IEEE80211_HW_NO_AUTO_VIF: The driver would like for no wlanX to
+ * be created. It is expected that user-space will create vifs as
+ * desired (and thus have them named as desired).
+ *
+ * @IEEE80211_HW_SW_CRYPTO_CONTROL: The driver wants to control which of the
+ * crypto algorithms can be done in software - so don't automatically
+ * try to fall back to it if hardware crypto fails, but do so only if
+ * the driver returns 1. This also forces the driver to advertise its
+ * supported cipher suites.
+ *
+ * @IEEE80211_HW_SUPPORT_FAST_XMIT: The driver/hardware supports fast-xmit,
+ * this currently requires only the ability to calculate the duration
+ * for frames.
+ *
+ * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
+ * queue mapping in order to use different queues (not just one per AC)
+ * for different virtual interfaces. See the doc section on HW queue
+ * control for more details.
+ *
+ * @IEEE80211_HW_SUPPORTS_RC_TABLE: The driver supports using a rate
+ * selection table provided by the rate control algorithm.
+ *
+ * @IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF: Use the P2P Device address for any
+ * P2P Interface. This will be honoured even if more than one interface
+ * is supported.
+ *
+ * @IEEE80211_HW_TIMING_BEACON_ONLY: Use sync timing from beacon frames
+ * only, to allow getting TBTT of a DTIM beacon.
+ *
+ * @IEEE80211_HW_SUPPORTS_HT_CCK_RATES: Hardware supports mixing HT/CCK rates
+ * and can cope with CCK rates in an aggregation session (e.g. by not
+ * using aggregation for such frames.)
+ *
+ * @IEEE80211_HW_CHANCTX_STA_CSA: Support 802.11h based channel-switch (CSA)
+ * for a single active channel while using channel contexts. When support
+ * is not enabled the default action is to disconnect when getting the
+ * CSA frame.
+ *
+ * @IEEE80211_HW_SUPPORTS_CLONED_SKBS: The driver will never modify the payload
+ * or tailroom of TX skbs without copying them first.
+ *
+ * @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
+ * in one command, mac80211 doesn't have to run separate scans per band.
+ *
+ * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth
+ * than the BSS bandwidth for a TDLS link on the base channel.
+ *
+ * @IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU: The driver supports receiving A-MSDUs
+ * within A-MPDU.
+ *
+ * @IEEE80211_HW_BEACON_TX_STATUS: The device/driver provides TX status
+ * for sent beacons.
+ *
+ * @IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR: Hardware (or driver) requires that each
+ * station has a unique address, i.e. each station entry can be identified
+ * by just its MAC address; this prevents, for example, the same station
+ * from connecting to two virtual AP interfaces at the same time.
+ *
+ * @IEEE80211_HW_SUPPORTS_REORDERING_BUFFER: Hardware (or driver) manages the
+ * reordering buffer internally, guaranteeing mac80211 receives frames in
+ * order and does not need to manage its own reorder buffer or BA session
+ * timeout.
+ *
+ * @IEEE80211_HW_USES_RSS: The device uses RSS and thus requires parallel RX,
+ * which implies using per-CPU station statistics.
+ *
+ * @IEEE80211_HW_TX_AMSDU: Hardware (or driver) supports software aggregated
+ * A-MSDU frames. Requires software tx queueing and fast-xmit support.
+ * When not using minstrel/minstrel_ht rate control, the driver must
+ * limit the maximum A-MSDU size based on the current tx rate by setting
+ * max_rc_amsdu_len in struct ieee80211_sta.
+ *
+ * @IEEE80211_HW_TX_FRAG_LIST: Hardware (or driver) supports sending frag_list
+ * skbs, needed for zero-copy software A-MSDU.
+ *
+ * @IEEE80211_HW_REPORTS_LOW_ACK: The driver (or firmware) reports low ack
+ * events via ieee80211_report_low_ack() based on its own algorithm. For such
+ * drivers, the mac80211 packet loss mechanism will not be triggered and the
+ * driver completely depends on firmware events for station kickout.
+ *
+ * @IEEE80211_HW_SUPPORTS_TX_FRAG: Hardware does fragmentation by itself.
+ * The stack will not do fragmentation.
+ * The callback for @set_frag_threshold should be set as well.
+ *
+ * @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on
+ * TDLS links.
+ *
+ * @IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP: The driver requires the
+ * mgd_prepare_tx() callback to be called before transmission of a
+ * deauthentication frame in case the association was completed but no
+ * beacon was heard. This is required in multi-channel scenarios, where the
+ * virtual interface might not be given air time for the transmission of
+ * the frame, as it is not synced with the AP/P2P GO yet, and thus the
+ * deauthentication frame might not be transmitted.
+ *
+ * @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't
+ * support QoS NDP for AP probing - that's most likely a driver bug.
+ *
+ * @IEEE80211_HW_BUFF_MMPDU_TXQ: use the TXQ for bufferable MMPDUs, this of
+ * course requires the driver to use TXQs to start with.
+ *
+ * @IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW: (Hardware) rate control supports VHT
+ * extended NSS BW (dot11VHTExtendedNSSBWCapable). This flag will be set if
+ * the selected rate control algorithm sets %RATE_CTRL_CAPA_VHT_EXT_NSS_BW
+ * but if the rate control is built-in then it must be set by the driver.
+ * See also the documentation for that flag.
+ *
+ * @IEEE80211_HW_STA_MMPDU_TXQ: use the extra non-TID per-station TXQ for all
+ * MMPDUs on station interfaces. This of course requires the driver to use
+ * TXQs to start with.
+ *
+ * @IEEE80211_HW_TX_STATUS_NO_AMPDU_LEN: Driver does not report accurate A-MPDU
+ * length in tx status information
+ *
+ * @IEEE80211_HW_SUPPORTS_MULTI_BSSID: Hardware supports multi BSSID
+ *
+ * @IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID: Hardware supports multi BSSID
+ * only for HE APs. Applies if @IEEE80211_HW_SUPPORTS_MULTI_BSSID is set.
+ *
+ * @IEEE80211_HW_AMPDU_KEYBORDER_SUPPORT: The card and driver will only
+ * aggregate MPDUs with the same key ID, allowing mac80211 to keep Tx
+ * A-MPDU sessions active while rekeying with Extended Key ID.
+ *
+ * @IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD: Hardware supports tx encapsulation
+ * offload
+ *
+ * @IEEE80211_HW_SUPPORTS_RX_DECAP_OFFLOAD: Hardware supports rx decapsulation
+ * offload
+ *
+ * @IEEE80211_HW_SUPPORTS_CONC_MON_RX_DECAP: Hardware supports concurrent rx
+ * decapsulation offload and passing raw 802.11 frames for monitor iface.
+ * If this is supported, the driver must pass both 802.3 frames for real
+ * usage and 802.11 frames with %RX_FLAG_ONLY_MONITOR set for monitor to
+ * the stack.
+ *
+ * @IEEE80211_HW_DETECTS_COLOR_COLLISION: HW/driver has support for BSS color
+ * collision detection and doesn't need it in software.
+ *
+ * @IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX: Hardware/driver handles transmitting
+ * multicast frames on all links, mac80211 should not do that.
+ *
+ * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
+ */
+enum ieee80211_hw_flags {
+ IEEE80211_HW_HAS_RATE_CONTROL,
+ IEEE80211_HW_RX_INCLUDES_FCS,
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING,
+ IEEE80211_HW_SIGNAL_UNSPEC,
+ IEEE80211_HW_SIGNAL_DBM,
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC,
+ IEEE80211_HW_SPECTRUM_MGMT,
+ IEEE80211_HW_AMPDU_AGGREGATION,
+ IEEE80211_HW_SUPPORTS_PS,
+ IEEE80211_HW_PS_NULLFUNC_STACK,
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS,
+ IEEE80211_HW_MFP_CAPABLE,
+ IEEE80211_HW_WANT_MONITOR_VIF,
+ IEEE80211_HW_NO_AUTO_VIF,
+ IEEE80211_HW_SW_CRYPTO_CONTROL,
+ IEEE80211_HW_SUPPORT_FAST_XMIT,
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS,
+ IEEE80211_HW_CONNECTION_MONITOR,
+ IEEE80211_HW_QUEUE_CONTROL,
+ IEEE80211_HW_SUPPORTS_PER_STA_GTK,
+ IEEE80211_HW_AP_LINK_PS,
+ IEEE80211_HW_TX_AMPDU_SETUP_IN_HW,
+ IEEE80211_HW_SUPPORTS_RC_TABLE,
+ IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF,
+ IEEE80211_HW_TIMING_BEACON_ONLY,
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES,
+ IEEE80211_HW_CHANCTX_STA_CSA,
+ IEEE80211_HW_SUPPORTS_CLONED_SKBS,
+ IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
+ IEEE80211_HW_TDLS_WIDER_BW,
+ IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU,
+ IEEE80211_HW_BEACON_TX_STATUS,
+ IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR,
+ IEEE80211_HW_SUPPORTS_REORDERING_BUFFER,
+ IEEE80211_HW_USES_RSS,
+ IEEE80211_HW_TX_AMSDU,
+ IEEE80211_HW_TX_FRAG_LIST,
+ IEEE80211_HW_REPORTS_LOW_ACK,
+ IEEE80211_HW_SUPPORTS_TX_FRAG,
+ IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
+ IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP,
+ IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP,
+ IEEE80211_HW_BUFF_MMPDU_TXQ,
+ IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW,
+ IEEE80211_HW_STA_MMPDU_TXQ,
+ IEEE80211_HW_TX_STATUS_NO_AMPDU_LEN,
+ IEEE80211_HW_SUPPORTS_MULTI_BSSID,
+ IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID,
+ IEEE80211_HW_AMPDU_KEYBORDER_SUPPORT,
+ IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD,
+ IEEE80211_HW_SUPPORTS_RX_DECAP_OFFLOAD,
+ IEEE80211_HW_SUPPORTS_CONC_MON_RX_DECAP,
+ IEEE80211_HW_DETECTS_COLOR_COLLISION,
+ IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX,
+
+ /* keep last, obviously */
+ NUM_IEEE80211_HW_FLAGS
+};
+
+/**
+ * struct ieee80211_hw - hardware information and state
+ *
+ * This structure contains the configuration and hardware
+ * information for an 802.11 PHY.
+ *
+ * @wiphy: This points to the &struct wiphy allocated for this
+ * 802.11 PHY. You must fill in the @perm_addr and @dev
+ * members of this structure using SET_IEEE80211_DEV()
+ * and SET_IEEE80211_PERM_ADDR(). Additionally, all supported
+ * bands (with channels, bitrates) are registered here.
+ *
+ * @conf: &struct ieee80211_conf, device configuration, don't use.
+ *
+ * @priv: pointer to private area that was allocated for driver use
+ * along with this structure.
+ *
+ * @flags: hardware flags, see &enum ieee80211_hw_flags.
+ *
+ * @extra_tx_headroom: headroom to reserve in each transmit skb
+ * for use by the driver (e.g. for transmit headers.)
+ *
+ * @extra_beacon_tailroom: tailroom to reserve in each beacon tx skb.
+ * Can be used by drivers to add extra IEs.
+ *
+ * @max_signal: Maximum value for signal (rssi) in RX information, used
+ * only when @IEEE80211_HW_SIGNAL_UNSPEC is set.
+ *
+ * @max_listen_interval: max listen interval in units of beacon interval
+ * that HW supports
+ *
+ * @queues: number of available hardware transmit queues for
+ * data packets. WMM/QoS requires at least four, these
+ * queues need to have configurable access parameters.
+ *
+ * @rate_control_algorithm: rate control algorithm for this hardware.
+ * If unset (NULL), the default algorithm will be used. Must be
+ * set before calling ieee80211_register_hw().
+ *
+ * @vif_data_size: size (in bytes) of the drv_priv data area
+ * within &struct ieee80211_vif.
+ * @sta_data_size: size (in bytes) of the drv_priv data area
+ * within &struct ieee80211_sta.
+ * @chanctx_data_size: size (in bytes) of the drv_priv data area
+ * within &struct ieee80211_chanctx_conf.
+ * @txq_data_size: size (in bytes) of the drv_priv data area
+ * within &struct ieee80211_txq.
+ *
+ * @max_rates: maximum number of alternate rate retry stages the hw
+ * can handle.
+ * @max_report_rates: maximum number of alternate rate retry stages
+ * the hw can report back.
+ * @max_rate_tries: maximum number of tries for each stage
+ *
+ * @max_rx_aggregation_subframes: maximum buffer size (number of
+ * sub-frames) to be used for A-MPDU block ack receiver
+ * aggregation.
+ * This is only relevant if the device has restrictions on the
+ * number of subframes; if it relies on mac80211 to do reordering
+ * it shouldn't be set.
+ *
+ * @max_tx_aggregation_subframes: maximum number of subframes in an
+ * aggregate an HT/HE device will transmit. In HT AddBA we'll
+ * advertise a constant value of 64 as some older APs crash if
+ * the window size is smaller (an example is LinkSys WRT120N
+ * with FW v1.0.07 build 002 Jun 18 2012).
+ * For AddBA to HE capable peers this value will be used.
+ *
+ * @max_tx_fragments: maximum number of tx buffers per (A)-MSDU, sum
+ * of 1 + skb_shinfo(skb)->nr_frags for each skb in the frag_list.
+ *
+ * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX
+ * (if %IEEE80211_HW_QUEUE_CONTROL is set)
+ *
+ * @radiotap_mcs_details: lists which MCS information the HW can
+ * report; by default it is set to _MCS, _GI and _BW but doesn't
+ * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_\* values; only
+ * adding _BW is supported today.
+ *
+ * @radiotap_vht_details: lists which VHT MCS information the HW reports,
+ * the default is _GI | _BANDWIDTH.
+ * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values.
+ *
+ * @radiotap_he: HE radiotap validity flags
+ *
+ * @radiotap_timestamp: Information for the radiotap timestamp field; if the
+ * @units_pos member is set to a non-negative value then the timestamp
+ * field will be added and populated from the &struct ieee80211_rx_status
+ * device_timestamp.
+ * @radiotap_timestamp.units_pos: Must be set to a combination of a
+ * IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a
+ * IEEE80211_RADIOTAP_TIMESTAMP_SPOS_* value.
+ * @radiotap_timestamp.accuracy: If non-negative, fills the accuracy in the
+ * radiotap field and the accuracy known flag will be set.
+ *
+ * @netdev_features: netdev features to be set in each netdev created
+ * from this HW. Note that not all features are usable with mac80211,
+ * other features will be rejected during HW registration.
+ *
+ * @uapsd_queues: This bitmap is included in the (re)association frame to
+ * indicate for each access category whether it is uAPSD trigger-enabled
+ * and delivery-enabled. Use IEEE80211_WMM_IE_STA_QOSINFO_AC_* to set this
+ * bitmap. Each bit corresponds to a different AC. A value of '1' in a
+ * specific bit means that the corresponding AC is both trigger- and
+ * delivery-enabled; '0' means neither is enabled.
+ *
+ * @uapsd_max_sp_len: maximum number of total buffered frames the WMM AP may
+ * deliver to a WMM STA during any Service Period triggered by the WMM STA.
+ * Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct values.
+ *
+ * @max_nan_de_entries: maximum number of NAN DE functions supported by the
+ * device.
+ *
+ * @tx_sk_pacing_shift: Pacing shift to set on TCP sockets when frames from
+ * them are encountered. The default should typically not be changed,
+ * unless the driver has good reasons for needing more buffers.
+ *
+ * @weight_multiplier: Driver specific airtime weight multiplier used while
+ * refilling deficit of each TXQ.
+ *
+ * @max_mtu: the maximum MTU that can be set.
+ *
+ * @tx_power_levels: a list of power levels supported by the wifi hardware.
+ * The power levels can be specified either as integer or fractions.
+ * The power level at idx 0 shall be the maximum positive power level.
+ *
+ * @max_txpwr_levels_idx: the maximum valid idx of 'tx_power_levels' list.
+ */
+struct ieee80211_hw {
+ struct ieee80211_conf conf;
+ struct wiphy *wiphy;
+ const char *rate_control_algorithm;
+ void *priv;
+ unsigned long flags[BITS_TO_LONGS(NUM_IEEE80211_HW_FLAGS)];
+ unsigned int extra_tx_headroom;
+ unsigned int extra_beacon_tailroom;
+ int vif_data_size;
+ int sta_data_size;
+ int chanctx_data_size;
+ int txq_data_size;
+ u16 queues;
+ u16 max_listen_interval;
+ s8 max_signal;
+ u8 max_rates;
+ u8 max_report_rates;
+ u8 max_rate_tries;
+ u16 max_rx_aggregation_subframes;
+ u16 max_tx_aggregation_subframes;
+ u8 max_tx_fragments;
+ u8 offchannel_tx_hw_queue;
+ u8 radiotap_mcs_details;
+ u16 radiotap_vht_details;
+ struct {
+ int units_pos;
+ s16 accuracy;
+ } radiotap_timestamp;
+ netdev_features_t netdev_features;
+ u8 uapsd_queues;
+ u8 uapsd_max_sp_len;
+ u8 max_nan_de_entries;
+ u8 tx_sk_pacing_shift;
+ u8 weight_multiplier;
+ u32 max_mtu;
+ const s8 *tx_power_levels;
+ u8 max_txpwr_levels_idx;
+};
+
+static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
+ enum ieee80211_hw_flags flg)
+{
+ return test_bit(flg, hw->flags);
+}
+#define ieee80211_hw_check(hw, flg) _ieee80211_hw_check(hw, IEEE80211_HW_##flg)
+
+static inline void _ieee80211_hw_set(struct ieee80211_hw *hw,
+ enum ieee80211_hw_flags flg)
+{
+ return __set_bit(flg, hw->flags);
+}
+#define ieee80211_hw_set(hw, flg) _ieee80211_hw_set(hw, IEEE80211_HW_##flg)
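+
+/*
+ * A short usage sketch (not part of this header): the macros above take the
+ * flag name without the IEEE80211_HW_ prefix; foo_init_caps() is a
+ * hypothetical driver setup function:
+ */
+static bool foo_init_caps(struct ieee80211_hw *hw)
+{
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+
+	/* true if no rate control algorithm should be instantiated */
+	return ieee80211_hw_check(hw, HAS_RATE_CONTROL);
+}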
+
+/**
+ * struct ieee80211_scan_request - hw scan request
+ *
+ * @ies: pointers to different parts of the IEs (in req.ie)
+ * @req: cfg80211 request.
+ */
+struct ieee80211_scan_request {
+ struct ieee80211_scan_ies ies;
+
+ /* Keep last */
+ struct cfg80211_scan_request req;
+};
+
+/**
+ * struct ieee80211_tdls_ch_sw_params - TDLS channel switch parameters
+ *
+ * @sta: peer this TDLS channel-switch request/response came from
+ * @chandef: channel referenced in a TDLS channel-switch request
+ * @action_code: see &enum ieee80211_tdls_actioncode
+ * @status: channel-switch response status
+ * @timestamp: time at which the frame was received
+ * @switch_time: switch-timing parameter received in the frame
+ * @switch_timeout: switch-timing parameter received in the frame
+ * @tmpl_skb: TDLS switch-channel response template
+ * @ch_sw_tm_ie: offset of the channel-switch timing IE inside @tmpl_skb
+ */
+struct ieee80211_tdls_ch_sw_params {
+ struct ieee80211_sta *sta;
+ struct cfg80211_chan_def *chandef;
+ u8 action_code;
+ u32 status;
+ u32 timestamp;
+ u16 switch_time;
+ u16 switch_timeout;
+ struct sk_buff *tmpl_skb;
+ u32 ch_sw_tm_ie;
+};
+
+/**
+ * wiphy_to_ieee80211_hw - return a mac80211 driver hw struct from a wiphy
+ *
+ * @wiphy: the &struct wiphy which we want to query
+ *
+ * mac80211 drivers can use this to get to their respective
+ * &struct ieee80211_hw. Drivers wishing to get to their own private
+ * structure can then access it via hw->priv. Note that mac80211 drivers should
+ * not use wiphy_priv() to try to get their private driver structure as this
+ * is already used internally by mac80211.
+ *
+ * Return: The mac80211 driver hw struct of @wiphy.
+ */
+struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy);
+
+/**
+ * SET_IEEE80211_DEV - set device for 802.11 hardware
+ *
+ * @hw: the &struct ieee80211_hw to set the device for
+ * @dev: the &struct device of this 802.11 device
+ */
+static inline void SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev)
+{
+ set_wiphy_dev(hw->wiphy, dev);
+}
+
+/**
+ * SET_IEEE80211_PERM_ADDR - set the permanent MAC address for 802.11 hardware
+ *
+ * @hw: the &struct ieee80211_hw to set the MAC address for
+ * @addr: the address to set
+ */
+static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, const u8 *addr)
+{
+ memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN);
+}
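+
+/*
+ * A minimal sketch (not part of this header) of typical probe-time use of
+ * the two helpers above; where the device pointer and MAC address come from
+ * is a hypothetical driver detail:
+ */
+static void foo_init_hw(struct ieee80211_hw *hw, struct device *dev,
+			const u8 *mac)
+{
+	SET_IEEE80211_DEV(hw, dev);
+	SET_IEEE80211_PERM_ADDR(hw, mac);
+}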
+
+static inline struct ieee80211_rate *
+ieee80211_get_tx_rate(const struct ieee80211_hw *hw,
+ const struct ieee80211_tx_info *c)
+{
+ if (WARN_ON_ONCE(c->control.rates[0].idx < 0))
+ return NULL;
+ return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[0].idx];
+}
+
+static inline struct ieee80211_rate *
+ieee80211_get_rts_cts_rate(const struct ieee80211_hw *hw,
+ const struct ieee80211_tx_info *c)
+{
+ if (c->control.rts_cts_rate_idx < 0)
+ return NULL;
+ return &hw->wiphy->bands[c->band]->bitrates[c->control.rts_cts_rate_idx];
+}
+
+static inline struct ieee80211_rate *
+ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
+ const struct ieee80211_tx_info *c, int idx)
+{
+ if (c->control.rates[idx + 1].idx < 0)
+ return NULL;
+ return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[idx + 1].idx];
+}
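+
+/*
+ * An illustrative sketch (not part of this header): in the TX path a driver
+ * could look up the first rate for a frame via ieee80211_get_tx_rate(); in a
+ * real driver the tx_info would come from IEEE80211_SKB_CB(skb):
+ */
+static u16 foo_first_bitrate(const struct ieee80211_hw *hw,
+			     const struct ieee80211_tx_info *info)
+{
+	struct ieee80211_rate *rate = ieee80211_get_tx_rate(hw, info);
+
+	/* ieee80211_rate::bitrate is in units of 100 kbps */
+	return rate ? rate->bitrate : 0;
+}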
+
+/**
+ * ieee80211_free_txskb - free TX skb
+ * @hw: the hardware
+ * @skb: the skb
+ *
+ * Free a transmit skb. Use this function when some failure
+ * to transmit happened and thus status cannot be reported.
+ */
+void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
+
+/**
+ * DOC: Hardware crypto acceleration
+ *
+ * mac80211 is capable of taking advantage of many hardware
+ * acceleration designs for encryption and decryption operations.
+ *
+ * The set_key() callback in the &struct ieee80211_ops for a given
+ * device is called to enable hardware acceleration of encryption and
+ * decryption. The callback takes a @sta parameter that will be NULL
+ * for default keys or keys used for transmission only, or point to
+ * the station information for the peer for individual keys.
+ * Multiple transmission keys with the same key index may be used when
+ * VLANs are configured for an access point.
+ *
+ * When transmitting, the TX control data will use the @hw_key_idx
+ * selected by the driver by modifying the &struct ieee80211_key_conf
+ * pointed to by the @key parameter to the set_key() function.
+ *
+ * The set_key() call for the %SET_KEY command should return 0 if
+ * the key is now in use, -%EOPNOTSUPP or -%ENOSPC if it couldn't be
+ * added; if you return 0 then hw_key_idx must be assigned to the
+ * hardware key index; you are free to use the full u8 range.
+ *
+ * Note that in the case that the @IEEE80211_HW_SW_CRYPTO_CONTROL flag is
+ * set, mac80211 will not automatically fall back to software crypto if
+ * enabling hardware crypto failed. The set_key() call may also return the
+ * value 1 to permit this specific key/algorithm to be done in software.
+ *
+ * When the cmd is %DISABLE_KEY then it must succeed.
+ *
+ * Note that it is permissible to not decrypt a frame even if a key
+ * for it has been uploaded to hardware, the stack will not make any
+ * decision based on whether a key has been uploaded or not but rather
+ * based on the receive flags.
+ *
+ * The &struct ieee80211_key_conf structure pointed to by the @key
+ * parameter is guaranteed to be valid until another call to set_key()
+ * removes it, but it can only be used as a cookie to differentiate
+ * keys.
+ *
+ * In TKIP some HW needs to be provided a phase 1 key for RX decryption
+ * acceleration (e.g. iwlwifi). Such drivers should provide an
+ * update_tkip_key handler.
+ * The update_tkip_key() call updates the driver with the new phase 1 key.
+ * This happens every time the iv16 wraps around (every 65536 packets). The
+ * set_key() call will happen only once for each key (unless the AP did
+ * rekeying), it will not include a valid phase 1 key. The valid phase 1 key is
+ * provided by update_tkip_key only. The trigger that makes mac80211 call this
+ * handler is software decryption with wrap around of iv16.
+ *
+ * The set_default_unicast_key() call updates the default WEP key index
+ * configured to the hardware for WEP encryption type. This is required
+ * for devices that support offload of data packets (e.g. ARP responses).
+ *
+ * mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag
+ * when they are able to replace in-use PTK keys according to the following
+ * requirements:
+ * 1) They do not hand over frames decrypted with the old key to mac80211
+ *    once the call to set_key() with command %DISABLE_KEY has been completed,
+ * 2) either drop or continue to use the old key for any outgoing frames queued
+ *    at the time of the key deletion (including re-transmits),
+ * 3) never send out a frame queued prior to the set_key() %SET_KEY command
+ *    encrypted with the new key when also needing
+ *    @IEEE80211_KEY_FLAG_GENERATE_IV and
+ * 4) never send out a frame unencrypted when it should be encrypted.
+ * mac80211 will not queue any new frames for a deleted key to the driver.
+ */
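+
+/*
+ * A minimal set_key() sketch (not part of this header) following the rules
+ * above; foo_hw_add_key()/foo_hw_del_key() are hypothetical stubs standing
+ * in for real hardware programming:
+ */
+static u8 foo_hw_add_key(struct ieee80211_key_conf *key) { return 0; }
+static void foo_hw_del_key(u8 idx) { }
+
+static int foo_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		       struct ieee80211_key_conf *key)
+{
+	switch (cmd) {
+	case SET_KEY:
+		if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
+			return -EOPNOTSUPP;	/* fall back to software */
+		key->hw_key_idx = foo_hw_add_key(key);
+		return 0;
+	case DISABLE_KEY:
+		foo_hw_del_key(key->hw_key_idx);
+		return 0;			/* %DISABLE_KEY must succeed */
+	}
+	return -EOPNOTSUPP;
+}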
+
+/**
+ * DOC: Powersave support
+ *
+ * mac80211 has support for various powersave implementations.
+ *
+ * First, it can support hardware that handles all powersaving by itself,
+ * such hardware should simply set the %IEEE80211_HW_SUPPORTS_PS hardware
+ * flag. In that case, it will be told about the desired powersave mode
+ * with the %IEEE80211_CONF_PS flag depending on the association status.
+ * The hardware must take care of sending nullfunc frames when necessary,
+ * i.e. when entering and leaving powersave mode. The hardware is required
+ * to look at the AID in beacons and signal to the AP that it woke up when
+ * it finds traffic directed to it.
+ *
+ * %IEEE80211_CONF_PS flag enabled means that the powersave mode defined in
+ * IEEE 802.11-2007 section 11.2 is enabled. This is not to be confused
+ * with hardware wakeup and sleep states. The driver is responsible for waking
+ * up the hardware before issuing commands to the hardware and putting it
+ * back to sleep at appropriate times.
+ *
+ * When PS is enabled, hardware needs to wake up for beacons and receive the
+ * buffered multicast/broadcast frames after the beacon. Also it must be
+ * possible to send frames and receive the acknowledgment frame.
+ *
+ * Other hardware designs cannot send nullfunc frames by themselves and also
+ * need software support for parsing the TIM bitmap. This is also supported
+ * by mac80211 by combining the %IEEE80211_HW_SUPPORTS_PS and
+ * %IEEE80211_HW_PS_NULLFUNC_STACK flags. The hardware is of course still
+ * required to pass up beacons. The hardware is still required to handle
+ * waking up for multicast traffic; if it cannot, the driver must handle that
+ * as best it can; mac80211 is too slow to do that.
+ *
+ * Dynamic powersave is an extension to normal powersave in which the
+ * hardware stays awake for a user-specified period of time after sending a
+ * frame so that reply frames need not be buffered and therefore delayed to
+ * the next wakeup. It's a compromise between getting good enough latency
+ * when there's data traffic and still saving significant power in idle
+ * periods.
+ *
+ * Dynamic powersave is simply supported by mac80211 enabling and disabling
+ * PS based on traffic. The driver only needs to set the
+ * %IEEE80211_HW_SUPPORTS_PS flag and mac80211 will handle everything
+ * automatically. Additionally, hardware having support for the dynamic PS
+ * feature may set the %IEEE80211_HW_SUPPORTS_DYNAMIC_PS flag to indicate
+ * that it can support dynamic PS mode itself. The driver needs to look at
+ * the @dynamic_ps_timeout hardware configuration value and use that value
+ * whenever %IEEE80211_CONF_PS is set. In this case mac80211 will disable
+ * the dynamic PS feature in the stack and will just keep %IEEE80211_CONF_PS
+ * enabled whenever the user has enabled powersave.
+ *
+ * The driver indicates U-APSD client support by setting the
+ * %IEEE80211_VIF_SUPPORTS_UAPSD flag. The mode is configured through the
+ * uapsd parameter in the conf_tx() operation. The hardware needs to send
+ * the QoS Nullfunc frames and stay awake until the service period has
+ * ended. To utilize U-APSD, dynamic powersave is disabled for the VoIP AC
+ * and all frames from that AC are transmitted with powersave enabled.
+ *
+ * Note: U-APSD client mode is not yet supported with
+ * %IEEE80211_HW_PS_NULLFUNC_STACK.
+ */
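+
+/*
+ * A minimal sketch (not part of this header): a %IEEE80211_HW_SUPPORTS_PS
+ * driver reacting to the PS flag in its config() callback; foo_fw_set_ps()
+ * is a hypothetical firmware call:
+ */
+static void foo_fw_set_ps(bool enable) { }	/* hypothetical stub */
+
+static int foo_config(struct ieee80211_hw *hw, u32 changed)
+{
+	if (changed & IEEE80211_CONF_CHANGE_PS)
+		foo_fw_set_ps(hw->conf.flags & IEEE80211_CONF_PS);
+	return 0;
+}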
+
+/**
+ * DOC: Beacon filter support
+ *
+ * Some hardware has beacon filter support to reduce host CPU wakeups,
+ * which in turn reduces system power consumption. It usually works so that
+ * the firmware creates a checksum of the beacon but omits all constantly
+ * changing elements (TSF, TIM etc). Whenever the checksum changes the
+ * beacon is forwarded to the host, otherwise it is just dropped. That
+ * way the host will only receive beacons where some relevant information
+ * (for example ERP protection or WMM settings) has changed.
+ *
+ * Beacon filter support is advertised with the %IEEE80211_VIF_BEACON_FILTER
+ * interface capability. The driver needs to enable beacon filter support
+ * whenever power save is enabled, that is %IEEE80211_CONF_PS is set. When
+ * power save is enabled, the stack will not check for beacon loss and the
+ * driver needs to notify about loss of beacons with ieee80211_beacon_loss().
+ *
+ * The time (or number of beacons missed) until the firmware notifies the
+ * driver of a beacon loss event (which in turn causes the driver to call
+ * ieee80211_beacon_loss()) should be configurable and will be controlled
+ * by mac80211 and the roaming algorithm in the future.
+ *
+ * Since there may be constantly changing information elements that nothing
+ * in the software stack cares about, we will, in the future, have mac80211
+ * tell the driver which information elements are interesting in the sense
+ * that we want to see changes in them. This will include
+ *
+ * - a list of information element IDs
+ * - a list of OUIs for the vendor information element
+ *
+ * Ideally, the hardware would filter out any beacons without changes in the
+ * requested elements, but if it cannot support that it may, at the expense
+ * of some efficiency, filter out only a subset. For example, if the device
+ * doesn't support checking for OUIs it should pass up all changes in all
+ * vendor information elements.
+ *
+ * Note that change, for the sake of simplification, also includes information
+ * elements appearing or disappearing from the beacon.
+ *
+ * Some hardware supports an "ignore list" instead, just make sure nothing
+ * that was requested is on the ignore list, and include commonly changing
+ * information element IDs in the ignore list, for example 11 (BSS load) and
+ * the various vendor-assigned IEs with unknown contents (128, 129, 133-136,
+ * 149, 150, 155, 156, 173, 176, 178, 179, 219); for forward compatibility
+ * it could also include some currently unused IDs.
+ *
+ *
+ * In addition to these capabilities, hardware should support notifying the
+ * host of changes in the beacon RSSI. This is relevant to implement roaming
+ * when no traffic is flowing (when traffic is flowing we see the RSSI of
+ * the received data packets). This can consist of notifying the host when
+ * the RSSI changes significantly or when it drops below or rises above
+ * configurable thresholds. In the future these thresholds will also be
+ * configured by mac80211 (which gets them from userspace) to implement
+ * them as the roaming algorithm requires.
+ *
+ * If the hardware cannot implement this, the driver should ask it to
+ * periodically pass beacon frames to the host so that software can do the
+ * signal strength threshold checking.
+ */
+
+/**
+ * DOC: Spatial multiplexing power save
+ *
+ * SMPS (Spatial multiplexing power save) is a mechanism to conserve
+ * power in an 802.11n implementation. For details on the mechanism
+ * and rationale, please refer to 802.11 (as amended by 802.11n-2009)
+ * "11.2.3 SM power save".
+ *
+ * The mac80211 implementation is capable of sending action frames
+ * to update the AP about the station's SMPS mode, and will instruct
+ * the driver to enter the specific mode. It will also announce the
+ * requested SMPS mode during the association handshake. Hardware
+ * support for this feature is required, and can be indicated by
+ * hardware flags.
+ *
+ * The default mode will be "automatic", which nl80211/cfg80211
+ * defines to be dynamic SMPS in (regular) powersave, and SMPS
+ * turned off otherwise.
+ *
+ * To support this feature, the driver must set the appropriate
+ * hardware support flags, and handle the SMPS flag passed to the config()
+ * operation. It will then be instructed through this mechanism to
+ * enter the requested SMPS mode while associated to an HT AP.
+ */
+
+/**
+ * DOC: Frame filtering
+ *
+ * mac80211 needs to see many management frames for proper
+ * operation, and users may want to see many more frames when
+ * in monitor mode. However, for best CPU usage and power consumption,
+ * having as few frames as possible percolate through the stack is
+ * desirable. Hence, the hardware should filter as much as possible.
+ *
+ * To achieve this, mac80211 uses filter flags (see below) to tell
+ * the driver's configure_filter() function which frames should be
+ * passed to mac80211 and which should be filtered out.
+ *
+ * Before configure_filter() is invoked, the prepare_multicast()
+ * callback is invoked with the parameters @mc_count and @mc_list
+ * for the combined multicast address list of all virtual interfaces.
+ * Its use is optional, and it returns a u64 that is passed to
+ * configure_filter(). Additionally, configure_filter() has the
+ * arguments @changed_flags telling which flags were changed and
+ * @total_flags with the new flag states.
+ *
+ * If your device has no multicast address filters your driver will
+ * need to check both the %FIF_ALLMULTI flag and the @mc_count
+ * parameter to see whether multicast frames should be accepted
+ * or dropped.
+ *
+ * All unsupported flags in @total_flags must be cleared.
+ * Hardware does not support a flag if it is incapable of _passing_
+ * the frame to the stack. Otherwise the driver must ignore
+ * the flag, but not clear it.
+ * You must _only_ clear the flag (announce no support for the
+ * flag to mac80211) if you are not able to pass the packet type
+ * to the stack (so the hardware always filters it).
+ * So for example, you should clear @FIF_CONTROL, if your hardware
+ * always filters control frames. If your hardware always passes
+ * control frames to the kernel and is incapable of filtering them,
+ * you do _not_ clear the @FIF_CONTROL flag.
+ * This rule applies to all other FIF flags as well.
+ */
+
+/**
+ * DOC: AP support for powersaving clients
+ *
+ * In order to implement AP and P2P GO modes, mac80211 has support for
+ * client powersaving, both "legacy" PS (PS-Poll/null data) and uAPSD.
+ * There currently is no support for sAPSD.
+ *
+ * There is one assumption that mac80211 makes, namely that a client
+ * will not poll with PS-Poll and trigger with uAPSD at the same time.
+ * Both are supported, and both can be used by the same client, but
+ * they can't be used concurrently by the same client. This simplifies
+ * the driver code.
+ *
+ * The first thing to keep in mind is that there is a flag for complete
+ * driver implementation: %IEEE80211_HW_AP_LINK_PS. If this flag is set,
+ * mac80211 expects the driver to handle most of the state machine for
+ * powersaving clients and will ignore the PM bit in incoming frames.
+ * Drivers then use ieee80211_sta_ps_transition() to inform mac80211 of
+ * stations' powersave transitions. In this mode, mac80211 also doesn't
+ * handle PS-Poll/uAPSD.
+ *
+ * In the mode without %IEEE80211_HW_AP_LINK_PS, mac80211 will check the
+ * PM bit in incoming frames for client powersave transitions. When a
+ * station goes to sleep, we will stop transmitting to it. There is,
+ * however, a race condition: a station might go to sleep while there is
+ * data buffered on hardware queues. If the device has support for this
+ * it will reject frames, and the driver should give the frames back to
+ * mac80211 with the %IEEE80211_TX_STAT_TX_FILTERED flag set which will
+ * cause mac80211 to retry the frame when the station wakes up. The
+ * driver is also notified of powersave transitions by calling its
+ * @sta_notify callback.
+ *
+ * When the station is asleep, it has three choices: it can wake up,
+ * it can PS-Poll, or it can possibly start a uAPSD service period.
+ * Waking up is implemented by simply transmitting all buffered (and
+ * filtered) frames to the station. This is the easiest case. When
+ * the station sends a PS-Poll or a uAPSD trigger frame, mac80211
+ * will inform the driver of this with the @allow_buffered_frames
+ * callback; this callback is optional. mac80211 will then transmit
+ * the frames as usual and set the %IEEE80211_TX_CTL_NO_PS_BUFFER flag
+ * on each frame. The last frame in the service period (or the only
+ * response to a PS-Poll) also has %IEEE80211_TX_STATUS_EOSP set to
+ * indicate that it ends the service period; as this frame must have a
+ * TX status report it also sets %IEEE80211_TX_CTL_REQ_TX_STATUS.
+ * When TX status is reported for this frame, the service period is
+ * marked as having ended and a new one can be started by the peer.
+ *
+ * Additionally, non-bufferable MMPDUs can also be transmitted by
+ * mac80211 with the %IEEE80211_TX_CTL_NO_PS_BUFFER set in them.
+ *
+ * Another race condition can happen on some devices like iwlwifi
+ * when there are frames queued for the station and it wakes up
+ * or polls; the frames that are already queued could end up being
+ * transmitted first instead, causing reordering and/or wrong
+ * processing of the EOSP. The cause is that allowing frames to be
+ * transmitted to a certain station is out-of-band communication to
+ * the device. To allow this problem to be solved, the driver can
+ * call ieee80211_sta_block_awake() if frames are buffered when it
+ * is notified that the station went to sleep. When all these frames
+ * have been filtered (see above), it must call the function again
+ * to indicate that the station is no longer blocked.
+ *
+ * If the driver buffers frames in the driver for aggregation in any
+ * way, it must use the ieee80211_sta_set_buffered() call when it is
+ * notified of the station going to sleep to inform mac80211 of any
+ * TIDs that have frames buffered. Note that when a station wakes up
+ * this information is reset (hence the requirement to call it when
+ * informed of the station going to sleep). Then, when a service
+ * period starts for any reason, @release_buffered_frames is called
+ * with the number of frames to be released and which TIDs they are
+ * to come from. In this case, the driver is responsible for setting
+ * the EOSP (for uAPSD) and MORE_DATA bits in the released frames,
+ * to help with this, the @more_data parameter is passed to tell the driver
+ * if there is more data on other TIDs -- the TIDs to release frames
+ * from are ignored since mac80211 doesn't know how many frames the
+ * buffers for those TIDs contain.
+ *
+ * If the driver also implements GO mode, where absence periods may
+ * shorten service periods (or abort PS-Poll responses), it must
+ * filter those response frames except in the case of frames that
+ * are buffered in the driver -- those must remain buffered to avoid
+ * reordering. Because it is possible that no frames are released
+ * in this case, the driver must call ieee80211_sta_eosp()
+ * to indicate to mac80211 that the service period ended anyway.
+ *
+ * Finally, if frames from multiple TIDs are released from mac80211
+ * but the driver might reorder them, it must clear & set the flags
+ * appropriately (only the last frame may have %IEEE80211_TX_STATUS_EOSP)
+ * and also take care of the EOSP and MORE_DATA bits in the frame.
+ * The driver may also use ieee80211_sta_eosp() in this case.
+ *
+ * Note that if the driver ever buffers frames other than QoS-data
+ * frames, it must take care to never send a non-QoS-data frame as
+ * the last frame in a service period, adding a QoS-nulldata frame
+ * after a non-QoS-data frame if needed.
+ */
+
+/**
+ * DOC: HW queue control
+ *
+ * Before HW queue control was introduced, mac80211 only had a single static
+ * assignment of per-interface AC software queues to hardware queues. This
+ * was problematic for a few reasons:
+ * 1) off-channel transmissions might get stuck behind other frames
+ * 2) multiple virtual interfaces couldn't be handled correctly
+ * 3) after-DTIM frames could get stuck behind other frames
+ *
+ * To solve this, hardware typically uses multiple different queues for all
+ * the different usages, and this needs to be propagated into mac80211 so it
+ * won't have the same problem with the software queues.
+ *
+ * Therefore, mac80211 now offers the %IEEE80211_HW_QUEUE_CONTROL capability
+ * flag that tells it that the driver implements its own queue control. To do
+ * so, the driver will set up the various queues in each &struct ieee80211_vif
+ * and the offchannel queue in &struct ieee80211_hw. In response, mac80211 will
+ * use those queue IDs in the hw_queue field of &struct ieee80211_tx_info and
+ * if necessary will queue the frame on the right software queue that mirrors
+ * the hardware queue.
+ * Additionally, the driver has to then use these HW queue IDs for the queue
+ * management functions (ieee80211_stop_queue() et al.)
+ *
+ * The driver is free to set up the queue mappings as needed, multiple virtual
+ * interfaces may map to the same hardware queues if needed. The setup has to
+ * happen during add_interface or change_interface callbacks. For example, a
+ * driver supporting station+station and station+AP modes might decide to have
+ * 10 hardware queues to handle different scenarios:
+ *
+ * 4 AC HW queues for 1st vif: 0, 1, 2, 3
+ * 4 AC HW queues for 2nd vif: 4, 5, 6, 7
+ * after-DTIM queue for AP: 8
+ * off-channel queue: 9
+ *
+ * It would then set up the hardware like this:
+ * hw.offchannel_tx_hw_queue = 9
+ *
+ * and the first virtual interface that is added as follows:
+ * vif.hw_queue[IEEE80211_AC_VO] = 0
+ * vif.hw_queue[IEEE80211_AC_VI] = 1
+ * vif.hw_queue[IEEE80211_AC_BE] = 2
+ * vif.hw_queue[IEEE80211_AC_BK] = 3
+ * vif.cab_queue = 8 // if AP mode, otherwise %IEEE80211_INVAL_HW_QUEUE
+ * and the second virtual interface with 4-7.
+ *
+ * If queue 6 gets full, for example, mac80211 would only stop the second
+ * virtual interface's BE queue since virtual interface queues are per AC.
+ *
+ * Note that the vif.cab_queue value should be set to %IEEE80211_INVAL_HW_QUEUE
+ * whenever the queue is not used (i.e. the interface is not in AP mode) if the
+ * queue could potentially be shared since mac80211 will look at cab_queue when
+ * a queue is stopped/woken even if the interface is not in AP mode.
+ */
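+
+/*
+ * A sketch (not part of this header) of the 10-queue example above, as it
+ * could look in an add_interface() callback for the first vif; the fixed
+ * queue numbers simply follow the example layout:
+ */
+static void foo_assign_hw_queues(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif)
+{
+	vif->hw_queue[IEEE80211_AC_VO] = 0;
+	vif->hw_queue[IEEE80211_AC_VI] = 1;
+	vif->hw_queue[IEEE80211_AC_BE] = 2;
+	vif->hw_queue[IEEE80211_AC_BK] = 3;
+	vif->cab_queue = vif->type == NL80211_IFTYPE_AP ?
+			 8 : IEEE80211_INVAL_HW_QUEUE;
+	hw->offchannel_tx_hw_queue = 9;
+}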
+
+/**
+ * enum ieee80211_filter_flags - hardware filter flags
+ *
+ * These flags determine what the filter in hardware should be
+ * programmed to let through and what should not be passed to the
+ * stack. It is always safe to pass more frames than requested,
+ * but this has negative impact on power consumption.
+ *
+ * @FIF_ALLMULTI: pass all multicast frames, this is used if requested
+ * by the user or if the hardware is not capable of filtering by
+ * multicast address.
+ *
+ * @FIF_FCSFAIL: pass frames with failed FCS (but you need to set the
+ * %RX_FLAG_FAILED_FCS_CRC for them)
+ *
+ * @FIF_PLCPFAIL: pass frames with failed PLCP CRC (but you need to set
+ * the %RX_FLAG_FAILED_PLCP_CRC for them)
+ *
+ * @FIF_BCN_PRBRESP_PROMISC: This flag is set during scanning to indicate
+ * to the hardware that it should not filter beacons or probe responses
+ * by BSSID. Filtering them can greatly reduce the amount of processing
+ * mac80211 needs to do and the amount of CPU wakeups, so you should
+ * honour this flag if possible.
+ *
+ * @FIF_CONTROL: pass control frames (except for PS Poll) addressed to this
+ * station
+ *
+ * @FIF_OTHER_BSS: pass frames destined to other BSSes
+ *
+ * @FIF_PSPOLL: pass PS Poll frames
+ *
+ * @FIF_PROBE_REQ: pass probe request frames
+ *
+ * @FIF_MCAST_ACTION: pass multicast Action frames
+ */
+enum ieee80211_filter_flags {
+ FIF_ALLMULTI = 1<<1,
+ FIF_FCSFAIL = 1<<2,
+ FIF_PLCPFAIL = 1<<3,
+ FIF_BCN_PRBRESP_PROMISC = 1<<4,
+ FIF_CONTROL = 1<<5,
+ FIF_OTHER_BSS = 1<<6,
+ FIF_PSPOLL = 1<<7,
+ FIF_PROBE_REQ = 1<<8,
+ FIF_MCAST_ACTION = 1<<9,
+};
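+
+/*
+ * A configure_filter() sketch (not part of this header) applying the
+ * clearing rule from the "Frame filtering" section above, for a device that
+ * always filters control frames and has no per-address multicast filter;
+ * foo_hw_apply_filter() is a hypothetical stub:
+ */
+static void foo_hw_apply_filter(unsigned int flags) { }
+
+static void foo_configure_filter(struct ieee80211_hw *hw,
+				 unsigned int changed_flags,
+				 unsigned int *total_flags, u64 multicast)
+{
+	/* we cannot pass control frames to the stack: announce no support */
+	*total_flags &= ~FIF_CONTROL;
+	/* no multicast address filter: pass all multicast frames */
+	*total_flags |= FIF_ALLMULTI;
+	foo_hw_apply_filter(*total_flags);
+}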
+
+/**
+ * enum ieee80211_ampdu_mlme_action - A-MPDU actions
+ *
+ * These flags are used with the ampdu_action() callback in
+ * &struct ieee80211_ops to indicate which action is needed.
+ *
+ * Note that drivers MUST be able to deal with a TX aggregation
+ * session being stopped even before they OK'ed starting it by
+ * calling ieee80211_start_tx_ba_cb_irqsafe, because the peer
+ * might receive the addBA frame and send a delBA right away!
+ *
+ * @IEEE80211_AMPDU_RX_START: start RX aggregation
+ * @IEEE80211_AMPDU_RX_STOP: stop RX aggregation
+ * @IEEE80211_AMPDU_TX_START: start TX aggregation. The driver must either
+ * call ieee80211_start_tx_ba_cb_irqsafe(), or return the special status
+ * %IEEE80211_AMPDU_TX_START_DELAY_ADDBA to delay the addBA until after
+ * ieee80211_start_tx_ba_cb_irqsafe() is called, or just return the special
+ * status %IEEE80211_AMPDU_TX_START_IMMEDIATE.
+ * @IEEE80211_AMPDU_TX_OPERATIONAL: TX aggregation has become operational
+ * @IEEE80211_AMPDU_TX_STOP_CONT: stop TX aggregation but continue transmitting
+ * queued packets, now unaggregated. After all packets are transmitted the
+ * driver has to call ieee80211_stop_tx_ba_cb_irqsafe().
+ * @IEEE80211_AMPDU_TX_STOP_FLUSH: stop TX aggregation and flush all packets,
+ * called when the station is removed. There's no need or reason to call
+ * ieee80211_stop_tx_ba_cb_irqsafe() in this case as mac80211 assumes the
+ * session is gone and removes the station.
+ * @IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: called when TX aggregation is stopped
+ * but the driver hasn't called ieee80211_stop_tx_ba_cb_irqsafe() yet and
+ * now the connection is dropped and the station will be removed. Drivers
+ * should clean up and drop remaining packets when this is called.
+ */
+enum ieee80211_ampdu_mlme_action {
+ IEEE80211_AMPDU_RX_START,
+ IEEE80211_AMPDU_RX_STOP,
+ IEEE80211_AMPDU_TX_START,
+ IEEE80211_AMPDU_TX_STOP_CONT,
+ IEEE80211_AMPDU_TX_STOP_FLUSH,
+ IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
+ IEEE80211_AMPDU_TX_OPERATIONAL,
+};
+
+#define IEEE80211_AMPDU_TX_START_IMMEDIATE 1
+#define IEEE80211_AMPDU_TX_START_DELAY_ADDBA 2
+
+/**
+ * struct ieee80211_ampdu_params - AMPDU action parameters
+ *
+ * @action: the A-MPDU action, value from &enum ieee80211_ampdu_mlme_action.
+ * @sta: peer of this AMPDU session
+ * @tid: tid of the BA session
+ * @ssn: start sequence number of the session. TX/RX_STOP can pass 0. When
+ * action is set to %IEEE80211_AMPDU_RX_START the driver passes back the
+ * actual ssn value used to start the session and writes the value here.
+ * @buf_size: reorder buffer size (number of subframes). Valid only when the
+ * action is set to %IEEE80211_AMPDU_RX_START or
+ * %IEEE80211_AMPDU_TX_OPERATIONAL
+ * @amsdu: indicates the peer's ability to receive A-MSDU within A-MPDU.
+ * valid when the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL
+ * @timeout: BA session timeout. Valid only when the action is set to
+ * %IEEE80211_AMPDU_RX_START
+ */
+struct ieee80211_ampdu_params {
+ enum ieee80211_ampdu_mlme_action action;
+ struct ieee80211_sta *sta;
+ u16 tid;
+ u16 ssn;
+ u16 buf_size;
+ bool amsdu;
+ u16 timeout;
+};
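+
+/*
+ * A minimal ampdu_action() sketch (not part of this header) covering the RX
+ * actions and the immediate-start TX case; foo_hw_rx_ba() is a hypothetical
+ * stub standing in for hardware BA (re)configuration:
+ */
+static void foo_hw_rx_ba(struct ieee80211_sta *sta, u16 tid, u16 ssn,
+			 u16 buf_size, bool start) { }
+
+static int foo_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			    struct ieee80211_ampdu_params *params)
+{
+	switch (params->action) {
+	case IEEE80211_AMPDU_RX_START:
+		foo_hw_rx_ba(params->sta, params->tid, params->ssn,
+			     params->buf_size, true);
+		return 0;
+	case IEEE80211_AMPDU_RX_STOP:
+		foo_hw_rx_ba(params->sta, params->tid, 0, 0, false);
+		return 0;
+	case IEEE80211_AMPDU_TX_START:
+		return IEEE80211_AMPDU_TX_START_IMMEDIATE;
+	default:
+		return -EOPNOTSUPP;
+	}
+}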
+
+/**
+ * enum ieee80211_frame_release_type - frame release reason
+ * @IEEE80211_FRAME_RELEASE_PSPOLL: frame released for PS-Poll
+ * @IEEE80211_FRAME_RELEASE_UAPSD: frame(s) released due to
+ * frame received on trigger-enabled AC
+ */
+enum ieee80211_frame_release_type {
+ IEEE80211_FRAME_RELEASE_PSPOLL,
+ IEEE80211_FRAME_RELEASE_UAPSD,
+};
+
+/**
+ * enum ieee80211_rate_control_changed - flags to indicate what changed
+ *
+ * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit
+ * to this station changed. The actual bandwidth is in the station
+ * information -- for HT20/40 the IEEE80211_HT_CAP_SUP_WIDTH_20_40
+ * flag changes, for HT and VHT the bandwidth field changes.
+ * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed.
+ * @IEEE80211_RC_SUPP_RATES_CHANGED: The supported rate set of this peer
+ * changed (in IBSS mode) due to discovering more information about
+ * the peer.
+ * @IEEE80211_RC_NSS_CHANGED: N_SS (number of spatial streams) was changed
+ * by the peer
+ */
+enum ieee80211_rate_control_changed {
+ IEEE80211_RC_BW_CHANGED = BIT(0),
+ IEEE80211_RC_SMPS_CHANGED = BIT(1),
+ IEEE80211_RC_SUPP_RATES_CHANGED = BIT(2),
+ IEEE80211_RC_NSS_CHANGED = BIT(3),
+};
+
+/**
+ * enum ieee80211_roc_type - remain on channel type
+ *
+ * With the support for multi channel contexts and multi channel operations,
+ * remain on channel operations might be limited/deferred/aborted by other
+ * flows/operations which have higher priority (and vice versa).
+ * Specifying the ROC type can be used by devices to prioritize the ROC
+ * operations compared to other operations/flows.
+ *
+ * @IEEE80211_ROC_TYPE_NORMAL: There are no special requirements for this ROC.
+ * @IEEE80211_ROC_TYPE_MGMT_TX: The remain on channel request is required
+ * for sending management frames offchannel.
+ */
+enum ieee80211_roc_type {
+ IEEE80211_ROC_TYPE_NORMAL = 0,
+ IEEE80211_ROC_TYPE_MGMT_TX,
+};
+
+/**
+ * enum ieee80211_reconfig_type - reconfig type
+ *
+ * This enum is used by the reconfig_complete() callback to indicate what
+ * reconfiguration type was completed.
+ *
+ * @IEEE80211_RECONFIG_TYPE_RESTART: hw restart type
+ * (also due to resume() callback returning 1)
+ * @IEEE80211_RECONFIG_TYPE_SUSPEND: suspend type (regardless
+ * of wowlan configuration)
+ */
+enum ieee80211_reconfig_type {
+ IEEE80211_RECONFIG_TYPE_RESTART,
+ IEEE80211_RECONFIG_TYPE_SUSPEND,
+};
+
+/**
+ * struct ieee80211_prep_tx_info - prepare TX information
+ * @duration: if non-zero, hint about the required duration,
+ * only used with the mgd_prepare_tx() method.
+ * @subtype: frame subtype (auth, (re)assoc, deauth, disassoc)
+ * @success: whether the frame exchange was successful, only
+ * used with the mgd_complete_tx() method, and then only
+ * valid for auth and (re)assoc.
+ */
+struct ieee80211_prep_tx_info {
+ u16 duration;
+ u16 subtype;
+ u8 success:1;
+};
+
+/**
+ * struct ieee80211_ops - callbacks from mac80211 to the driver
+ *
+ * This structure contains various callbacks that the driver may
+ * handle or, in some cases, must handle, for example to configure
+ * the hardware to a new channel or to transmit a frame.
+ *
+ * @tx: Handler that 802.11 module calls for each transmitted frame.
+ * skb contains the buffer starting from the IEEE 802.11 header.
+ * The low-level driver should send the frame out based on
+ * configuration in the TX control data. This handler should,
+ * preferably, never fail and stop queues appropriately.
+ * Must be atomic.
+ *
+ * @start: Called before the first netdevice attached to the hardware
+ * is enabled. This should turn on the hardware and must turn on
+ * frame reception (for possibly enabled monitor interfaces.)
+ * Returns zero or a negative error code; error codes may be
+ * seen in userspace.
+ * When the device is started it should not have a MAC address
+ * to avoid acknowledging frames before a non-monitor device
+ * is added.
+ * Must be implemented and can sleep.
+ *
+ * @stop: Called after last netdevice attached to the hardware
+ * is disabled. This should turn off the hardware (at least
+ * it must turn off frame reception.)
+ * May be called right after add_interface if that rejects
+ * an interface. If you added any work onto the mac80211 workqueue
+ * you should make sure to cancel it in this callback.
+ * Must be implemented and can sleep.
+ *
+ * @suspend: Suspend the device; mac80211 itself will quiesce before and
+ * stop transmitting and doing any other configuration, and then
+ * ask the device to suspend. This is only invoked when WoWLAN is
+ * configured, otherwise the device is deconfigured completely and
+ * reconfigured at resume time.
+ * The driver may also impose special conditions under which it
+ * wants to use the "normal" suspend (deconfigure), say if it only
+ * supports WoWLAN when the device is associated. In this case, it
+ * must return 1 from this function.
+ *
+ * @resume: If WoWLAN was configured, this indicates that mac80211 is
+ * now resuming its operation, after this the device must be fully
+ * functional again. If this returns an error, the only way out is
+ * to also unregister the device. If it returns 1, then mac80211
+ * will also go through the regular complete restart on resume.
+ *
+ * @set_wakeup: Enable or disable wakeup when WoWLAN configuration is
+ * modified. The reason is that device_set_wakeup_enable() is
+ * supposed to be called when the configuration changes, not only
+ * in suspend().
+ *
+ * @add_interface: Called when a netdevice attached to the hardware is
+ * enabled. Because it is not called for monitor mode devices, @start
+ * and @stop must be implemented.
+ * The driver should perform any initialization it needs before
+ * the device can be enabled. The initial configuration for the
+ * interface is given in the conf parameter.
+ * The callback may refuse to add an interface by returning a
+ * negative error code (which will be seen in userspace.)
+ * Must be implemented and can sleep.
+ *
+ * @change_interface: Called when a netdevice changes type. This callback
+ * is optional, but only if it is supported can interface types be
+ * switched while the interface is UP. The callback may sleep.
+ * Note that while an interface is being switched, it will not be
+ * found by the interface iteration callbacks.
+ *
+ * @remove_interface: Notifies a driver that an interface is going down.
+ * The @stop callback is called after this if it is the last interface
+ * and no monitor interfaces are present.
+ * When all interfaces are removed, the MAC address in the hardware
+ * must be cleared so the device no longer acknowledges packets;
+ * the mac_addr member of the conf structure is, however, set to the
+ * MAC address of the device going away.
+ * Hence, this callback must be implemented. It can sleep.
+ *
+ * @config: Handler for configuration requests. IEEE 802.11 code calls this
+ * function to change hardware configuration, e.g., channel.
+ * This function should never fail but returns a negative error code
+ * if it does. The callback can sleep.
+ *
+ * @bss_info_changed: Handler for configuration requests related to BSS
+ * parameters that may vary during BSS's lifespan, and may affect low
+ * level driver (e.g. assoc/disassoc status, erp parameters).
+ * This function should not be used if no BSS has been set, except
+ * for association indication. The @changed parameter indicates which
+ * of the bss parameters has changed when a call is made. The callback
+ * can sleep.
+ * Note: this callback is called if @vif_cfg_changed or @link_info_changed
+ * are not implemented.
+ *
+ * @vif_cfg_changed: Handler for configuration requests related to interface
+ * (MLD) parameters from &struct ieee80211_vif_cfg that vary during the
+ * lifetime of the interface (e.g. assoc status, IP addresses, etc.)
+ * The @changed parameter indicates which value changed.
+ * The callback can sleep.
+ *
+ * @link_info_changed: Handler for configuration requests related to link
+ * parameters from &struct ieee80211_bss_conf that are related to an
+ * individual link. e.g. legacy/HT/VHT/... rate information.
+ * The @changed parameter indicates which value changed, and the @link_id
+ * parameter indicates the link ID. Note that the @link_id will be 0 for
+ * non-MLO connections.
+ * The callback can sleep.
+ *
+ * @prepare_multicast: Prepare for multicast filter configuration.
+ * This callback is optional, and its return value is passed
+ * to configure_filter(). This callback must be atomic.
+ *
+ * @configure_filter: Configure the device's RX filter.
+ * See the section "Frame filtering" for more information.
+ * This callback must be implemented and can sleep.
+ *
+ * @config_iface_filter: Configure the interface's RX filter.
+ * This callback is optional and is used to configure which frames
+ * should be passed to mac80211. The filter_flags is the combination
+ * of FIF_* flags. The changed_flags is a bit mask that indicates
+ * which flags are changed.
+ * This callback can sleep.
+ *
+ * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit
+ * must be set or cleared for a given STA. Must be atomic.
+ *
+ * @set_key: See the section "Hardware crypto acceleration"
+ * This callback is only called between add_interface and
+ * remove_interface calls, i.e. while the given virtual interface
+ * is enabled.
+ * Returns a negative error code if the key can't be added.
+ * The callback can sleep.
+ *
+ * @update_tkip_key: See the section "Hardware crypto acceleration"
+ * This callback will be called in the context of Rx. Called for drivers
+ * which set IEEE80211_KEY_FLAG_TKIP_REQ_RX_P1_KEY.
+ * The callback must be atomic.
+ *
+ * @set_rekey_data: If the device supports GTK rekeying, for example while the
+ * host is suspended, it can assign this callback to retrieve the data
+ * necessary to do GTK rekeying; this is the KEK, KCK and replay counter.
+ * After rekeying is done it should (for example during resume) notify
+ * userspace of the new replay counter using ieee80211_gtk_rekey_notify().
+ *
+ * @set_default_unicast_key: Set the default (unicast) key index, useful for
+ * WEP when the device sends data packets autonomously, e.g. for ARP
+ * offloading. The index can be 0-3, or -1 for unsetting it.
+ *
+ * @hw_scan: Ask the hardware to service the scan request, no need to start
+ * the scan state machine in stack. The scan must honour the channel
+ * configuration done by the regulatory agent in the wiphy's
+ * registered bands. The hardware (or the driver) needs to make sure
+ * that power save is disabled.
+ * The @req ie/ie_len members are rewritten by mac80211 to contain the
+ * entire IEs after the SSID, so that drivers need not look at these
+ * at all but just send them after the SSID -- mac80211 includes the
+ * (extended) supported rates and HT information (where applicable).
+ * When the scan finishes, ieee80211_scan_completed() must be called;
+ * note that it also must be called when the scan cannot finish due to
+ * any error unless this callback returned a negative error code.
+ * This callback is also allowed to return the special return value 1;
+ * this indicates that a hardware scan isn't desirable right now and a
+ * software scan should be done instead. A driver wishing to use this
+ * capability must ensure its (hardware) scan capabilities aren't
+ * advertised as more capable than mac80211's software scan is.
+ * The callback can sleep.
+ *
+ * @cancel_hw_scan: Ask the low-level driver to cancel the active hw scan.
+ * The driver should ask the hardware to cancel the scan (if possible),
+ * but the scan will only be completed after the driver calls
+ * ieee80211_scan_completed().
+ * This callback is needed for wowlan, to prevent enqueueing a new
+ * scan_work after the low-level driver was already suspended.
+ * The callback can sleep.
+ *
+ * @sched_scan_start: Ask the hardware to start scanning repeatedly at
+ * specific intervals. The driver must call the
+ * ieee80211_sched_scan_results() function whenever it finds results.
+ * This process will continue until sched_scan_stop is called.
+ *
+ * @sched_scan_stop: Tell the hardware to stop an ongoing scheduled scan.
+ * In this case, ieee80211_sched_scan_stopped() must not be called.
+ *
+ * @sw_scan_start: Notifier function that is called just before a software scan
+ * is started. Can be NULL, if the driver doesn't need this notification.
+ * The mac_addr parameter allows supporting NL80211_SCAN_FLAG_RANDOM_ADDR,
+ * the driver may set the NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR flag if it
+ * can use this parameter. The callback can sleep.
+ *
+ * @sw_scan_complete: Notifier function that is called just after a
+ * software scan finished. Can be NULL, if the driver doesn't need
+ * this notification.
+ * The callback can sleep.
+ *
+ * @get_stats: Return low-level statistics.
+ * Returns zero if statistics are available.
+ * The callback can sleep.
+ *
+ * @get_key_seq: If your device implements encryption in hardware and does
+ * IV/PN assignment then this callback should be provided to read the
+ * IV/PN for the given key from hardware.
+ * The callback must be atomic.
+ *
+ * @set_frag_threshold: Configuration of fragmentation threshold. Assign this
+ * if the device does fragmentation by itself. Note that to prevent the
+ * stack from doing fragmentation IEEE80211_HW_SUPPORTS_TX_FRAG
+ * should be set as well.
+ * The callback can sleep.
+ *
+ * @set_rts_threshold: Configuration of RTS threshold (if device needs it)
+ * The callback can sleep.
+ *
+ * @sta_add: Notifies low level driver about addition of an associated station,
+ * AP, IBSS/WDS/mesh peer etc. This callback can sleep.
+ *
+ * @sta_remove: Notifies low level driver about removal of an associated
+ * station, AP, IBSS/WDS/mesh peer etc. Note that after the callback
+ * returns it isn't safe to use the pointer, not even RCU protected;
+ * no RCU grace period is guaranteed between returning here and freeing
+ * the station. See @sta_pre_rcu_remove if needed.
+ * This callback can sleep.
+ *
+ * @sta_add_debugfs: Drivers can use this callback to add debugfs files
+ * when a station is added to mac80211's station list. This callback
+ * should be within a CONFIG_MAC80211_DEBUGFS conditional. This
+ * callback can sleep.
+ *
+ * @sta_notify: Notifies low level driver about power state transition of an
+ * associated station, AP, IBSS/WDS/mesh peer etc. For a VIF operating
+ * in AP mode, this callback will not be called when the flag
+ * %IEEE80211_HW_AP_LINK_PS is set. Must be atomic.
+ *
+ * @sta_set_txpwr: Configure the station tx power. This callback sets the tx
+ * power for the station.
+ * This callback can sleep.
+ *
+ * @sta_state: Notifies low level driver about state transition of a
+ * station (which can be the AP, a client, IBSS/WDS/mesh peer etc.)
+ * This callback is mutually exclusive with @sta_add/@sta_remove.
+ * It must not fail for down transitions but may fail for transitions
+ * up the list of states. Also note that after the callback returns it
+ * isn't safe to use the pointer, not even RCU protected - no RCU grace
+ * period is guaranteed between returning here and freeing the station.
+ * See @sta_pre_rcu_remove if needed.
+ * The callback can sleep.
+ *
+ * @sta_pre_rcu_remove: Notify driver about station removal before RCU
+ * synchronisation. This is useful if a driver needs to have station
+ * pointers protected using RCU, it can then use this call to clear
+ * the pointers instead of waiting for an RCU grace period to elapse
+ * in @sta_state.
+ * The callback can sleep.
+ *
+ * @sta_rc_update: Notifies the driver of changes to the bitrates that can be
+ * used to transmit to the station. The changes are advertised with bits
+ * from &enum ieee80211_rate_control_changed and the values are reflected
+ * in the station data. This callback should only be used when the driver
+ * uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since
+ * otherwise the rate control algorithm is notified directly.
+ * Must be atomic.
+ * @sta_rate_tbl_update: Notifies the driver that the rate table changed. This
+ * is only used if the configured rate control algorithm actually uses
+ * the new rate table API, and is therefore optional. Must be atomic.
+ *
+ * @sta_statistics: Get statistics for this station. For example with beacon
+ * filtering, the statistics kept by mac80211 might not be accurate, so
+ * let the driver pre-fill the statistics. The driver can fill most of
+ * the values (indicating which by setting the filled bitmap), but not
+ * all of them make sense - see the source for which ones are possible.
+ * Statistics that the driver doesn't fill will be filled by mac80211.
+ * The callback can sleep.
+ *
+ * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
+ * bursting) for a hardware TX queue.
+ * Returns a negative error code on failure.
+ * The callback can sleep.
+ *
+ * @get_tsf: Get the current TSF timer value from firmware/hardware. Currently,
+ * this is only used for IBSS mode BSSID merging and debugging. Is not a
+ * required function.
+ * The callback can sleep.
+ *
+ * @set_tsf: Set the TSF timer to the specified value in the firmware/hardware.
+ * Currently, this is only used for IBSS mode debugging. Is not a
+ * required function.
+ * The callback can sleep.
+ *
+ * @offset_tsf: Offset the TSF timer by the specified value in the
+ * firmware/hardware. Preferred to set_tsf as it avoids delay between
+ * calling set_tsf() and hardware getting programmed, which will show up
+ * as TSF delay. Is not a required function.
+ * The callback can sleep.
+ *
+ * @reset_tsf: Reset the TSF timer and allow firmware/hardware to synchronize
+ * with other STAs in the IBSS. This is only used in IBSS mode. This
+ * function is optional if the firmware/hardware takes full care of
+ * TSF synchronization.
+ * The callback can sleep.
+ *
+ * @tx_last_beacon: Determine whether the last IBSS beacon was sent by us.
+ * This is needed only for IBSS mode and the result of this function is
+ * used to determine whether to reply to Probe Requests.
+ * Returns non-zero if this device sent the last beacon.
+ * The callback can sleep.
+ *
+ * @get_survey: Return per-channel survey information
+ *
+ * @rfkill_poll: Poll rfkill hardware state. If you need this, you also
+ * need to set wiphy->rfkill_poll to %true before registration,
+ * and need to call wiphy_rfkill_set_hw_state() in the callback.
+ * The callback can sleep.
+ *
+ * @set_coverage_class: Set slot time for the given coverage class as
+ * specified in IEEE 802.11-2007 section 17.3.8.6 and modify the ACK
+ * timeout accordingly; a coverage class of -1 enables the ACK timeout
+ * estimation algorithm (dynack), while setting any valid coverage class
+ * value disables dynack. This callback is not required and may sleep.
+ *
+ * @testmode_cmd: Implement a cfg80211 test mode command. The passed @vif may
+ * be %NULL. The callback can sleep.
+ * @testmode_dump: Implement a cfg80211 test mode dump. The callback can sleep.
+ *
+ * @flush: Flush all pending frames from the hardware queue, making sure
+ * that the hardware queues are empty. The @queues parameter is a bitmap
+ * of queues to flush, which is useful if different virtual interfaces
+ * use different hardware queues; it may also indicate all queues.
+ * If the parameter @drop is set to %true, pending frames may be dropped.
+ * Note that vif can be NULL.
+ * The callback can sleep.
+ *
+ * @channel_switch: Drivers that need (or want) to offload the channel
+ * switch operation for CSAs received from the AP may implement this
+ * callback. They must then call ieee80211_chswitch_done() to indicate
+ * completion of the channel switch.
+ *
+ * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
+ * Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
+ * reject TX/RX mask combinations they cannot support by returning -EINVAL
+ * (also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX).
+ *
+ * @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant).
+ *
+ * @remain_on_channel: Starts an off-channel period on the given channel, must
+ * call back to ieee80211_ready_on_channel() when on that channel. Note
+ * that normal channel traffic is not stopped as this is intended for hw
+ * offload. Frames to transmit on the off-channel channel are transmitted
+ * normally except for the %IEEE80211_TX_CTL_TX_OFFCHAN flag. When the
+ * duration (which will always be non-zero) expires, the driver must call
+ * ieee80211_remain_on_channel_expired().
+ * Note that this callback may be called while the device is in IDLE and
+ * must be accepted in this case.
+ * This callback may sleep.
+ * @cancel_remain_on_channel: Requests that an ongoing off-channel period is
+ * aborted before it expires. This callback may sleep.
+ *
+ * @set_ringparam: Set tx and rx ring sizes.
+ *
+ * @get_ringparam: Get tx and rx ring current and maximum sizes.
+ *
+ * @tx_frames_pending: Check if there is any pending frame in the hardware
+ * queues before entering power save.
+ *
+ * @set_bitrate_mask: Set a mask of rates to be used for rate control selection
+ * when transmitting a frame. Currently only legacy rates are handled.
+ * The callback can sleep.
+ * @event_callback: Notify driver about any event in mac80211. See
+ * &enum ieee80211_event_type for the different types.
+ * The callback must be atomic.
+ *
+ * @release_buffered_frames: Release buffered frames according to the given
+ * parameters. In the case where the driver buffers some frames for
+ * sleeping stations mac80211 will use this callback to tell the driver
+ * to release some frames, either for PS-poll or uAPSD.
+ * Note that if the @more_data parameter is %false the driver must check
+ * if there are more frames on the given TIDs, and if there are more than
+ * the frames being released then it must still set the more-data bit in
+ * the frame. If the @more_data parameter is %true, then of course the
+ * more-data bit must always be set.
+ * The @tids parameter tells the driver which TIDs to release frames
+ * from, for PS-poll it will always have only a single bit set.
+ * In the case this is used for a PS-poll initiated release, the
+ * @num_frames parameter will always be 1 so code can be shared. In
+ * this case the driver must also set %IEEE80211_TX_STATUS_EOSP flag
+ * on the TX status (and must report TX status) so that the PS-poll
+ * period is properly ended. This is used to avoid sending multiple
+ * responses for a retried PS-poll frame.
+ * In the case this is used for uAPSD, the @num_frames parameter may be
+ * bigger than one, but the driver may send fewer frames (it must send
+ * at least one, however). In this case it is also responsible for
+ * setting the EOSP flag in the QoS header of the frames. Also, when the
+ * service period ends, the driver must set %IEEE80211_TX_STATUS_EOSP
+ * on the last frame in the SP. Alternatively, it may call the function
+ * ieee80211_sta_eosp() to inform mac80211 of the end of the SP.
+ * This callback must be atomic.
+ * @allow_buffered_frames: Prepare device to allow the given number of frames
+ * to go out to the given station. The frames will be sent by mac80211
+ * via the usual TX path after this call. The TX information for frames
+ * released will also have the %IEEE80211_TX_CTL_NO_PS_BUFFER flag set
+ * and the last one will also have %IEEE80211_TX_STATUS_EOSP set. In case
+ * frames from multiple TIDs are released and the driver might reorder
+ * them between the TIDs, it must set the %IEEE80211_TX_STATUS_EOSP flag
+ * on the last frame and clear it on all others and also handle the EOSP
+ * bit in the QoS header correctly. Alternatively, it can also call the
+ * ieee80211_sta_eosp() function.
+ * The @tids parameter is a bitmap and tells the driver which TIDs the
+ * frames will be on; it will at most have two bits set.
+ * This callback must be atomic.
+ *
+ * @get_et_sset_count: Ethtool API to get string-set count.
+ *
+ * @get_et_stats: Ethtool API to get a set of u64 stats.
+ *
+ * @get_et_strings: Ethtool API to get a set of strings to describe stats
+ * and perhaps other supported types of ethtool data-sets.
+ *
+ * @mgd_prepare_tx: Prepare for transmitting a management frame for association
+ * before associated. In multi-channel scenarios, a virtual interface is
+ * bound to a channel before it is associated, but as it isn't associated
+ * yet it need not necessarily be given airtime, in particular since any
+ * transmission to a P2P GO needs to be synchronized against the GO's
+ * powersave state. mac80211 will call this function before transmitting a
+ * management frame prior to having successfully associated to allow the
+ * driver to give it channel time for the transmission, to get a response
+ * and to be able to synchronize with the GO.
+ * For drivers that set %IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP, mac80211
+ * would also call this function before transmitting a deauthentication
+ * frame in case that no beacon was heard from the AP/P2P GO.
+ * The callback will be called before each transmission and upon return
+ * mac80211 will transmit the frame right away.
+ * Additional information is passed in the &struct ieee80211_prep_tx_info
+ * data. If the duration given there is greater than zero, mac80211
+ * hints to the driver the duration for which the operation is requested.
+ * The callback is optional and can (should!) sleep.
+ * @mgd_complete_tx: Notify the driver that the response frame for a previously
+ * transmitted frame announced with @mgd_prepare_tx was received, the data
+ * is filled similarly to @mgd_prepare_tx though the duration is not used.
+ *
+ * @mgd_protect_tdls_discover: Protect a TDLS discovery session. After sending
+ * a TDLS discovery-request, we expect a reply to arrive on the AP's
+ * channel. We must stay on the channel (no PSM, scan, etc.), since a TDLS
+ * setup-response is a direct packet not buffered by the AP.
+ * mac80211 will call this function just before the transmission of a TDLS
+ * discovery-request. The recommended period of protection is at least
+ * 2 * (DTIM period).
+ * The callback is optional and can sleep.
+ *
+ * @add_chanctx: Notifies device driver about new channel context creation.
+ * This callback may sleep.
+ * @remove_chanctx: Notifies device driver about channel context destruction.
+ * This callback may sleep.
+ * @change_chanctx: Notifies device driver about channel context changes that
+ * may happen when combining different virtual interfaces on the same
+ * channel context with different settings
+ * This callback may sleep.
+ * @assign_vif_chanctx: Notifies device driver about channel context being bound
+ * to vif. Possible use is for hw queue remapping.
+ * This callback may sleep.
+ * @unassign_vif_chanctx: Notifies device driver about channel context being
+ * unbound from vif.
+ * This callback may sleep.
+ * @switch_vif_chanctx: switch a number of vifs from one chanctx to
+ * another, as specified in the list of
+ * @ieee80211_vif_chanctx_switch passed to the driver, according
+ * to the mode defined in &ieee80211_chanctx_switch_mode.
+ * This callback may sleep.
+ *
+ * @start_ap: Start operation on the AP interface, this is called after all the
+ * information in bss_conf is set and beacon can be retrieved. A channel
+ * context is bound before this is called. Note that if the driver uses
+ * software scan or ROC, this (and @stop_ap) isn't called when the AP is
+ * just "paused" for scanning/ROC, which is indicated by the beacon being
+ * disabled/enabled via @bss_info_changed.
+ * @stop_ap: Stop operation on the AP interface.
+ *
+ * @reconfig_complete: Called after a call to ieee80211_restart_hw() and
+ * during resume, when the reconfiguration has completed.
+ * This can help the driver implement the reconfiguration step (and
+ * indicate mac80211 is ready to receive frames).
+ * This callback may sleep.
+ *
+ * @ipv6_addr_change: IPv6 address assignment on the given interface changed.
+ * Currently, this is only called for managed or P2P client interfaces.
+ * This callback is optional; it must not sleep.
+ *
+ * @channel_switch_beacon: Starts a channel switch to a new channel.
+ * Beacons are modified to include CSA or ECSA IEs before calling this
+ * function. The corresponding count fields in these IEs must be
+ * decremented, and when they reach 1 the driver must call
+ * ieee80211_csa_finish(). Drivers which use ieee80211_beacon_get()
+ * get the csa counter decremented by mac80211, but must check if it is
+ * 1 using ieee80211_beacon_counter_is_complete() after the beacon has been
+ * transmitted and then call ieee80211_csa_finish().
+ * If the CSA count starts as zero or 1, this function will not be called,
+ * since there won't be any time to beacon before the switch anyway.
+ * @pre_channel_switch: This is an optional callback that is called
+ * before a channel switch procedure is started (ie. when a STA
+ * gets a CSA or a userspace initiated channel-switch), allowing
+ * the driver to prepare for the channel switch.
+ * @post_channel_switch: This is an optional callback that is called
+ * after a channel switch procedure is completed, allowing the
+ * driver to go back to a normal configuration.
+ * @abort_channel_switch: This is an optional callback that is called
+ * when channel switch procedure was completed, allowing the
+ * driver to go back to a normal configuration.
+ * @channel_switch_rx_beacon: This is an optional callback that is called
+ * when channel switch procedure is in progress and additional beacon with
+ * CSA IE was received, allowing driver to track changes in count.
+ * @join_ibss: Join an IBSS (on an IBSS interface); this is called after all
+ * information in bss_conf is set up and the beacon can be retrieved. A
+ * channel context is bound before this is called.
+ * @leave_ibss: Leave the IBSS again.
+ *
+ * @get_expected_throughput: extract the expected throughput towards the
+ * specified station. The returned value is expressed in Kbps. It returns 0
+ * if the RC algorithm does not have proper data to provide.
+ *
+ * @get_txpower: get current maximum tx power (in dBm) based on configuration
+ * and hardware limits.
+ *
+ * @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver
+ * is responsible for continually initiating channel-switching operations
+ * and returning to the base channel for communication with the AP. The
+ * driver receives a channel-switch request template and the location of
+ * the switch-timing IE within the template as part of the invocation.
+ * The template is valid only within the call, and the driver can
+ * optionally copy the skb for further re-use.
+ * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both
+ * peers must be on the base channel when the call completes.
+ * @tdls_recv_channel_switch: a TDLS channel-switch related frame (request or
+ * response) has been received from a remote peer. The driver gets
+ * parameters parsed from the incoming frame and may use them to continue
+ * an ongoing channel-switch operation. In addition, a channel-switch
+ * response template is provided, together with the location of the
+ * switch-timing IE within the template. The skb can only be used within
+ * the function call.
+ *
+ * @wake_tx_queue: Called when new packets have been added to the queue.
+ * @sync_rx_queues: Process all pending frames in RSS queues. This
+ * synchronization is needed in case the driver has pending frames in
+ * its RSS queues that were received prior to the control path action
+ * currently being taken (e.g. disassociation) but have not been
+ * processed yet.
+ *
+ * @start_nan: join an existing NAN cluster, or create a new one.
+ * @stop_nan: leave the NAN cluster.
+ * @nan_change_conf: change NAN configuration. The data in cfg80211_nan_conf
+ * contains full new configuration and changes specify which parameters
+ * are changed with respect to the last NAN config.
+ * The driver gets both full configuration and the changed parameters since
+ * some devices may need the full configuration while others need only the
+ * changed parameters.
+ * @add_nan_func: Add a NAN function. Returns 0 on success. The data in
+ * cfg80211_nan_func must not be referenced outside the scope of
+ * this call.
+ * @del_nan_func: Remove a NAN function. The driver must call
+ * ieee80211_nan_func_terminated() with
+ * NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST reason code upon removal.
+ * @can_aggregate_in_amsdu: Called in order to determine if HW supports
+ * aggregating two specific frames in the same A-MSDU. The relation
+ * between the skbs should be symmetric and transitive. Note that while
+ * skb is always a real frame, head may or may not be an A-MSDU.
+ * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available.
+ * Statistics should be cumulative, currently no way to reset is provided.
+ *
+ * @start_pmsr: start peer measurement (e.g. FTM) (this call can sleep)
+ * @abort_pmsr: abort peer measurement (this call can sleep)
+ * @set_tid_config: Apply TID specific configurations. This callback may sleep.
+ * @reset_tid_config: Reset TID specific configuration for the peer.
+ * This callback may sleep.
+ * @update_vif_offload: Update virtual interface offload flags
+ * This callback may sleep.
+ * @sta_set_4addr: Called to notify the driver when a station starts/stops using
+ * 4-address mode
+ * @set_sar_specs: Update the SAR (TX power) settings.
+ * @sta_set_decap_offload: Called to notify the driver when a station is allowed
+ * to use rx decapsulation offload
+ * @add_twt_setup: Update hw with TWT agreement parameters received from the peer.
+ * This callback allows the hw to check if requested parameters
+ * are supported and if there is enough room for a new agreement.
+ * The hw is expected to set agreement result in the req_type field of
+ * twt structure.
+ * @twt_teardown_request: Update the hw with TWT teardown request received
+ * from the peer.
+ * @set_radar_background: Configure the dedicated offchannel chain
+ * available for radar/CAC detection on some hw. This chain can't be used
+ * to transmit or receive frames and it is bound to a running wdev.
+ * Background radar/CAC detection makes it possible to avoid the CAC
+ * downtime of switching to a different channel during CAC detection on
+ * the selected radar channel.
+ * The caller is expected to set the chandef pointer to NULL in order to
+ * disable background CAC/radar detection.
+ * @net_fill_forward_path: Called from .ndo_fill_forward_path in order to
+ * resolve a path for hardware flow offloading
+ * @change_vif_links: Change the valid links on an interface, note that while
+ * removing the old link information is still valid (link_conf pointer),
+ * but may immediately disappear after the function returns. The old or
+ * new links bitmaps may be 0 if going from/to a non-MLO situation.
+ * The @old array contains pointers to the old bss_conf structures
+ * that were already removed, in case they're needed.
+ * This callback can sleep.
+ * @change_sta_links: Change the valid links of a station, similar to
+ * @change_vif_links. This callback can sleep.
+ * Note that a sta can also be inserted or removed with valid links,
+ * i.e. passed to @sta_add/@sta_state with sta->valid_links not zero.
+ * In fact, a station cannot change between having valid_links and not
+ * having them.
+ */
+struct ieee80211_ops {
+ void (*tx)(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+ int (*start)(struct ieee80211_hw *hw);
+ void (*stop)(struct ieee80211_hw *hw);
+#ifdef CONFIG_PM
+ int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+ int (*resume)(struct ieee80211_hw *hw);
+ void (*set_wakeup)(struct ieee80211_hw *hw, bool enabled);
+#endif
+ int (*add_interface)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ int (*change_interface)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype new_type, bool p2p);
+ void (*remove_interface)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ int (*config)(struct ieee80211_hw *hw, u32 changed);
+ void (*bss_info_changed)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed);
+ void (*vif_cfg_changed)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u64 changed);
+ void (*link_info_changed)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed);
+
+ int (*start_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+ void (*stop_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+
+ u64 (*prepare_multicast)(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list);
+ void (*configure_filter)(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast);
+ void (*config_iface_filter)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int filter_flags,
+ unsigned int changed_flags);
+ int (*set_tim)(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ bool set);
+ int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+ void (*update_tkip_key)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *conf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key);
+ void (*set_rekey_data)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data);
+ void (*set_default_unicast_key)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int idx);
+ int (*hw_scan)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req);
+ void (*cancel_hw_scan)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ int (*sched_scan_start)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies);
+ int (*sched_scan_stop)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ void (*sw_scan_start)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr);
+ void (*sw_scan_complete)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ int (*get_stats)(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats);
+ void (*get_key_seq)(struct ieee80211_hw *hw,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq);
+ int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value);
+ int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value);
+ int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+#ifdef CONFIG_MAC80211_DEBUGFS
+ void (*sta_add_debugfs)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir);
+#endif
+ void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd, struct ieee80211_sta *sta);
+ int (*sta_set_txpwr)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int (*sta_state)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state);
+ void (*sta_pre_rcu_remove)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ void (*sta_rc_update)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed);
+ void (*sta_rate_tbl_update)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ void (*sta_statistics)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo);
+ int (*conf_tx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params);
+ u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+ void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 tsf);
+ void (*offset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ s64 offset);
+ void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+ int (*tx_last_beacon)(struct ieee80211_hw *hw);
+
+ /**
+ * @ampdu_action:
+ * Perform a certain A-MPDU action.
+ * The RA/TID combination determines the destination and TID we want
+ * the ampdu action to be performed for. The action is defined through
+ * ieee80211_ampdu_mlme_action.
+ * When the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL the driver
+ * may neither send aggregates containing more subframes than @buf_size
+ * nor send aggregates in a way that lost frames would exceed the
+ * buffer size. If just limiting the aggregate size, this would be
+ * possible with a buf_size of 8:
+ *
+ * - ``TX: 1.....7``
+ * - ``RX: 2....7`` (lost frame #1)
+ * - ``TX: 8..1...``
+ *
+ * which is invalid since #1 was now re-transmitted well past the
+ * buffer size of 8. Correct ways to retransmit #1 would be:
+ *
+ * - ``TX: 1 or``
+ * - ``TX: 18 or``
+ * - ``TX: 81``
+ *
+ * Even ``189`` would be wrong since 1 could be lost again.
+ *
+ * Returns a negative error code on failure. The driver may return
+ * %IEEE80211_AMPDU_TX_START_IMMEDIATE for %IEEE80211_AMPDU_TX_START
+ * if the session can start immediately.
+ *
+ * The callback can sleep.
+ */
+ int (*ampdu_action)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params);
+ int (*get_survey)(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey);
+ void (*rfkill_poll)(struct ieee80211_hw *hw);
+ void (*set_coverage_class)(struct ieee80211_hw *hw, s16 coverage_class);
+#ifdef CONFIG_NL80211_TESTMODE
+ int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+ int (*testmode_dump)(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ void *data, int len);
+#endif
+ void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
+ void (*channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *ch_switch);
+ int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
+ int (*get_antenna)(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
+
+ int (*remain_on_channel)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type);
+ int (*cancel_remain_on_channel)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ int (*set_ringparam)(struct ieee80211_hw *hw, u32 tx, u32 rx);
+ void (*get_ringparam)(struct ieee80211_hw *hw,
+ u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max);
+ bool (*tx_frames_pending)(struct ieee80211_hw *hw);
+ int (*set_bitrate_mask)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask);
+ void (*event_callback)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event);
+
+ void (*allow_buffered_frames)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data);
+ void (*release_buffered_frames)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data);
+
+ int (*get_et_sset_count)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset);
+ void (*get_et_stats)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data);
+ void (*get_et_strings)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data);
+
+ void (*mgd_prepare_tx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info);
+ void (*mgd_complete_tx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info);
+
+ void (*mgd_protect_tdls_discover)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+ int (*add_chanctx)(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+ void (*remove_chanctx)(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+ void (*change_chanctx)(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed);
+ int (*assign_vif_chanctx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx);
+ void (*unassign_vif_chanctx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx);
+ int (*switch_vif_chanctx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode);
+
+ void (*reconfig_complete)(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ void (*ipv6_addr_change)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev);
+#endif
+ void (*channel_switch_beacon)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef);
+ int (*pre_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *ch_switch);
+
+ int (*post_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ void (*abort_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ void (*channel_switch_rx_beacon)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *ch_switch);
+
+ int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+ void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+ u32 (*get_expected_throughput)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta);
+ int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int *dbm);
+
+ int (*tdls_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie);
+ void (*tdls_cancel_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ void (*tdls_recv_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_tdls_ch_sw_params *params);
+
+ void (*wake_tx_queue)(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+ void (*sync_rx_queues)(struct ieee80211_hw *hw);
+
+ int (*start_nan)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf);
+ int (*stop_nan)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ int (*nan_change_conf)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf, u32 changes);
+ int (*add_nan_func)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_nan_func *nan_func);
+ void (*del_nan_func)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 instance_id);
+ bool (*can_aggregate_in_amsdu)(struct ieee80211_hw *hw,
+ struct sk_buff *head,
+ struct sk_buff *skb);
+ int (*get_ftm_responder_stats)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_ftm_responder_stats *ftm_stats);
+ int (*start_pmsr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request);
+ void (*abort_pmsr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request);
+ int (*set_tid_config)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_conf);
+ int (*reset_tid_config)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u8 tids);
+ void (*update_vif_offload)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ void (*sta_set_4addr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enabled);
+ int (*set_sar_specs)(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar);
+ void (*sta_set_decap_offload)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enabled);
+ void (*add_twt_setup)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct ieee80211_twt_setup *twt);
+ void (*twt_teardown_request)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u8 flowid);
+ int (*set_radar_background)(struct ieee80211_hw *hw,
+ struct cfg80211_chan_def *chandef);
+ int (*net_fill_forward_path)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct net_device_path_ctx *ctx,
+ struct net_device_path *path);
+ int (*change_vif_links)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]);
+ int (*change_sta_links)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links);
+};
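+
+/*
+ * Illustrative sketch only: a minimal ops table wiring the callbacks
+ * documented above as mandatory; all my_drv_* functions are hypothetical.
+ */
+#if 0
+static const struct ieee80211_ops my_drv_ops = {
+        .tx = my_drv_tx,
+        .start = my_drv_start,
+        .stop = my_drv_stop,
+        .add_interface = my_drv_add_interface,
+        .remove_interface = my_drv_remove_interface,
+        .config = my_drv_config,
+        .configure_filter = my_drv_configure_filter,
+};
+#endif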
+
+/**
+ * ieee80211_alloc_hw_nm - Allocate a new hardware device
+ *
+ * This must be called once for each hardware device. The returned pointer
+ * must be used to refer to this device when calling other functions.
+ * mac80211 allocates a private data area for the driver pointed to by
+ * @priv in &struct ieee80211_hw, the size of this area is given as
+ * @priv_data_len.
+ *
+ * @priv_data_len: length of private data
+ * @ops: callbacks for this device
+ * @requested_name: Requested name for this device.
+ * NULL is a valid value and means to use the default naming (phy%d)
+ *
+ * Return: A pointer to the new hardware device, or %NULL on error.
+ */
+struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ const struct ieee80211_ops *ops,
+ const char *requested_name);
+
+/**
+ * ieee80211_alloc_hw - Allocate a new hardware device
+ *
+ * This must be called once for each hardware device. The returned pointer
+ * must be used to refer to this device when calling other functions.
+ * mac80211 allocates a private data area for the driver pointed to by
+ * @priv in &struct ieee80211_hw, the size of this area is given as
+ * @priv_data_len.
+ *
+ * @priv_data_len: length of private data
+ * @ops: callbacks for this device
+ *
+ * Return: A pointer to the new hardware device, or %NULL on error.
+ */
+static inline
+struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
+ const struct ieee80211_ops *ops)
+{
+ return ieee80211_alloc_hw_nm(priv_data_len, ops, NULL);
+}
+
+/**
+ * ieee80211_register_hw - Register hardware device
+ *
+ * You must call this function before any other functions in
+ * mac80211. Note that before a hardware can be registered, you
+ * need to fill the contained wiphy's information.
+ *
+ * @hw: the device to register as returned by ieee80211_alloc_hw()
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+int ieee80211_register_hw(struct ieee80211_hw *hw);
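+
+/*
+ * Illustrative sketch only: typical bring-up order in a driver probe
+ * path using the functions above. struct my_drv_priv and my_drv_ops are
+ * hypothetical; filling the wiphy information (bands etc.) is elided.
+ */
+#if 0
+static int my_drv_probe(void)
+{
+        struct ieee80211_hw *hw;
+        int ret;
+
+        hw = ieee80211_alloc_hw(sizeof(struct my_drv_priv), &my_drv_ops);
+        if (!hw)
+                return -ENOMEM;
+
+        /* ... fill the contained wiphy's information here ... */
+
+        ret = ieee80211_register_hw(hw);
+        if (ret) {
+                ieee80211_free_hw(hw);
+                return ret;
+        }
+        return 0;
+}
+#endif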
+
+/**
+ * struct ieee80211_tpt_blink - throughput blink description
+ * @throughput: throughput in Kbit/sec
+ * @blink_time: blink time in milliseconds
+ * (full cycle, ie. one off + one on period)
+ */
+struct ieee80211_tpt_blink {
+ int throughput;
+ int blink_time;
+};
+
+/**
+ * enum ieee80211_tpt_led_trigger_flags - throughput trigger flags
+ * @IEEE80211_TPT_LEDTRIG_FL_RADIO: enable blinking with radio
+ * @IEEE80211_TPT_LEDTRIG_FL_WORK: enable blinking when working
+ * @IEEE80211_TPT_LEDTRIG_FL_CONNECTED: enable blinking when at least one
+ * interface is connected in some way, including being an AP
+ */
+enum ieee80211_tpt_led_trigger_flags {
+ IEEE80211_TPT_LEDTRIG_FL_RADIO = BIT(0),
+ IEEE80211_TPT_LEDTRIG_FL_WORK = BIT(1),
+ IEEE80211_TPT_LEDTRIG_FL_CONNECTED = BIT(2),
+};
+
+#ifdef CONFIG_MAC80211_LEDS
+const char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
+const char *
+__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
+ unsigned int flags,
+ const struct ieee80211_tpt_blink *blink_table,
+ unsigned int blink_table_len);
+#endif
+/**
+ * ieee80211_get_tx_led_name - get name of TX LED
+ *
+ * mac80211 creates a transmit LED trigger for each wireless hardware
+ * that can be used to drive LEDs if your driver registers a LED device.
+ * This function returns the name (or %NULL if not configured for LEDs)
+ * of the trigger so you can automatically link the LED device.
+ *
+ * @hw: the hardware to get the LED trigger name for
+ *
+ * Return: The name of the LED trigger. %NULL if not configured for LEDs.
+ */
+static inline const char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
+{
+#ifdef CONFIG_MAC80211_LEDS
+ return __ieee80211_get_tx_led_name(hw);
+#else
+ return NULL;
+#endif
+}
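+
+/*
+ * Illustrative sketch only: linking a registered LED device to the TX
+ * trigger; my_led is a hypothetical struct led_classdev. A %NULL trigger
+ * name simply leaves the LED without a default trigger.
+ */
+#if 0
+        my_led.default_trigger = ieee80211_get_tx_led_name(hw);
+#endif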
+
+/**
+ * ieee80211_get_rx_led_name - get name of RX LED
+ *
+ * mac80211 creates a receive LED trigger for each wireless hardware
+ * that can be used to drive LEDs if your driver registers a LED device.
+ * This function returns the name (or %NULL if not configured for LEDs)
+ * of the trigger so you can automatically link the LED device.
+ *
+ * @hw: the hardware to get the LED trigger name for
+ *
+ * Return: The name of the LED trigger. %NULL if not configured for LEDs.
+ */
+static inline const char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
+{
+#ifdef CONFIG_MAC80211_LEDS
+ return __ieee80211_get_rx_led_name(hw);
+#else
+ return NULL;
+#endif
+}
+
+/**
+ * ieee80211_get_assoc_led_name - get name of association LED
+ *
+ * mac80211 creates an association LED trigger for each wireless hardware
+ * that can be used to drive LEDs if your driver registers a LED device.
+ * This function returns the name (or %NULL if not configured for LEDs)
+ * of the trigger so you can automatically link the LED device.
+ *
+ * @hw: the hardware to get the LED trigger name for
+ *
+ * Return: The name of the LED trigger. %NULL if not configured for LEDs.
+ */
+static inline const char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
+{
+#ifdef CONFIG_MAC80211_LEDS
+ return __ieee80211_get_assoc_led_name(hw);
+#else
+ return NULL;
+#endif
+}
+
+/**
+ * ieee80211_get_radio_led_name - get name of radio LED
+ *
+ * mac80211 creates a radio change LED trigger for each wireless hardware
+ * that can be used to drive LEDs if your driver registers a LED device.
+ * This function returns the name (or %NULL if not configured for LEDs)
+ * of the trigger so you can automatically link the LED device.
+ *
+ * @hw: the hardware to get the LED trigger name for
+ *
+ * Return: The name of the LED trigger. %NULL if not configured for LEDs.
+ */
+static inline const char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
+{
+#ifdef CONFIG_MAC80211_LEDS
+ return __ieee80211_get_radio_led_name(hw);
+#else
+ return NULL;
+#endif
+}
+
+/**
+ * ieee80211_create_tpt_led_trigger - create throughput LED trigger
+ * @hw: the hardware to create the trigger for
+ * @flags: trigger flags, see &enum ieee80211_tpt_led_trigger_flags
+ * @blink_table: the blink table -- needs to be ordered by throughput
+ * @blink_table_len: size of the blink table
+ *
+ * Return: %NULL (in case of error, or if no LED triggers are
+ * configured) or the name of the new trigger.
+ *
+ * Note: This function must be called before ieee80211_register_hw().
+ */
+static inline const char *
+ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, unsigned int flags,
+ const struct ieee80211_tpt_blink *blink_table,
+ unsigned int blink_table_len)
+{
+#ifdef CONFIG_MAC80211_LEDS
+ return __ieee80211_create_tpt_led_trigger(hw, flags, blink_table,
+ blink_table_len);
+#else
+ return NULL;
+#endif
+}
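+
+/*
+ * Illustrative sketch only: a blink table ordered by throughput
+ * (in Kbit/sec), as required above; the values are arbitrary examples.
+ */
+#if 0
+static const struct ieee80211_tpt_blink my_blink_table[] = {
+        { .throughput = 0,          .blink_time = 334 },
+        { .throughput = 1 * 1024,   .blink_time = 260 },
+        { .throughput = 10 * 1024,  .blink_time = 110 },
+        { .throughput = 100 * 1024, .blink_time = 50 },
+};
+
+/* must be called before ieee80211_register_hw(), as noted above */
+tpt_name = ieee80211_create_tpt_led_trigger(hw,
+                IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+                my_blink_table, ARRAY_SIZE(my_blink_table));
+#endif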
+
+/**
+ * ieee80211_unregister_hw - Unregister a hardware device
+ *
+ * This function instructs mac80211 to free allocated resources
+ * and unregister netdevices from the networking subsystem.
+ *
+ * @hw: the hardware to unregister
+ */
+void ieee80211_unregister_hw(struct ieee80211_hw *hw);
+
+/**
+ * ieee80211_free_hw - free hardware descriptor
+ *
+ * This function frees everything that was allocated, including the
+ * private data for the driver. You must call ieee80211_unregister_hw()
+ * before calling this function.
+ *
+ * @hw: the hardware to free
+ */
+void ieee80211_free_hw(struct ieee80211_hw *hw);
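+
+/*
+ * Illustrative sketch only: teardown mirrors bring-up, i.e. the device
+ * must be unregistered before it is freed, as documented above.
+ */
+#if 0
+static void my_drv_remove(struct ieee80211_hw *hw)
+{
+        ieee80211_unregister_hw(hw);
+        /* driver-private teardown may go here */
+        ieee80211_free_hw(hw);
+}
+#endif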
+
+/**
+ * ieee80211_restart_hw - restart hardware completely
+ *
+ * Call this function when the hardware was restarted for some reason
+ * (hardware error, ...) and the driver is unable to restore its state
+ * by itself. mac80211 assumes that at this point the driver/hardware
+ * is completely uninitialised and stopped; it starts the process by
+ * calling the ->start() operation. The driver will need to reset all
+ * internal state that it has prior to calling this function.
+ *
+ * @hw: the hardware to restart
+ */
+void ieee80211_restart_hw(struct ieee80211_hw *hw);
+
+/**
+ * ieee80211_rx_list - receive frame and store processed skbs in a list
+ *
+ * Use this function to hand received frames to mac80211. The receive
+ * buffer in @skb must start with an IEEE 802.11 header. If a paged
+ * @skb is used, it is recommended that the driver put the ieee80211
+ * header of the frame on the linear part of the @skb to avoid memory
+ * allocation and/or memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other. Calls to
+ * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
+ * mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ *
+ * This function must be called with BHs disabled and the RCU read lock
+ * held.
+ *
+ * @hw: the hardware this frame came in on
+ * @sta: the station the frame was received from, or %NULL
+ * @skb: the buffer to receive, owned by mac80211 after this call
+ * @list: the destination list
+ */
+void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct list_head *list);
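+
+/*
+ * Illustrative sketch only: batching RX from a driver's NAPI poll.
+ * my_drv_dequeue_rx() is hypothetical; handing the processed skbs to
+ * netif_receive_skb_list() afterwards follows common driver practice
+ * and is an assumption here, not mandated by this header.
+ */
+#if 0
+        LIST_HEAD(rx_list);
+        struct sk_buff *skb;
+
+        /* NAPI poll runs with BHs disabled; take the RCU read lock too */
+        rcu_read_lock();
+        while ((skb = my_drv_dequeue_rx(hw->priv)))
+                ieee80211_rx_list(hw, NULL, skb, &rx_list);
+        rcu_read_unlock();
+
+        netif_receive_skb_list(&rx_list);
+#endif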
+
+/**
+ * ieee80211_rx_napi - receive frame from NAPI context
+ *
+ * Use this function to hand received frames to mac80211. The receive
+ * buffer in @skb must start with an IEEE 802.11 header. If a paged
+ * @skb is used, it is recommended that the driver put the ieee80211
+ * header of the frame on the linear part of the @skb to avoid memory
+ * allocation and/or memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other. Calls to
+ * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
+ * mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ *
+ * This function must be called with BHs disabled.
+ *
+ * @hw: the hardware this frame came in on
+ * @sta: the station the frame was received from, or %NULL
+ * @skb: the buffer to receive, owned by mac80211 after this call
+ * @napi: the NAPI context
+ */
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct napi_struct *napi);
+
+/**
+ * ieee80211_rx - receive frame
+ *
+ * Use this function to hand received frames to mac80211. The receive
+ * buffer in @skb must start with an IEEE 802.11 header. If a paged
+ * @skb is used, it is recommended that the driver put the ieee80211
+ * header of the frame on the linear part of the @skb to avoid memory
+ * allocation and/or memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other. Calls to
+ * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
+ * mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ *
+ * In process context, use ieee80211_rx_ni() instead.
+ *
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac80211 after this call
+ */
+static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ ieee80211_rx_napi(hw, NULL, skb, NULL);
+}
+
+/**
+ * ieee80211_rx_irqsafe - receive frame
+ *
+ * Like ieee80211_rx() but can be called in IRQ context
+ * (internally defers to a tasklet.)
+ *
+ * Calls to this function, ieee80211_rx() or ieee80211_rx_ni() may not
+ * be mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ *
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac80211 after this call
+ */
+void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb);
+
+/**
+ * ieee80211_rx_ni - receive frame (in process context)
+ *
+ * Like ieee80211_rx() but can be called in process context
+ * (internally disables bottom halves).
+ *
+ * Calls to this function, ieee80211_rx() and ieee80211_rx_irqsafe() may
+ * not be mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ *
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac80211 after this call
+ */
+static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ local_bh_disable();
+ ieee80211_rx(hw, skb);
+ local_bh_enable();
+}
+
+/**
+ * ieee80211_sta_ps_transition - PS transition for connected sta
+ *
+ * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS
+ * flag set, use this function to inform mac80211 about a connected station
+ * entering/leaving PS mode.
+ *
+ * This function may not be called in IRQ context or with softirqs enabled.
+ *
+ * Calls to this function for a single hardware must be synchronized against
+ * each other.
+ *
+ * @sta: currently connected sta
+ * @start: start or stop PS
+ *
+ * Return: 0 on success. -EINVAL when the requested PS mode is already set.
+ */
+int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start);
+
+/**
+ * ieee80211_sta_ps_transition_ni - PS transition for connected sta
+ * (in process context)
+ *
+ * Like ieee80211_sta_ps_transition() but can be called in process context
+ * (internally disables bottom halves). Concurrent call restriction still
+ * applies.
+ *
+ * @sta: currently connected sta
+ * @start: start or stop PS
+ *
+ * Return: Like ieee80211_sta_ps_transition().
+ */
+static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta,
+ bool start)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = ieee80211_sta_ps_transition(sta, start);
+ local_bh_enable();
+
+ return ret;
+}
+
+/**
+ * ieee80211_sta_pspoll - PS-Poll frame received
+ * @sta: currently connected station
+ *
+ * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS flag set,
+ * use this function to inform mac80211 that a PS-Poll frame from a
+ * connected station was received.
+ * This must be used in conjunction with ieee80211_sta_ps_transition()
+ * and possibly ieee80211_sta_uapsd_trigger(); calls to all three must
+ * be serialized.
+ */
+void ieee80211_sta_pspoll(struct ieee80211_sta *sta);
+
+/**
+ * ieee80211_sta_uapsd_trigger - (potential) U-APSD trigger frame received
+ * @sta: currently connected station
+ * @tid: TID of the received (potential) trigger frame
+ *
+ * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS flag set,
+ * use this function to inform mac80211 that a (potential) trigger frame
+ * from a connected station was received.
+ * This must be used in conjunction with ieee80211_sta_ps_transition()
+ * and possibly ieee80211_sta_pspoll(); calls to all three must be
+ * serialized.
+ * %IEEE80211_NUM_TIDS can be passed as the tid if the tid is unknown.
+ * In this case, mac80211 will not check that this TID maps to an AC
+ * that is trigger-enabled, and will assume that the caller did the
+ * proper checks.
+ */
+void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid);
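+
+/*
+ * Example: a rough sketch of the calls a driver using
+ * %IEEE80211_HW_AP_LINK_PS might make from its (serialized) RX path;
+ * the surrounding driver plumbing is hypothetical.
+ *
+ *	static void mydrv_rx_ps(struct ieee80211_sta *sta,
+ *				struct ieee80211_hdr *hdr)
+ *	{
+ *		if (ieee80211_is_pspoll(hdr->frame_control))
+ *			ieee80211_sta_pspoll(sta);
+ *		else if (ieee80211_is_data_qos(hdr->frame_control))
+ *			ieee80211_sta_uapsd_trigger(sta,
+ *						    ieee80211_get_tid(hdr));
+ *
+ *		if (ieee80211_is_data(hdr->frame_control))
+ *			ieee80211_sta_ps_transition(sta,
+ *					ieee80211_has_pm(hdr->frame_control));
+ *	}
+ */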
+
+/*
+ * The TX headroom reserved by mac80211 for its own tx_status functions.
+ * This is enough for the radiotap header.
+ */
+#define IEEE80211_TX_STATUS_HEADROOM ALIGN(14, 4)
+
+/**
+ * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames
+ * @sta: &struct ieee80211_sta pointer for the sleeping station
+ * @tid: the TID that has buffered frames
+ * @buffered: indicates whether or not frames are buffered for this TID
+ *
+ * If a driver buffers frames for a powersave station instead of passing
+ * them back to mac80211 for retransmission, the station may still need
+ * to be told that there are buffered frames via the TIM bit.
+ *
+ * This function informs mac80211 whether or not there are frames that are
+ * buffered in the driver for a given TID; mac80211 can then use this data
+ * to set the TIM bit (NOTE: This may call back into the driver's set_tim
+ * call! Beware of the locking!)
+ *
+ * If all frames are released to the station (due to PS-poll or uAPSD)
+ * then the driver needs to inform mac80211 that there are no longer
+ * any frames buffered. However, when the station wakes up, mac80211
+ * assumes that all buffered frames will be transmitted and clears this
+ * data; drivers therefore need to make sure they inform mac80211 about
+ * all buffered frames on the sleep transition (sta_notify() with
+ * %STA_NOTIFY_SLEEP).
+ *
+ * Note that technically mac80211 only needs to know this per AC, not per
+ * TID, but since driver buffering will inevitably happen per TID (since
+ * it is related to aggregation) it is easier to make mac80211 map the
+ * TID to the AC as required instead of keeping track in all drivers that
+ * use this API.
+ */
+void ieee80211_sta_set_buffered(struct ieee80211_sta *sta,
+ u8 tid, bool buffered);
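+
+/*
+ * Example: a sketch of keeping mac80211's TIM knowledge in sync with a
+ * hypothetical driver-private per-TID queue.
+ *
+ *	static void mydrv_txq_update_tim(struct mydrv_txq *mtxq)
+ *	{
+ *		ieee80211_sta_set_buffered(mtxq->sta, mtxq->tid,
+ *					   !skb_queue_empty(&mtxq->frames));
+ *	}
+ */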
+
+/**
+ * ieee80211_get_tx_rates - get the selected transmit rates for a packet
+ *
+ * Call this function in a driver with per-packet rate selection support
+ * to combine the rate info in the packet tx info with the most recent
+ * rate selection table for the station entry.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @sta: the receiver station to which this packet is sent.
+ * @skb: the frame to be transmitted.
+ * @dest: buffer for extracted rate/retry information
+ * @max_rates: maximum number of rates to fetch
+ */
+void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct ieee80211_tx_rate *dest,
+ int max_rates);
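+
+/*
+ * Example: a sketch of a TX path merging the per-packet rate info with
+ * the station's rate table just before handing the frame to hardware;
+ * the descriptor-filling helper is hypothetical.
+ *
+ *	static void mydrv_tx(struct ieee80211_vif *vif,
+ *			     struct ieee80211_sta *sta, struct sk_buff *skb)
+ *	{
+ *		struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
+ *
+ *		ieee80211_get_tx_rates(vif, sta, skb, rates,
+ *				       ARRAY_SIZE(rates));
+ *		mydrv_fill_rate_desc(skb, rates, ARRAY_SIZE(rates));
+ *	}
+ */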
+
+/**
+ * ieee80211_sta_set_expected_throughput - set the expected tpt for a station
+ *
+ * Call this function to notify mac80211 about a change in expected throughput
+ * to a station. A driver for a device that does rate control in firmware can
+ * call this function when the expected throughput estimate towards a station
+ * changes. The information is used to tune the CoDel AQM applied to traffic
+ * going towards that station (which can otherwise be too aggressive and cause
+ * slow stations to starve).
+ *
+ * @pubsta: the station to set throughput for.
+ * @thr: the current expected throughput in kbps.
+ */
+void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
+ u32 thr);
+
+/**
+ * ieee80211_tx_rate_update - transmit rate update callback
+ *
+ * Drivers should call this function with a non-NULL @pubsta.
+ * This function can be used by drivers that have no provision for
+ * updating the tx rate in the data path.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @pubsta: the station to update the tx rate for.
+ * @info: tx status information
+ */
+void ieee80211_tx_rate_update(struct ieee80211_hw *hw,
+ struct ieee80211_sta *pubsta,
+ struct ieee80211_tx_info *info);
+
+/**
+ * ieee80211_tx_status - transmit status callback
+ *
+ * Call this function for all transmitted frames after they have been
+ * transmitted. It is permissible to not call this function for
+ * multicast frames but this can affect statistics.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other. Calls
+ * to this function, ieee80211_tx_status_ni() and ieee80211_tx_status_irqsafe()
+ * may not be mixed for a single hardware. Must not run concurrently with
+ * ieee80211_rx() or ieee80211_rx_ni().
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @skb: the frame that was transmitted, owned by mac80211 after this call
+ */
+void ieee80211_tx_status(struct ieee80211_hw *hw,
+ struct sk_buff *skb);
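+
+/*
+ * Example: a sketch of reporting completions from a tasklet (BH
+ * context, so ieee80211_tx_status() may be used directly); the
+ * mydrv_* reaping helpers are hypothetical.
+ *
+ *	static void mydrv_tx_tasklet(struct tasklet_struct *t)
+ *	{
+ *		struct mydrv *dev = from_tasklet(dev, t, tx_tasklet);
+ *		struct sk_buff *skb;
+ *
+ *		while ((skb = mydrv_reap_completed(dev))) {
+ *			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ *
+ *			if (mydrv_was_acked(skb))
+ *				info->flags |= IEEE80211_TX_STAT_ACK;
+ *			ieee80211_tx_status(dev->hw, skb);
+ *		}
+ *	}
+ */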
+
+/**
+ * ieee80211_tx_status_ext - extended transmit status callback
+ *
+ * This function can be used as a replacement for ieee80211_tx_status
+ * in drivers that may want to provide extra information that does not
+ * fit into &struct ieee80211_tx_info.
+ *
+ * Calls to this function for a single hardware must be synchronized
+ * against each other. Calls to this function, ieee80211_tx_status_ni()
+ * and ieee80211_tx_status_irqsafe() may not be mixed for a single hardware.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @status: tx status information
+ */
+void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
+ struct ieee80211_tx_status *status);
+
+/**
+ * ieee80211_tx_status_noskb - transmit status callback without skb
+ *
+ * This function can be used as a replacement for ieee80211_tx_status
+ * in drivers that cannot reliably map tx status information back to
+ * specific skbs.
+ *
+ * Calls to this function for a single hardware must be synchronized
+ * against each other. Calls to this function, ieee80211_tx_status_ni()
+ * and ieee80211_tx_status_irqsafe() may not be mixed for a single hardware.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @sta: the receiver station to which this packet is sent
+ * (NULL for multicast packets)
+ * @info: tx status information
+ */
+static inline void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_status status = {
+ .sta = sta,
+ .info = info,
+ };
+
+ ieee80211_tx_status_ext(hw, &status);
+}
+
+/**
+ * ieee80211_tx_status_ni - transmit status callback (in process context)
+ *
+ * Like ieee80211_tx_status() but can be called in process context.
+ *
+ * Calls to this function, ieee80211_tx_status() and
+ * ieee80211_tx_status_irqsafe() may not be mixed
+ * for a single hardware.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @skb: the frame that was transmitted, owned by mac80211 after this call
+ */
+static inline void ieee80211_tx_status_ni(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ local_bh_disable();
+ ieee80211_tx_status(hw, skb);
+ local_bh_enable();
+}
+
+/**
+ * ieee80211_tx_status_irqsafe - IRQ-safe transmit status callback
+ *
+ * Like ieee80211_tx_status() but can be called in IRQ context
+ * (internally defers to a tasklet.)
+ *
+ * Calls to this function, ieee80211_tx_status() and
+ * ieee80211_tx_status_ni() may not be mixed for a single hardware.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @skb: the frame that was transmitted, owned by mac80211 after this call
+ */
+void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
+ struct sk_buff *skb);
+
+/**
+ * ieee80211_tx_status_8023 - transmit status callback for 802.3 frame format
+ *
+ * Call this function for all transmitted data frames after their transmit
+ * completion. This callback should only be called for data frames which
+ * use the driver's (or hardware's) 802.11 encap/decap offload capability.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other and all
+ * calls in the same tx status family.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @vif: the interface for which the frame was transmitted
+ * @skb: the frame that was transmitted, owned by mac80211 after this call
+ */
+void ieee80211_tx_status_8023(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb);
+
+/**
+ * ieee80211_report_low_ack - report non-responding station
+ *
+ * When operating in AP-mode, call this function to report a non-responding
+ * connected STA.
+ *
+ * @sta: the non-responding connected sta
+ * @num_packets: number of packets sent to @sta without a response
+ */
+void ieee80211_report_low_ack(struct ieee80211_sta *sta, u32 num_packets);
+
+#define IEEE80211_MAX_CNTDWN_COUNTERS_NUM 2
+
+/**
+ * struct ieee80211_mutable_offsets - mutable beacon offsets
+ * @tim_offset: position of TIM element
+ * @tim_length: size of TIM element
+ * @cntdwn_counter_offs: array of IEEE80211_MAX_CNTDWN_COUNTERS_NUM offsets
+ * to countdown counters. This array can contain zero values which
+ * should be ignored.
+ * @mbssid_off: position of the multiple BSSID element
+ */
+struct ieee80211_mutable_offsets {
+ u16 tim_offset;
+ u16 tim_length;
+
+ u16 cntdwn_counter_offs[IEEE80211_MAX_CNTDWN_COUNTERS_NUM];
+ u16 mbssid_off;
+};
+
+/**
+ * ieee80211_beacon_get_template - beacon template generation function
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @offs: &struct ieee80211_mutable_offsets pointer to struct that will
+ * receive the offsets that may be updated by the driver.
+ * @link_id: the link id to which the beacon belongs (or 0 for a non-MLD AP)
+ *
+ * If the driver implements beaconing modes, it must use this function to
+ * obtain the beacon template.
+ *
+ * This function should be used if the beacon frames are generated by the
+ * device; the driver must then use the returned beacon as the template.
+ * The driver or the device is responsible for updating the DTIM and, when
+ * applicable, the CSA count.
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: The beacon template. %NULL on error.
+ */
+struct sk_buff *
+ieee80211_beacon_get_template(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_mutable_offsets *offs,
+ unsigned int link_id);
+
+/**
+ * ieee80211_beacon_get_tim - beacon generation function
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @tim_offset: pointer to variable that will receive the TIM IE offset.
+ * Set to 0 if invalid (in non-AP modes).
+ * @tim_length: pointer to variable that will receive the TIM IE length,
+ * (including the ID and length bytes!).
+ * Set to 0 if invalid (in non-AP modes).
+ * @link_id: the link id to which the beacon belongs (or 0 for a non-MLD AP)
+ *
+ * If the driver implements beaconing modes, it must use this function to
+ * obtain the beacon frame.
+ *
+ * If the beacon frames are generated by the host system (i.e., not in
+ * hardware/firmware), the driver uses this function to get each beacon
+ * frame from mac80211 -- it is responsible for calling this function exactly
+ * once before the beacon is needed (e.g. based on hardware interrupt).
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: The beacon template. %NULL on error.
+ */
+struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 *tim_offset, u16 *tim_length,
+ unsigned int link_id);
+
+/**
+ * ieee80211_beacon_get - beacon generation function
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: the link id to which the beacon belongs (or 0 for a non-MLD AP)
+ *
+ * See ieee80211_beacon_get_tim().
+ *
+ * Return: See ieee80211_beacon_get_tim().
+ */
+static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id)
+{
+ return ieee80211_beacon_get_tim(hw, vif, NULL, NULL, link_id);
+}
+
+/**
+ * ieee80211_beacon_update_cntdwn - request mac80211 to decrement the beacon countdown
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * The beacon counter should be updated after each beacon transmission.
+ * This function is called implicitly when
+ * ieee80211_beacon_get()/ieee80211_beacon_get_tim() are called; however,
+ * if the beacon frames are generated by the device, the driver should call
+ * this function after each beacon transmission to sync mac80211's beacon
+ * countdown.
+ *
+ * Return: new countdown value
+ */
+u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_beacon_set_cntdwn - request mac80211 to set beacon countdown
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @counter: the new value for the counter
+ *
+ * The beacon countdown can be changed by the device; this API should be
+ * used by the device driver to update the CSA counter in mac80211.
+ *
+ * It should never be used together with ieee80211_beacon_update_cntdwn(),
+ * as it will cause a race condition around the counter value.
+ */
+void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter);
+
+/**
+ * ieee80211_csa_finish - notify mac80211 about channel switch
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * After a channel switch announcement was scheduled and the counter in this
+ * announcement hits 1, this function must be called by the driver to
+ * notify mac80211 that the channel can be changed.
+ */
+void ieee80211_csa_finish(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_beacon_cntdwn_is_complete - find out if countdown reached 1
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Return: %true if the countdown reached 1, %false otherwise.
+ */
+bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif);
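+
+/*
+ * Example: a sketch of the countdown handling in a driver whose device
+ * transmits beacons by itself; the beacon-sent notification hook is
+ * hypothetical.
+ *
+ *	static void mydrv_beacon_sent(struct ieee80211_vif *vif)
+ *	{
+ *		if (ieee80211_beacon_cntdwn_is_complete(vif))
+ *			ieee80211_csa_finish(vif);
+ *		else
+ *			ieee80211_beacon_update_cntdwn(vif);
+ *	}
+ */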
+
+/**
+ * ieee80211_color_change_finish - notify mac80211 about color change
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * After a color change announcement was scheduled and the counter in this
+ * announcement hits 1, this function must be called by the driver to
+ * notify mac80211 that the color can be changed.
+ */
+void ieee80211_color_change_finish(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_proberesp_get - retrieve a Probe Response template
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Creates a Probe Response template which can, for example, be uploaded to
+ * hardware. The destination address should be set by the caller.
+ *
+ * Can only be called in AP mode.
+ *
+ * Return: The Probe Response template. %NULL on error.
+ */
+struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_pspoll_get - retrieve a PS Poll template
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Creates a PS Poll template which can, for example, be uploaded to
+ * hardware. The template must be updated after association so that the
+ * correct AID, BSSID and MAC address are used.
+ *
+ * Note: Caller (or hardware) is responsible for setting the
+ * &IEEE80211_FCTL_PM bit.
+ *
+ * Return: The PS Poll template. %NULL on error.
+ */
+struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_nullfunc_get - retrieve a nullfunc template
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: If the vif is an MLD, get a frame with the link addresses
+ * for the given link ID. For a link_id < 0 you get a frame with
+ * MLD addresses, however useful that might be.
+ * @qos_ok: QoS NDP is acceptable to the caller, this should be set
+ * if at all possible
+ *
+ * Creates a Nullfunc template which can, for example, be uploaded to
+ * hardware. The template must be updated after association so that the
+ * correct BSSID and address are used.
+ *
+ * If @qos_ok is set and the association is to an AP with QoS/WMM, the
+ * returned packet will be a QoS NDP.
+ *
+ * Note: Caller (or hardware) is responsible for setting the
+ * &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields.
+ *
+ * Return: The nullfunc template. %NULL on error.
+ */
+struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ int link_id, bool qos_ok);
+
+/**
+ * ieee80211_probereq_get - retrieve a Probe Request template
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @src_addr: source MAC address
+ * @ssid: SSID buffer
+ * @ssid_len: length of SSID
+ * @tailroom: tailroom to reserve at end of SKB for IEs
+ *
+ * Creates a Probe Request template which can, for example, be uploaded to
+ * hardware.
+ *
+ * Return: The Probe Request template. %NULL on error.
+ */
+struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
+ const u8 *src_addr,
+ const u8 *ssid, size_t ssid_len,
+ size_t tailroom);
+
+/**
+ * ieee80211_rts_get - RTS frame generation function
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @frame: pointer to the frame that is going to be protected by the RTS.
+ * @frame_len: the frame length (in octets).
+ * @frame_txctl: &struct ieee80211_tx_info of the frame.
+ * @rts: The buffer where to store the RTS frame.
+ *
+ * If the RTS frames are generated by the host system (i.e., not in
+ * hardware/firmware), the low-level driver uses this function to receive
+ * the next RTS frame from the 802.11 code. The low-level driver is
+ * responsible for calling this function before an RTS frame is needed.
+ */
+void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const void *frame, size_t frame_len,
+ const struct ieee80211_tx_info *frame_txctl,
+ struct ieee80211_rts *rts);
+
+/**
+ * ieee80211_rts_duration - Get the duration field for an RTS frame
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @frame_len: the length of the frame that is going to be protected by the RTS.
+ * @frame_txctl: &struct ieee80211_tx_info of the frame.
+ *
+ * If the RTS is generated in firmware, but the host system must provide
+ * the duration field, the low-level driver uses this function to receive
+ * the duration field value in little-endian byteorder.
+ *
+ * Return: The duration.
+ */
+__le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, size_t frame_len,
+ const struct ieee80211_tx_info *frame_txctl);
+
+/**
+ * ieee80211_ctstoself_get - CTS-to-self frame generation function
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @frame: pointer to the frame that is going to be protected by the CTS-to-self.
+ * @frame_len: the frame length (in octets).
+ * @frame_txctl: &struct ieee80211_tx_info of the frame.
+ * @cts: The buffer where to store the CTS-to-self frame.
+ *
+ * If the CTS-to-self frames are generated by the host system (i.e., not in
+ * hardware/firmware), the low-level driver uses this function to receive
+ * the next CTS-to-self frame from the 802.11 code. The low-level driver
+ * is responsible for calling this function before a CTS-to-self frame is
+ * needed.
+ */
+void ieee80211_ctstoself_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const void *frame, size_t frame_len,
+ const struct ieee80211_tx_info *frame_txctl,
+ struct ieee80211_cts *cts);
+
+/**
+ * ieee80211_ctstoself_duration - Get the duration field for a CTS-to-self frame
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @frame_len: the length of the frame that is going to be protected by the CTS-to-self.
+ * @frame_txctl: &struct ieee80211_tx_info of the frame.
+ *
+ * If the CTS-to-self is generated in firmware, but the host system must provide
+ * the duration field, the low-level driver uses this function to receive
+ * the duration field value in little-endian byteorder.
+ *
+ * Return: The duration.
+ */
+__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ size_t frame_len,
+ const struct ieee80211_tx_info *frame_txctl);
+
+/**
+ * ieee80211_generic_frame_duration - Calculate the duration field for a frame
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @band: the band to calculate the frame duration on
+ * @frame_len: the length of the frame.
+ * @rate: the rate at which the frame is going to be transmitted.
+ *
+ * Calculate the duration field of some generic frame, given its
+ * length and transmission rate (in 100kbps).
+ *
+ * Return: The duration.
+ */
+__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_band band,
+ size_t frame_len,
+ struct ieee80211_rate *rate);
+
+/**
+ * ieee80211_get_buffered_bc - accessing buffered broadcast and multicast frames
+ * @hw: pointer as obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Function for accessing buffered broadcast and multicast frames. If
+ * hardware/firmware does not implement buffering of broadcast/multicast
+ * frames when power saving is used, 802.11 code buffers them in the host
+ * memory. The low-level driver uses this function to fetch the next
+ * buffered frame. In most cases, this is used when generating a beacon
+ * frame.
+ *
+ * Return: A pointer to the next buffered skb or NULL if no more buffered
+ * frames are available.
+ *
+ * Note: buffered frames are returned only after the DTIM beacon frame has
+ * been generated with ieee80211_beacon_get() and the low-level driver must
+ * thus call ieee80211_beacon_get() first. ieee80211_get_buffered_bc()
+ * returns %NULL if the previously generated beacon was not a DTIM beacon,
+ * so the low-level driver
+ * does not need to check for DTIM beacons separately and should be able to
+ * use common code for all beacons.
+ */
+struct sk_buff *
+ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
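+
+/*
+ * Example: a sketch of host-generated beaconing from a "beacon due"
+ * interrupt: fetch the beacon, then drain buffered broadcast/multicast
+ * frames behind a (potential) DTIM beacon. The mydrv_* names are
+ * hypothetical.
+ *
+ *	static void mydrv_beacon_due(struct mydrv *dev,
+ *				     struct ieee80211_vif *vif)
+ *	{
+ *		struct sk_buff *skb = ieee80211_beacon_get(dev->hw, vif, 0);
+ *
+ *		if (!skb)
+ *			return;
+ *		mydrv_tx_frame(dev, skb);
+ *
+ *		while ((skb = ieee80211_get_buffered_bc(dev->hw, vif)))
+ *			mydrv_tx_frame(dev, skb);
+ *	}
+ */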
+
+/**
+ * ieee80211_get_tkip_p1k_iv - get a TKIP phase 1 key for IV32
+ *
+ * This function returns the TKIP phase 1 key for the given IV32.
+ *
+ * @keyconf: the parameter passed with the set key
+ * @iv32: IV32 to get the P1K for
+ * @p1k: a buffer to which the key will be written, as 5 u16 values
+ */
+void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf,
+ u32 iv32, u16 *p1k);
+
+/**
+ * ieee80211_get_tkip_p1k - get a TKIP phase 1 key
+ *
+ * This function returns the TKIP phase 1 key for the IV32 taken
+ * from the given packet.
+ *
+ * @keyconf: the parameter passed with the set key
+ * @skb: the packet to take the IV32 value from that will be encrypted
+ * with this P1K
+ * @p1k: a buffer to which the key will be written, as 5 u16 values
+ */
+static inline void ieee80211_get_tkip_p1k(struct ieee80211_key_conf *keyconf,
+ struct sk_buff *skb, u16 *p1k)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
+ u32 iv32 = get_unaligned_le32(&data[4]);
+
+ ieee80211_get_tkip_p1k_iv(keyconf, iv32, p1k);
+}
+
+/**
+ * ieee80211_get_tkip_rx_p1k - get a TKIP phase 1 key for RX
+ *
+ * This function returns the TKIP phase 1 key for the given IV32
+ * and transmitter address.
+ *
+ * @keyconf: the parameter passed with the set key
+ * @ta: TA that will be used with the key
+ * @iv32: IV32 to get the P1K for
+ * @p1k: a buffer to which the key will be written, as 5 u16 values
+ */
+void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf,
+ const u8 *ta, u32 iv32, u16 *p1k);
+
+/**
+ * ieee80211_get_tkip_p2k - get a TKIP phase 2 key
+ *
+ * This function computes the TKIP RC4 key for the IV values
+ * in the packet.
+ *
+ * @keyconf: the parameter passed with the set key
+ * @skb: the packet to take the IV32/IV16 values from that will be
+ * encrypted with this key
+ * @p2k: a buffer to which the key will be written, 16 bytes
+ */
+void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
+ struct sk_buff *skb, u8 *p2k);
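+
+/*
+ * Example: a sketch of deriving the per-MPDU TKIP keys for hardware
+ * that takes phase 1/phase 2 keys in its TX descriptor; the descriptor
+ * layout (p1k as 5 u16 values, p2k as 16 bytes) and names are
+ * hypothetical.
+ *
+ *	static void mydrv_tx_tkip(struct ieee80211_key_conf *keyconf,
+ *				  struct sk_buff *skb,
+ *				  struct mydrv_txdesc *desc)
+ *	{
+ *		ieee80211_get_tkip_p1k(keyconf, skb, desc->p1k);
+ *		ieee80211_get_tkip_p2k(keyconf, skb, desc->p2k);
+ *	}
+ */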
+
+/**
+ * ieee80211_tkip_add_iv - write TKIP IV and Ext. IV to pos
+ *
+ * @pos: start of crypto header
+ * @keyconf: the parameter passed with the set key
+ * @pn: PN to add
+ *
+ * Returns: pointer to the octet following IVs (i.e. beginning of
+ * the packet payload)
+ *
+ * This function writes the TKIP IV value to pos (which should
+ * point to the crypto header)
+ */
+u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key_conf *keyconf, u64 pn);
+
+/**
+ * ieee80211_get_key_rx_seq - get key RX sequence counter
+ *
+ * @keyconf: the parameter passed with the set key
+ * @tid: The TID, or -1 for the management frame value (CCMP/GCMP only);
+ * the value on TID 0 is also used for non-QoS frames. For
+ * CMAC, only TID 0 is valid.
+ * @seq: buffer to receive the sequence data
+ *
+ * This function allows a driver to retrieve the current RX IV/PNs
+ * for the given key. It must not be called if IV checking is done
+ * by the device and not by mac80211.
+ *
+ * Note that this function may only be called when no RX processing
+ * can be done concurrently.
+ */
+void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
+ int tid, struct ieee80211_key_seq *seq);
+
+/**
+ * ieee80211_set_key_rx_seq - set key RX sequence counter
+ *
+ * @keyconf: the parameter passed with the set key
+ * @tid: The TID, or -1 for the management frame value (CCMP/GCMP only);
+ * the value on TID 0 is also used for non-QoS frames. For
+ * CMAC, only TID 0 is valid.
+ * @seq: new sequence data
+ *
+ * This function allows a driver to set the current RX IV/PNs for the
+ * given key. This is useful when resuming from WoWLAN sleep and GTK
+ * rekey may have been done while suspended. It should not be called
+ * if IV checking is done by the device and not by mac80211.
+ *
+ * Note that this function may only be called when no RX processing
+ * can be done concurrently.
+ */
+void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
+ int tid, struct ieee80211_key_seq *seq);
+
+/**
+ * ieee80211_remove_key - remove the given key
+ * @keyconf: the parameter passed with the set key
+ *
+ * Remove the given key. If the key was uploaded to the hardware at the
+ * time this function is called, it is not deleted in the hardware but
+ * instead assumed to have been removed already.
+ *
+ * Note that due to locking considerations this function can (currently)
+ * only be called during key iteration (ieee80211_iter_keys()).
+ */
+void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
+
+/**
+ * ieee80211_gtk_rekey_add - add a GTK key from rekeying during WoWLAN
+ * @vif: the virtual interface to add the key on
+ * @keyconf: new key data
+ *
+ * When GTK rekeying was done while the system was suspended, (a) new
+ * key(s) will be available. These will be needed by mac80211 for proper
+ * RX processing, so this function allows setting them.
+ *
+ * The function returns the newly allocated key structure, which will
+ * have similar contents to the passed key configuration but point to
+ * mac80211-owned memory. In case of errors, the function returns an
+ * ERR_PTR(); use IS_ERR() etc.
+ *
+ * Note that this function assumes the key isn't added to hardware
+ * acceleration, so no TX will be done with the key. Since it's a GTK
+ * on managed (station) networks, this is true anyway. If the driver
+ * calls this function from the resume callback and subsequently uses
+ * the return code 1 to reconfigure the device, this key will be part
+ * of the reconfiguration.
+ *
+ * Note that the driver should also call ieee80211_set_key_rx_seq()
+ * for the new key for each TID to set up sequence counters properly.
+ *
+ * IMPORTANT: If this replaces a key that is present in the hardware,
+ * then it will attempt to remove it during this call. In many cases
+ * this isn't what you want, so call ieee80211_remove_key() first for
+ * the key that's being replaced.
+ */
+struct ieee80211_key_conf *
+ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf);
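+
+/*
+ * Example: a sketch of adding a rekeyed GTK on resume, including the
+ * per-TID RX PN setup recommended above; how the PNs are read back
+ * from the device (mydrv_read_gtk_rx_pn()) is hypothetical.
+ *
+ *	static void mydrv_resume_add_gtk(struct ieee80211_vif *vif,
+ *					 struct ieee80211_key_conf *keyconf)
+ *	{
+ *		struct ieee80211_key_conf *key;
+ *		struct ieee80211_key_seq seq;
+ *		int tid;
+ *
+ *		key = ieee80211_gtk_rekey_add(vif, keyconf);
+ *		if (IS_ERR(key))
+ *			return;
+ *
+ *		for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
+ *			mydrv_read_gtk_rx_pn(key, tid, &seq);
+ *			ieee80211_set_key_rx_seq(key, tid, &seq);
+ *		}
+ *	}
+ */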
+
+/**
+ * ieee80211_gtk_rekey_notify - notify userspace supplicant of rekeying
+ * @vif: virtual interface the rekeying was done on
+ * @bssid: The BSSID of the AP, for checking association
+ * @replay_ctr: the new replay counter after GTK rekeying
+ * @gfp: allocation flags
+ */
+void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid,
+ const u8 *replay_ctr, gfp_t gfp);
+
+/**
+ * ieee80211_key_mic_failure - increment MIC failure counter for the key
+ *
+ * Note: this is really only safe if no other RX function is called
+ * at the same time.
+ *
+ * @keyconf: the key in question
+ */
+void ieee80211_key_mic_failure(struct ieee80211_key_conf *keyconf);
+
+/**
+ * ieee80211_key_replay - increment replay counter for the key
+ *
+ * Note: this is really only safe if no other RX function is called
+ * at the same time.
+ *
+ * @keyconf: the key in question
+ */
+void ieee80211_key_replay(struct ieee80211_key_conf *keyconf);
+
+/**
+ * ieee80211_wake_queue - wake specific queue
+ * @hw: pointer as obtained from ieee80211_alloc_hw().
+ * @queue: queue number (counted from zero).
+ *
+ * Drivers should use this function instead of netif_wake_queue.
+ */
+void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue);
+
+/**
+ * ieee80211_stop_queue - stop specific queue
+ * @hw: pointer as obtained from ieee80211_alloc_hw().
+ * @queue: queue number (counted from zero).
+ *
+ * Drivers should use this function instead of netif_stop_queue.
+ */
+void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue);
+
+/**
+ * ieee80211_queue_stopped - test status of the queue
+ * @hw: pointer as obtained from ieee80211_alloc_hw().
+ * @queue: queue number (counted from zero).
+ *
+ * Drivers should use this function instead of netif_queue_stopped().
+ *
+ * Return: %true if the queue is stopped. %false otherwise.
+ */
+int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue);
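+
+/*
+ * Example: a sketch of per-queue flow control: stop a queue when the
+ * hardware ring fills, wake it from the reclaim path. The ring
+ * accounting helpers are hypothetical.
+ *
+ *	static void mydrv_tx_enqueue(struct mydrv *dev, int queue,
+ *				     struct sk_buff *skb)
+ *	{
+ *		mydrv_ring_push(dev, queue, skb);
+ *		if (mydrv_ring_full(dev, queue))
+ *			ieee80211_stop_queue(dev->hw, queue);
+ *	}
+ *
+ *	static void mydrv_tx_reclaim(struct mydrv *dev, int queue)
+ *	{
+ *		mydrv_ring_reap(dev, queue);
+ *		if (!mydrv_ring_full(dev, queue))
+ *			ieee80211_wake_queue(dev->hw, queue);
+ *	}
+ */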
+
+/**
+ * ieee80211_stop_queues - stop all queues
+ * @hw: pointer as obtained from ieee80211_alloc_hw().
+ *
+ * Drivers should use this function instead of netif_stop_queue.
+ */
+void ieee80211_stop_queues(struct ieee80211_hw *hw);
+
+/**
+ * ieee80211_wake_queues - wake all queues
+ * @hw: pointer as obtained from ieee80211_alloc_hw().
+ *
+ * Drivers should use this function instead of netif_wake_queue.
+ */
+void ieee80211_wake_queues(struct ieee80211_hw *hw);
+
+/**
+ * ieee80211_scan_completed - completed hardware scan
+ *
+ * When hardware scan offload is used (i.e. the hw_scan() callback is
+ * assigned) this function needs to be called by the driver to notify
+ * mac80211 that the scan finished. This function can be called from
+ * any context, including hardirq context.
+ *
+ * @hw: the hardware that finished the scan
+ * @info: information about the completed scan
+ */
+void ieee80211_scan_completed(struct ieee80211_hw *hw,
+ struct cfg80211_scan_info *info);
+
+/**
+ * ieee80211_sched_scan_results - got results from scheduled scan
+ *
+ * When a scheduled scan is running, this function needs to be called by the
+ * driver whenever there are new scan results available.
+ *
+ * @hw: the hardware that is performing scheduled scans
+ */
+void ieee80211_sched_scan_results(struct ieee80211_hw *hw);
+
+/**
+ * ieee80211_sched_scan_stopped - inform that the scheduled scan has stopped
+ *
+ * When a scheduled scan is running, this function can be called by
+ * the driver if it needs to stop the scan to perform another task.
+ * Usual scenarios are drivers that cannot continue the scheduled scan
+ * while associating, for instance.
+ *
+ * @hw: the hardware that is performing scheduled scans
+ */
+void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw);
+
+/**
+ * enum ieee80211_interface_iteration_flags - interface iteration flags
+ * @IEEE80211_IFACE_ITER_NORMAL: Iterate over all interfaces that have
+ * been added to the driver; however, note that during hardware
+ * reconfiguration (after restart_hw) it will iterate over a new
+ * interface and over all the existing interfaces even if they
+ * haven't been re-added to the driver yet.
+ * @IEEE80211_IFACE_ITER_RESUME_ALL: During resume, iterate over all
+ * interfaces, even if they haven't been re-added to the driver yet.
+ * @IEEE80211_IFACE_ITER_ACTIVE: Iterate only active interfaces (netdev is up).
+ * @IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER: Skip any interfaces where SDATA
+ * is not in the driver. This may fix crashes during firmware recovery
+ * for instance.
+ */
+enum ieee80211_interface_iteration_flags {
+ IEEE80211_IFACE_ITER_NORMAL = 0,
+ IEEE80211_IFACE_ITER_RESUME_ALL = BIT(0),
+ IEEE80211_IFACE_ITER_ACTIVE = BIT(1),
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER = BIT(2),
+};
+
+/**
+ * ieee80211_iterate_interfaces - iterate interfaces
+ *
+ * This function iterates over the interfaces associated with a given
+ * hardware and calls the callback for them. This includes active as well as
+ * inactive interfaces. This function allows the iterator function to sleep.
+ * Will iterate over a new interface during add_interface().
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
+ * @iterator: the iterator function to call
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_interfaces(struct ieee80211_hw *hw, u32 iter_flags,
+ void (*iterator)(void *data, u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data);
+
+/**
+ * ieee80211_iterate_active_interfaces - iterate active interfaces
+ *
+ * This function iterates over the interfaces associated with a given
+ * hardware that are currently active and calls the callback for them.
+ * This function allows the iterator function to sleep; if the iterator
+ * function must be atomic, ieee80211_iterate_active_interfaces_atomic()
+ * can be used instead.
+ * Does not iterate over a new interface during add_interface().
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
+ * @iterator: the iterator function to call
+ * @data: first argument of the iterator function
+ */
+static inline void
+ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, u32 iter_flags,
+ void (*iterator)(void *data, u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data)
+{
+ ieee80211_iterate_interfaces(hw,
+ iter_flags | IEEE80211_IFACE_ITER_ACTIVE,
+ iterator, data);
+}
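+
+/*
+ * Example: a sketch of collecting the MAC addresses of all active
+ * interfaces, e.g. from driver setup code; the iterator signature is
+ * the one above, the collection structure is hypothetical.
+ *
+ *	static void mydrv_addr_iter(void *data, u8 *mac,
+ *				    struct ieee80211_vif *vif)
+ *	{
+ *		struct mydrv_addr_list *list = data;
+ *
+ *		if (list->count < MYDRV_MAX_ADDRS)
+ *			memcpy(list->addr[list->count++], mac, ETH_ALEN);
+ *	}
+ *
+ *	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_NORMAL,
+ *					    mydrv_addr_iter, &list);
+ */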
+
+/**
+ * ieee80211_iterate_active_interfaces_atomic - iterate active interfaces
+ *
+ * This function iterates over the interfaces associated with a given
+ * hardware that are currently active and calls the callback for them.
+ * This function requires the iterator callback function to be atomic,
+ * if that is not desired, use ieee80211_iterate_active_interfaces() instead.
+ * Does not iterate over a new interface during add_interface().
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
+ * @iterator: the iterator function to call, cannot sleep
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
+ u32 iter_flags,
+ void (*iterator)(void *data,
+ u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data);
+
+/**
+ * ieee80211_iterate_active_interfaces_mtx - iterate active interfaces
+ *
+ * This function iterates over the interfaces associated with a given
+ * hardware that are currently active and calls the callback for them.
+ * This version can only be used while holding the wiphy mutex.
+ * The driver must not call this with a lock held that it can also take in
+ * response to callbacks from mac80211, and it must not call this within
+ * callbacks made by mac80211 - both would result in deadlocks.
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
+ * @iterator: the iterator function to call, cannot sleep
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_active_interfaces_mtx(struct ieee80211_hw *hw,
+ u32 iter_flags,
+ void (*iterator)(void *data,
+ u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data);
+
+/**
+ * ieee80211_iterate_stations - iterate stations
+ *
+ * This function iterates over all stations associated with a given
+ * hardware that are currently uploaded to the driver and calls the callback
+ * function for them.
+ * This function allows the iterator function to sleep; if the iterator
+ * function must be atomic, ieee80211_iterate_stations_atomic() can be
+ * used instead.
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iterator: the iterator function to call
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_stations(struct ieee80211_hw *hw,
+ void (*iterator)(void *data,
+ struct ieee80211_sta *sta),
+ void *data);
+
+/**
+ * ieee80211_iterate_stations_atomic - iterate stations
+ *
+ * This function iterates over all stations associated with a given
+ * hardware that are currently uploaded to the driver and calls the callback
+ * function for them.
+ * This function requires the iterator callback function to be atomic.
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iterator: the iterator function to call, cannot sleep
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
+ void (*iterator)(void *data,
+ struct ieee80211_sta *sta),
+ void *data);
+
+/**
+ * ieee80211_queue_work - add work onto the mac80211 workqueue
+ *
+ * Drivers and mac80211 use this to add work onto the mac80211 workqueue.
+ * This helper ensures drivers are not queueing work when they should not be.
+ *
+ * @hw: the hardware struct for the interface we are adding work for
+ * @work: the work we want to add onto the mac80211 workqueue
+ */
+void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work);
+
+/**
+ * ieee80211_queue_delayed_work - add work onto the mac80211 workqueue
+ *
+ * Drivers and mac80211 use this to queue delayed work onto the mac80211
+ * workqueue.
+ *
+ * @hw: the hardware struct for the interface we are adding work for
+ * @dwork: delayable work to queue onto the mac80211 workqueue
+ * @delay: number of jiffies to wait before queueing
+ */
+void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
+ struct delayed_work *dwork,
+ unsigned long delay);
+
+/**
+ * ieee80211_start_tx_ba_session - Start a tx Block Ack session.
+ * @sta: the station for which to start a BA session
+ * @tid: the TID to BA on.
+ * @timeout: session timeout value (in TUs)
+ *
+ * Return: success if addBA request was sent, failure otherwise
+ *
+ * Although mac80211/low level driver/user space application can estimate
+ * the need to start aggregation on a certain RA/TID, the session level
+ * will be managed by mac80211.
+ */
+int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, u16 tid,
+ u16 timeout);
+
+/**
+ * ieee80211_start_tx_ba_cb_irqsafe - low level driver ready to aggregate.
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
+ * @ra: receiver address of the BA session recipient.
+ * @tid: the TID to BA on.
+ *
+ * This function must be called by low level driver once it has
+ * finished with preparations for the BA session. It can be called
+ * from any context.
+ */
+void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra,
+ u16 tid);
+
+/**
+ * ieee80211_stop_tx_ba_session - Stop a Block Ack session.
+ * @sta: the station whose BA session to stop
+ * @tid: the TID to stop BA.
+ *
+ * Return: negative error if the TID is invalid, or no aggregation active
+ *
+ * Although mac80211/low level driver/user space application can estimate
+ * the need to stop aggregation on a certain RA/TID, the session level
+ * will be managed by mac80211.
+ */
+int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid);
+
+/**
+ * ieee80211_stop_tx_ba_cb_irqsafe - low level driver ready to stop aggregate.
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
+ * @ra: receiver address of the BA session recipient.
+ * @tid: the desired TID to BA on.
+ *
+ * This function must be called by low level driver once it has
+ * finished with preparations for the BA session tear down. It
+ * can be called from any context.
+ */
+void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra,
+ u16 tid);
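+
+/*
+ * Example: a sketch of the driver side of the TX BA handshake; the
+ * trigger condition is hypothetical and the hardware queue setup in
+ * between is omitted.
+ *
+ *	if (mydrv_should_aggregate(sta, tid))
+ *		ieee80211_start_tx_ba_session(sta, tid, 0);
+ *
+ *	Later, once the hardware queues are prepared:
+ *
+ *	ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ */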
+
+/**
+ * ieee80211_find_sta - find a station
+ *
+ * @vif: virtual interface to look for station on
+ * @addr: station's address
+ *
+ * Return: The station, if found. %NULL otherwise.
+ *
+ * Note: This function must be called under RCU lock and the
+ * resulting pointer is only valid under RCU lock as well.
+ */
+struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
+ const u8 *addr);
+
+/**
+ * ieee80211_find_sta_by_ifaddr - find a station on hardware
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @addr: remote station's address
+ * @localaddr: local address (vif->sdata->vif.addr). Use NULL for 'any'.
+ *
+ * Return: The station, if found. %NULL otherwise.
+ *
+ * Note: This function must be called under RCU lock and the
+ * resulting pointer is only valid under RCU lock as well.
+ *
+ * NOTE: You may pass NULL for localaddr, but then you will just get
+ * the first STA that matches the remote address 'addr'.
+ * We can have multiple STA associated with multiple
+ * logical stations (e.g. consider a station connecting to another
+ * BSSID on the same AP hardware without disconnecting first).
+ * In this case, the result of this method with localaddr NULL
+ * is not reliable.
+ *
+ * DO NOT USE THIS FUNCTION with localaddr NULL if at all possible.
+ */
+struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
+ const u8 *addr,
+ const u8 *localaddr);
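+
+/*
+ * Example: a sketch of the RCU discipline both lookup functions
+ * require; all use of the returned pointer must happen before
+ * rcu_read_unlock(). mydrv_handle_sta() is hypothetical.
+ *
+ *	rcu_read_lock();
+ *	sta = ieee80211_find_sta(vif, addr);
+ *	if (sta)
+ *		mydrv_handle_sta(sta);
+ *	rcu_read_unlock();
+ */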
+
+/**
+ * ieee80211_find_sta_by_link_addrs - find STA by link addresses
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @addr: remote station's link address
+ * @localaddr: local link address, use %NULL for any (but avoid that)
+ * @link_id: pointer to obtain the link ID if the STA is found,
+ * may be %NULL if the link ID is not needed
+ *
+ * Obtain the STA by link address; must be called under RCU protection.
+ */
+struct ieee80211_sta *
+ieee80211_find_sta_by_link_addrs(struct ieee80211_hw *hw,
+ const u8 *addr,
+ const u8 *localaddr,
+ unsigned int *link_id);
+
+/**
+ * ieee80211_sta_block_awake - block station from waking up
+ * @hw: the hardware
+ * @pubsta: the station
+ * @block: whether to block or unblock
+ *
+ * Some devices require that all frames that are on the queues
+ * for a specific station that went to sleep are flushed before
+ * a poll response or frames after the station woke up can be
+ * delivered to it. Note that such frames must be rejected
+ * by the driver as filtered, with the appropriate status flag.
+ *
+ * This function allows implementing this mode in a race-free
+ * manner.
+ *
+ * To do this, a driver must keep track of the number of frames
+ * still enqueued for a specific station. If this number is not
+ * zero when the station goes to sleep, the driver must call
+ * this function to force mac80211 to consider the station to
+ * be asleep regardless of the station's actual state. Once the
+ * number of outstanding frames reaches zero, the driver must
+ * call this function again to unblock the station. That will
+ * cause mac80211 to be able to send ps-poll responses, and if
+ * the station was queried in the meantime, frames will also
+ * be sent out as a result of this. Additionally, the driver
+ * will be notified that the station woke up some time after
+ * it is unblocked, regardless of whether the station actually
+ * woke up while blocked or not.
+ */
+void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
+ struct ieee80211_sta *pubsta, bool block);
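+
+/*
+ * Example: a sketch of the bookkeeping described above, using a
+ * hypothetical atomic per-station counter of frames still queued in
+ * the driver.
+ *
+ *	static void mydrv_sta_sleep(struct mydrv *dev,
+ *				    struct ieee80211_sta *sta)
+ *	{
+ *		struct mydrv_sta *msta = (void *)sta->drv_priv;
+ *
+ *		if (atomic_read(&msta->pending))
+ *			ieee80211_sta_block_awake(dev->hw, sta, true);
+ *	}
+ *
+ *	static void mydrv_tx_done(struct mydrv *dev,
+ *				  struct ieee80211_sta *sta)
+ *	{
+ *		struct mydrv_sta *msta = (void *)sta->drv_priv;
+ *
+ *		if (atomic_dec_and_test(&msta->pending))
+ *			ieee80211_sta_block_awake(dev->hw, sta, false);
+ *	}
+ */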
+
+/**
+ * ieee80211_sta_eosp - notify mac80211 about end of SP
+ * @pubsta: the station
+ *
+ * When a device transmits frames in a way that it can't tell
+ * mac80211 in the TX status about the EOSP, it must clear the
+ * %IEEE80211_TX_STATUS_EOSP bit and call this function instead.
+ * This applies for PS-Poll as well as uAPSD.
+ *
+ * Note that, just as with _tx_status() and _rx(), drivers must
+ * not mix calls to the irqsafe and non-irqsafe versions; this
+ * function must not be mixed with those either. Use all irqsafe,
+ * or all non-irqsafe; don't mix!
+ *
+ * NB: the _irqsafe version of this function doesn't exist; no
+ * driver needs it right now. Don't call this function if
+ * you'd need the _irqsafe version; instead, look at the git
+ * history and restore the _irqsafe version!
+ */
+void ieee80211_sta_eosp(struct ieee80211_sta *pubsta);
+
+/**
+ * ieee80211_send_eosp_nullfunc - ask mac80211 to send NDP with EOSP
+ * @pubsta: the station
+ * @tid: the tid of the NDP
+ *
+ * Sometimes the device understands that it needs to close
+ * the Service Period unexpectedly. This can happen when
+ * sending frames that are filling holes in the BA window.
+ * In this case, the device can ask mac80211 to send a
+ * Nullfunc frame with EOSP set. When that happens, the
+ * driver must have called ieee80211_sta_set_buffered() to
+ * let mac80211 know that there are no buffered frames any
+ * more, otherwise mac80211 will get the more_data bit wrong.
+ * The low level driver must have made sure that the frame
+ * will be sent despite the station being in power-save.
+ * Mac80211 won't call allow_buffered_frames().
+ * Note that calling this function doesn't exempt the driver
+ * from closing the EOSP properly; it will still have to call
+ * ieee80211_sta_eosp() when the NDP is sent.
+ */
+void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid);
+
+/**
+ * ieee80211_sta_recalc_aggregates - recalculate aggregate data after a change
+ * @pubsta: the station
+ *
+ * Call this function after changing per-link aggregate data as referenced in
+ * &struct ieee80211_sta_aggregates by accessing the agg field of
+ * &struct ieee80211_link_sta.
+ *
+ * Without MLO, the data in deflink is referenced directly; in that case
+ * there is no need to call this function.
+ */
+void ieee80211_sta_recalc_aggregates(struct ieee80211_sta *pubsta);
+
+/**
+ * ieee80211_sta_register_airtime - register airtime usage for a sta/tid
+ *
+ * Register airtime usage for a given sta on a given tid. The driver must call
+ * this function to notify mac80211 that a station used a certain amount of
+ * airtime. This information will be used by the TXQ scheduler to schedule
+ * stations in a way that ensures airtime fairness.
+ *
+ * The reported airtime should as a minimum include all time that is spent
+ * transmitting to the remote station, including overhead and padding, but not
+ * including time spent waiting for a TXOP. If the time is not reported by the
+ * hardware it can in some cases be calculated from the rate and known frame
+ * composition. When possible, the time should include any failed transmission
+ * attempts.
+ *
+ * The driver can either call this function synchronously for every packet or
+ * aggregate, or asynchronously as airtime usage information becomes available.
+ * TX and RX airtime can be reported together, or separately by setting one of
+ * them to 0.
+ *
+ * @pubsta: the station
+ * @tid: the TID to register airtime for
+ * @tx_airtime: airtime used during TX (in usec)
+ * @rx_airtime: airtime used during RX (in usec)
+ */
+void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
+ u32 tx_airtime, u32 rx_airtime);
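+
+/*
+ * Example: a sketch of reporting TX airtime from a completion path;
+ * where the duration comes from (rate lookup or a hardware-reported
+ * value) is up to the driver and hypothetical here.
+ *
+ *	static void mydrv_report_airtime(struct ieee80211_sta *sta, u8 tid,
+ *					 u32 tx_usec)
+ *	{
+ *		ieee80211_sta_register_airtime(sta, tid, tx_usec, 0);
+ *	}
+ */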
+
+/**
+ * ieee80211_txq_airtime_check - check if a txq can send frame to device
+ *
+ * @hw: pointer obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface
+ *
+ * Return: %true if the AQL airtime limit has not been reached and the txq
+ * can continue to send more packets to the device, %false otherwise.
+ */
+bool
+ieee80211_txq_airtime_check(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+
+/**
+ * ieee80211_iter_keys - iterate keys programmed into the device
+ * @hw: pointer obtained from ieee80211_alloc_hw()
+ * @vif: virtual interface to iterate, may be %NULL for all
+ * @iter: iterator function that will be called for each key
+ * @iter_data: custom data to pass to the iterator function
+ *
+ * This function can be used to iterate all the keys known to
+ * mac80211, even those that weren't previously programmed into
+ * the device. This is intended for use in WoWLAN if the device
+ * needs reprogramming of the keys during suspend. Note that due
+ * to locking reasons, it is also only safe to call this at a few
+ * spots, since it must hold the RTNL and be able to sleep.
+ *
+ * The order in which the keys are iterated matches the order
+ * in which they were originally installed and handed to the
+ * set_key callback.
+ */
+void ieee80211_iter_keys(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void (*iter)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data),
+ void *iter_data);
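+
+/*
+ * Example: a sketch of reprogramming all keys into the device for
+ * WoWLAN using the key iterator; mydrv_upload_key() is hypothetical.
+ *
+ *	static void mydrv_key_iter(struct ieee80211_hw *hw,
+ *				   struct ieee80211_vif *vif,
+ *				   struct ieee80211_sta *sta,
+ *				   struct ieee80211_key_conf *key,
+ *				   void *data)
+ *	{
+ *		mydrv_upload_key(data, vif, sta, key);
+ *	}
+ *
+ *	ieee80211_iter_keys(hw, NULL, mydrv_key_iter, dev);
+ */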
+
+/**
+ * ieee80211_iter_keys_rcu - iterate keys programmed into the device
+ * @hw: pointer obtained from ieee80211_alloc_hw()
+ * @vif: virtual interface to iterate, may be %NULL for all
+ * @iter: iterator function that will be called for each key
+ * @iter_data: custom data to pass to the iterator function
+ *
+ * This function can be used to iterate all the keys known to
+ * mac80211, even those that weren't previously programmed into
+ * the device. Note that due to locking reasons, keys of a station
+ * in the removal process will be skipped.
+ *
+ * This function requires being called in an RCU critical section,
+ * and thus iter must be atomic.
+ */
+void ieee80211_iter_keys_rcu(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void (*iter)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data),
+ void *iter_data);
+
+/**
+ * ieee80211_iter_chan_contexts_atomic - iterate channel contexts
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @iter: iterator function
+ * @iter_data: data passed to iterator function
+ *
+ * Iterate all active channel contexts. This function is atomic and
+ * doesn't acquire any locks internally that might be held in other
+ * places while calling into the driver.
+ *
+ * The iterator will not find a context that's being added (during
+ * the driver callback to add it) but will find it while it's being
+ * removed.
+ *
+ * Note that during hardware restart, all contexts that existed
+ * before the restart are considered already present so will be
+ * found while iterating, whether they've been re-added already
+ * or not.
+ */
+void ieee80211_iter_chan_contexts_atomic(
+ struct ieee80211_hw *hw,
+ void (*iter)(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf,
+ void *data),
+ void *iter_data);
+
+/**
+ * ieee80211_ap_probereq_get - retrieve a Probe Request template
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Creates a Probe Request template which can, for example, be uploaded to
+ * hardware. The template is filled with bssid, ssid and supported rate
+ * information. This function must only be called from within the
+ * .bss_info_changed callback function and only in managed mode. The function
+ * is only useful when the interface is associated; otherwise it will return
+ * %NULL.
+ *
+ * Return: The Probe Request template. %NULL on error.
+ */
+struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_beacon_loss - inform hardware does not receive beacons
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * When beacon filtering is enabled with %IEEE80211_VIF_BEACON_FILTER and
+ * %IEEE80211_CONF_PS is set, the driver needs to use this function to
+ * inform mac80211 whenever the hardware is not receiving beacons.
+ */
+void ieee80211_beacon_loss(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_connection_loss - inform hardware has lost connection to the AP
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * When beacon filtering is enabled with %IEEE80211_VIF_BEACON_FILTER, and
+ * %IEEE80211_CONF_PS and %IEEE80211_HW_CONNECTION_MONITOR are set, the driver
+ * needs to inform mac80211 if the connection to the AP has been lost.
+ * The function may also be called if the connection needs to be terminated
+ * for some other reason, even if %IEEE80211_HW_CONNECTION_MONITOR isn't set.
+ *
+ * This function will cause immediate change to disassociated state,
+ * without connection recovery attempts.
+ */
+void ieee80211_connection_loss(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_disconnect - request disconnection
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @reconnect: immediate reconnect is desired
+ *
+ * Request disconnection from the current network and, if @reconnect is set,
+ * send a hint to the higher layers that an immediate reconnect is desired.
+ */
+void ieee80211_disconnect(struct ieee80211_vif *vif, bool reconnect);
+
+/**
+ * ieee80211_resume_disconnect - disconnect from AP after resume
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Instructs mac80211 to disconnect from the AP after resume.
+ * Drivers can use this after WoWLAN if they know that the
+ * connection cannot be kept up, for example because keys were
+ * used while the device was asleep but the replay counters or
+ * similar cannot be retrieved from the device during resume.
+ *
+ * Note that due to implementation issues, if the driver uses
+ * the reconfiguration functionality during resume, the interface
+ * will still first be added as associated during resume and
+ * then disconnected normally later.
+ *
+ * This function can only be called from the resume callback and
+ * the driver must not be holding any of its own locks while it
+ * calls this function, or at least not any locks it needs in the
+ * key configuration paths (if it supports HW crypto).
+ */
+void ieee80211_resume_disconnect(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_hw_restart_disconnect - disconnect from AP after
+ * hardware restart
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Instructs mac80211 to disconnect from the AP after
+ * hardware restart.
+ */
+void ieee80211_hw_restart_disconnect(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_cqm_rssi_notify - inform that a configured connection quality
+ *	monitoring RSSI threshold was triggered
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @rssi_event: the RSSI trigger event type
+ * @rssi_level: new RSSI level value or 0 if not available
+ * @gfp: context flags
+ *
+ * When %IEEE80211_VIF_SUPPORTS_CQM_RSSI is set and connection quality
+ * monitoring is configured with an RSSI threshold, the driver uses this
+ * function to inform mac80211 whenever the RSSI level crosses the threshold.
+ */
+void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
+ enum nl80211_cqm_rssi_threshold_event rssi_event,
+ s32 rssi_level,
+ gfp_t gfp);
+
+/**
+ * ieee80211_cqm_beacon_loss_notify - inform CQM of beacon loss
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @gfp: context flags
+ */
+void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp);
+
+/**
+ * ieee80211_radar_detected - inform that a radar was detected
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ */
+void ieee80211_radar_detected(struct ieee80211_hw *hw);
+
+/**
+ * ieee80211_chswitch_done - Complete channel switch process
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @success: whether the channel switch was successful
+ *
+ * Complete the channel switch post-process: set the new operational channel
+ * and wake up the suspended queues.
+ */
+void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success);
+
+/**
+ * ieee80211_channel_switch_disconnect - disconnect due to channel switch error
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @block_tx: if %true, do not send deauth frame.
+ *
+ * Instruct mac80211 to disconnect due to a channel switch error. The channel
+ * switch can request to block TX, in which case we must make sure not to
+ * send a deauth frame.
+ */
+void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif,
+ bool block_tx);
+
+/**
+ * ieee80211_request_smps - request SM PS transition
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: link ID for MLO, or 0
+ * @smps_mode: new SM PS mode
+ *
+ * This allows the driver to request an SM PS transition in managed
+ * mode. This is useful when the driver has more information than
+ * the stack about possible interference, for example from Bluetooth.
+ */
+void ieee80211_request_smps(struct ieee80211_vif *vif, unsigned int link_id,
+ enum ieee80211_smps_mode smps_mode);
+
+/**
+ * ieee80211_ready_on_channel - notification of remain-on-channel start
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ */
+void ieee80211_ready_on_channel(struct ieee80211_hw *hw);
+
+/**
+ * ieee80211_remain_on_channel_expired - remain_on_channel duration expired
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ */
+void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw);
+
+/**
+ * ieee80211_stop_rx_ba_session - callback to stop existing BA sessions
+ *
+ * In order not to harm system performance and user experience, the device
+ * may request not to allow any RX BA session and to tear down existing RX BA
+ * sessions based on system constraints such as periodic BT activity that
+ * needs to limit WLAN activity (e.g. SCO or A2DP). In such cases, the
+ * intention is to limit the duration of the RX PPDU and therefore prevent
+ * the peer device from using A-MPDU aggregation.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @ba_rx_bitmap: bitmap of open RX BA sessions, one bit per TID
+ * @addr: the BSSID MAC address
+ */
+void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
+ const u8 *addr);
+
+/**
+ * ieee80211_mark_rx_ba_filtered_frames - move RX BA window and mark filtered
+ * @pubsta: station struct
+ * @tid: the session's TID
+ * @ssn: starting sequence number of the bitmap, all frames before this are
+ * assumed to be out of the window after the call
+ * @filtered: bitmap of filtered frames, BIT(0) is the @ssn entry etc.
+ * @received_mpdus: number of received mpdus in firmware
+ *
+ * This function moves the BA window and releases all frames before @ssn, and
+ * marks the frames set in the bitmap as having been filtered. Afterwards, it
+ * checks if any frames in the window starting from @ssn can now be released
+ * (in case they were only waiting for frames that were filtered).
+ * (This only works correctly if @max_rx_aggregation_subframes <= 64 frames.)
+ */
+void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+ u16 ssn, u64 filtered,
+ u16 received_mpdus);
+
+/**
+ * ieee80211_send_bar - send a BlockAckReq frame
+ *
+ * can be used to flush pending frames from the peer's aggregation reorder
+ * buffer.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @ra: the peer's destination address
+ * @tid: the TID of the aggregation session
+ * @ssn: the new starting sequence number for the receiver
+ */
+void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn);
+
+/**
+ * ieee80211_manage_rx_ba_offl - helper to queue an RX BA work
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
+ * @addr: station mac address
+ * @tid: the rx tid
+ */
+void ieee80211_manage_rx_ba_offl(struct ieee80211_vif *vif, const u8 *addr,
+ unsigned int tid);
+
+/**
+ * ieee80211_start_rx_ba_session_offl - start a Rx BA session
+ *
+ * Some device drivers may offload part of the Rx aggregation flow including
+ * AddBa/DelBa negotiation but may otherwise be incapable of full Rx
+ * reordering.
+ *
+ * Create structures responsible for reordering so device drivers may call here
+ * when they complete AddBa negotiation.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
+ * @addr: station mac address
+ * @tid: the rx tid
+ */
+static inline void ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif,
+ const u8 *addr, u16 tid)
+{
+ if (WARN_ON(tid >= IEEE80211_NUM_TIDS))
+ return;
+ ieee80211_manage_rx_ba_offl(vif, addr, tid);
+}
+
+/**
+ * ieee80211_stop_rx_ba_session_offl - stop a Rx BA session
+ *
+ * Some device drivers may offload part of the Rx aggregation flow including
+ * AddBa/DelBa negotiation but may otherwise be incapable of full Rx
+ * reordering.
+ *
+ * Destroy structures responsible for reordering so device drivers may call here
+ * when they complete DelBa negotiation.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
+ * @addr: station mac address
+ * @tid: the rx tid
+ */
+static inline void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
+ const u8 *addr, u16 tid)
+{
+ if (WARN_ON(tid >= IEEE80211_NUM_TIDS))
+ return;
+ ieee80211_manage_rx_ba_offl(vif, addr, tid + IEEE80211_NUM_TIDS);
+}
+
+/**
+ * ieee80211_rx_ba_timer_expired - stop a Rx BA session due to timeout
+ *
+ * Some device drivers do not offload AddBa/DelBa negotiation, but handle rx
+ * buffer reordering internally, and therefore also handle the session timer.
+ *
+ * Trigger the timeout flow, which sends a DelBa.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
+ * @addr: station mac address
+ * @tid: the rx tid
+ */
+void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif,
+ const u8 *addr, unsigned int tid);
+
+/* Rate control API */
+
+/**
+ * struct ieee80211_tx_rate_control - rate control information for/from RC algo
+ *
+ * @hw: The hardware the algorithm is invoked for.
+ * @sband: The band this frame is being transmitted on.
+ * @bss_conf: the current BSS configuration
+ * @skb: the skb that will be transmitted, the control information in it needs
+ * to be filled in
+ * @reported_rate: The rate control algorithm can fill this in to indicate
+ * which rate should be reported to userspace as the current rate and
+ * used for rate calculations in the mesh network.
+ * @rts: whether RTS will be used for this frame because it is longer than the
+ * RTS threshold
+ * @short_preamble: whether mac80211 will request short-preamble transmission
+ * if the selected rate supports it
+ * @rate_idx_mask: user-requested (legacy) rate mask
+ * @rate_idx_mcs_mask: user-requested MCS rate mask (NULL if not in use)
+ * @bss: whether this frame is sent out in AP or IBSS mode
+ */
+struct ieee80211_tx_rate_control {
+ struct ieee80211_hw *hw;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_bss_conf *bss_conf;
+ struct sk_buff *skb;
+ struct ieee80211_tx_rate reported_rate;
+ bool rts, short_preamble;
+ u32 rate_idx_mask;
+ u8 *rate_idx_mcs_mask;
+ bool bss;
+};
+
+/**
+ * enum rate_control_capabilities - rate control capabilities
+ */
+enum rate_control_capabilities {
+ /**
+ * @RATE_CTRL_CAPA_VHT_EXT_NSS_BW:
+ * Support for extended NSS BW (dot11VHTExtendedNSSCapable).
+ * Note that this is only looked at if the minimum number of chains
+ * that the AP uses is < the number of TX chains the hardware has,
+ * otherwise the NSS difference is irrelevant.
+ */
+ RATE_CTRL_CAPA_VHT_EXT_NSS_BW = BIT(0),
+ /**
+ * @RATE_CTRL_CAPA_AMPDU_TRIGGER:
+ * mac80211 should start A-MPDU sessions on tx
+ */
+ RATE_CTRL_CAPA_AMPDU_TRIGGER = BIT(1),
+};
+
+struct rate_control_ops {
+ unsigned long capa;
+ const char *name;
+ void *(*alloc)(struct ieee80211_hw *hw);
+ void (*add_debugfs)(struct ieee80211_hw *hw, void *priv,
+ struct dentry *debugfsdir);
+ void (*free)(void *priv);
+
+ void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp);
+ void (*rate_init)(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *priv_sta);
+ void (*rate_update)(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *priv_sta,
+ u32 changed);
+ void (*free_sta)(void *priv, struct ieee80211_sta *sta,
+ void *priv_sta);
+
+ void (*tx_status_ext)(void *priv,
+ struct ieee80211_supported_band *sband,
+ void *priv_sta, struct ieee80211_tx_status *st);
+ void (*tx_status)(void *priv, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ struct sk_buff *skb);
+ void (*get_rate)(void *priv, struct ieee80211_sta *sta, void *priv_sta,
+ struct ieee80211_tx_rate_control *txrc);
+
+ void (*add_sta_debugfs)(void *priv, void *priv_sta,
+ struct dentry *dir);
+
+ u32 (*get_expected_throughput)(void *priv_sta);
+};
+
+static inline int rate_supported(struct ieee80211_sta *sta,
+ enum nl80211_band band,
+ int index)
+{
+ return (sta == NULL || sta->deflink.supp_rates[band] & BIT(index));
+}
+
+static inline s8
+rate_lowest_index(struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++)
+ if (rate_supported(sta, sband->band, i))
+ return i;
+
+ /* warn when we cannot find a rate. */
+ WARN_ON_ONCE(1);
+
+ /* and return 0 (the lowest index) */
+ return 0;
+}
+
+static inline
+bool rate_usable_index_exists(struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta)
+{
+ unsigned int i;
+
+ for (i = 0; i < sband->n_bitrates; i++)
+ if (rate_supported(sta, sband->band, i))
+ return true;
+ return false;
+}
+
+/**
+ * rate_control_set_rates - pass the sta rate selection to mac80211/driver
+ *
+ * When not doing a rate control probe to test rates, rate control should pass
+ * its rate selection to mac80211. If the driver supports receiving a station
+ * rate table, it will use it to ensure that frames are always sent based on
+ * the most recent rate control module decision.
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @pubsta: &struct ieee80211_sta pointer to the target destination.
+ * @rates: new tx rate set to be used for this station.
+ */
+int rate_control_set_rates(struct ieee80211_hw *hw,
+ struct ieee80211_sta *pubsta,
+ struct ieee80211_sta_rates *rates);
+
+int ieee80211_rate_control_register(const struct rate_control_ops *ops);
+void ieee80211_rate_control_unregister(const struct rate_control_ops *ops);
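+
+/*
+ * Example (illustrative sketch): registering a rate control algorithm from
+ * module init/exit. Only .name is filled in here; a real algorithm also
+ * provides the &struct rate_control_ops callbacks (the my_rc_* names are
+ * hypothetical).
+ */
+static const struct rate_control_ops my_rc_ops = {
+	.name = "my_rc",
+	/* .alloc, .alloc_sta, .get_rate, ... provided by the algorithm */
+};
+
+static int __init my_rc_init(void)
+{
+	return ieee80211_rate_control_register(&my_rc_ops);
+}
+module_init(my_rc_init);
+
+static void __exit my_rc_exit(void)
+{
+	ieee80211_rate_control_unregister(&my_rc_ops);
+}
+module_exit(my_rc_exit);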
+
+static inline bool
+conf_is_ht20(struct ieee80211_conf *conf)
+{
+ return conf->chandef.width == NL80211_CHAN_WIDTH_20;
+}
+
+static inline bool
+conf_is_ht40_minus(struct ieee80211_conf *conf)
+{
+ return conf->chandef.width == NL80211_CHAN_WIDTH_40 &&
+ conf->chandef.center_freq1 < conf->chandef.chan->center_freq;
+}
+
+static inline bool
+conf_is_ht40_plus(struct ieee80211_conf *conf)
+{
+ return conf->chandef.width == NL80211_CHAN_WIDTH_40 &&
+ conf->chandef.center_freq1 > conf->chandef.chan->center_freq;
+}
+
+static inline bool
+conf_is_ht40(struct ieee80211_conf *conf)
+{
+ return conf->chandef.width == NL80211_CHAN_WIDTH_40;
+}
+
+static inline bool
+conf_is_ht(struct ieee80211_conf *conf)
+{
+ return (conf->chandef.width != NL80211_CHAN_WIDTH_5) &&
+ (conf->chandef.width != NL80211_CHAN_WIDTH_10) &&
+ (conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT);
+}
+
+static inline enum nl80211_iftype
+ieee80211_iftype_p2p(enum nl80211_iftype type, bool p2p)
+{
+ if (p2p) {
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ return NL80211_IFTYPE_P2P_CLIENT;
+ case NL80211_IFTYPE_AP:
+ return NL80211_IFTYPE_P2P_GO;
+ default:
+ break;
+ }
+ }
+ return type;
+}
+
+static inline enum nl80211_iftype
+ieee80211_vif_type_p2p(struct ieee80211_vif *vif)
+{
+ return ieee80211_iftype_p2p(vif->type, vif->p2p);
+}
+
+/**
+ * ieee80211_update_mu_groups - set the VHT MU-MIMO group data
+ *
+ * @vif: the specified virtual interface
+ * @link_id: the link ID for MLO, otherwise 0
+ * @membership: 64-bit bitmap - a bit is set if the station is a member of
+ * the group
+ * @position: 2 bits per group id indicating the position in the group
+ *
+ * Note: This function assumes that the given vif is valid and that the
+ * position and membership data are of the correct size and in the same byte
+ * order as the matching GroupId management frame.
+ * Calls to this function need to be serialized with the RX path.
+ */
+void ieee80211_update_mu_groups(struct ieee80211_vif *vif, unsigned int link_id,
+ const u8 *membership, const u8 *position);
+
+void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif,
+ int rssi_min_thold,
+ int rssi_max_thold);
+
+void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_ave_rssi - report the average RSSI for the specified interface
+ *
+ * @vif: the specified virtual interface
+ *
+ * Note: This function assumes that the given vif is valid.
+ *
+ * Return: The average RSSI value for the requested interface, or 0 if not
+ * applicable.
+ */
+int ieee80211_ave_rssi(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_report_wowlan_wakeup - report WoWLAN wakeup
+ * @vif: virtual interface
+ * @wakeup: wakeup reason(s)
+ * @gfp: allocation flags
+ *
+ * See cfg80211_report_wowlan_wakeup().
+ */
+void ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif,
+ struct cfg80211_wowlan_wakeup *wakeup,
+ gfp_t gfp);
+
+/**
+ * ieee80211_tx_prepare_skb - prepare an 802.11 skb for transmission
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @vif: virtual interface
+ * @skb: frame to be sent from within the driver
+ * @band: the band to transmit on
+ * @sta: optional pointer to get the station to send the frame to
+ *
+ * Note: must be called under RCU lock
+ */
+bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, struct sk_buff *skb,
+ int band, struct ieee80211_sta **sta);
+
+/**
+ * ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header
+ * of injected frames.
+ *
+ * To accurately parse and take into account rate and retransmission fields,
+ * you must initialize the chandef field in the ieee80211_tx_info structure
+ * of the skb before calling this function.
+ *
+ * @skb: packet injected by userspace
+ * @dev: the &struct device of this 802.11 device
+ */
+bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
+ struct net_device *dev);
+
+/**
+ * struct ieee80211_noa_data - holds temporary data for tracking P2P NoA state
+ *
+ * @next_tsf: TSF timestamp of the next absent state change
+ * @has_next_tsf: next absent state change event pending
+ *
+ * @absent: descriptor bitmask, set if GO is currently absent
+ *
+ * private:
+ *
+ * @count: count fields from the NoA descriptors
+ * @desc: adjusted data from the NoA
+ */
+struct ieee80211_noa_data {
+ u32 next_tsf;
+ bool has_next_tsf;
+
+ u8 absent;
+
+ u8 count[IEEE80211_P2P_NOA_DESC_MAX];
+ struct {
+ u32 start;
+ u32 duration;
+ u32 interval;
+ } desc[IEEE80211_P2P_NOA_DESC_MAX];
+};
+
+/**
+ * ieee80211_parse_p2p_noa - initialize NoA tracking data from P2P IE
+ *
+ * @attr: P2P NoA IE
+ * @data: NoA tracking data
+ * @tsf: current TSF timestamp
+ *
+ * Return: number of successfully parsed descriptors
+ */
+int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr,
+ struct ieee80211_noa_data *data, u32 tsf);
+
+/**
+ * ieee80211_update_p2p_noa - get next pending P2P GO absent state change
+ *
+ * @data: NoA tracking data
+ * @tsf: current TSF timestamp
+ */
+void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf);
+
+/**
+ * ieee80211_tdls_oper_request - request userspace to perform a TDLS operation
+ * @vif: virtual interface
+ * @peer: the peer's destination address
+ * @oper: the requested TDLS operation
+ * @reason_code: reason code for the operation, valid for TDLS teardown
+ * @gfp: allocation flags
+ *
+ * See cfg80211_tdls_oper_request().
+ */
+void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer,
+ enum nl80211_tdls_operation oper,
+ u16 reason_code, gfp_t gfp);
+
+/**
+ * ieee80211_reserve_tid - request to reserve a specific TID
+ *
+ * There is sometimes a need (such as in TDLS) for blocking the driver from
+ * using a specific TID so that the FW can use it for certain operations such
+ * as sending PTI requests. To make sure that the driver doesn't use that TID,
+ * this function must be called as it flushes out packets on this TID and marks
+ * it as blocked, so that any transmit for the station on this TID will be
+ * redirected to the alternative TID in the same AC.
+ *
+ * Note that this function blocks and may call back into the driver, so it
+ * should be called without driver locks held. Also note this function should
+ * only be called from the driver's @sta_state callback.
+ *
+ * @sta: the station to reserve the TID for
+ * @tid: the TID to reserve
+ *
+ * Returns: 0 on success; a negative error code on failure
+ */
+int ieee80211_reserve_tid(struct ieee80211_sta *sta, u8 tid);
+
+/**
+ * ieee80211_unreserve_tid - request to unreserve a specific TID
+ *
+ * Once there is no longer any need for reserving a certain TID, this function
+ * should be called; packets will then no longer have their TID modified to
+ * prevent use of this TID in the driver.
+ *
+ * Note that this function blocks and acquires a lock, so it should be called
+ * without driver locks held. Also note this function should only be called
+ * from the driver's @sta_state callback.
+ *
+ * @sta: the station
+ * @tid: the TID to unreserve
+ */
+void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
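+
+/*
+ * Example (illustrative sketch): reserving a TID for firmware PTI requests
+ * when a station reaches associated state, and releasing it on the way
+ * down. MY_PTI_TID and my_sta_state() are hypothetical; this logic would
+ * live in the driver's @sta_state callback as required above.
+ */
+#define MY_PTI_TID 5
+
+static int my_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta,
+			enum ieee80211_sta_state old_state,
+			enum ieee80211_sta_state new_state)
+{
+	if (old_state == IEEE80211_STA_AUTH &&
+	    new_state == IEEE80211_STA_ASSOC)
+		return ieee80211_reserve_tid(sta, MY_PTI_TID);
+
+	if (old_state == IEEE80211_STA_ASSOC &&
+	    new_state == IEEE80211_STA_AUTH)
+		ieee80211_unreserve_tid(sta, MY_PTI_TID);
+
+	return 0;
+}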
+
+/**
+ * ieee80211_tx_dequeue - dequeue a packet from a software tx queue
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface, or from
+ * ieee80211_next_txq()
+ *
+ * Returns the skb if successful, %NULL if no frame was available.
+ *
+ * Note that this must be called in an rcu_read_lock() critical section,
+ * which can only be released after the SKB was handled. Some pointers in
+ * skb->cb, e.g. the key pointer, are protected by RCU and thus the
+ * critical section must persist not just for the duration of this call
+ * but for the duration of the frame handling.
+ * However, also note that while in the wake_tx_queue() method,
+ * rcu_read_lock() is already held.
+ *
+ * softirqs must also be disabled when this function is called.
+ * In process context, use ieee80211_tx_dequeue_ni() instead.
+ */
+struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+
+/**
+ * ieee80211_tx_dequeue_ni - dequeue a packet from a software tx queue
+ * (in process context)
+ *
+ * Like ieee80211_tx_dequeue() but can be called in process context
+ * (internally disables bottom halves).
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface, or from
+ * ieee80211_next_txq()
+ */
+static inline struct sk_buff *ieee80211_tx_dequeue_ni(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct sk_buff *skb;
+
+ local_bh_disable();
+ skb = ieee80211_tx_dequeue(hw, txq);
+ local_bh_enable();
+
+ return skb;
+}
+
+/**
+ * ieee80211_next_txq - get next tx queue to pull packets from
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @ac: AC number to return packets from.
+ *
+ * Returns the next txq if successful, %NULL if no queue is eligible. If a txq
+ * is returned, it should be returned with ieee80211_return_txq() after the
+ * driver has finished scheduling it.
+ */
+struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac);
+
+/**
+ * ieee80211_txq_schedule_start - start new scheduling round for TXQs
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @ac: AC number to acquire locks for
+ *
+ * Should be called before ieee80211_next_txq() or ieee80211_return_txq().
+ * The driver must not call multiple TXQ scheduling rounds concurrently.
+ */
+void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac);
+
+/* (deprecated) */
+static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+{
+}
+
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq, bool force);
+
+/**
+ * ieee80211_schedule_txq - schedule a TXQ for transmission
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface
+ *
+ * Schedules a TXQ for transmission if it is not already scheduled,
+ * even if mac80211 does not have any packets buffered.
+ *
+ * The driver may call this function if it has buffered packets for
+ * this TXQ internally.
+ */
+static inline void
+ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+ __ieee80211_schedule_txq(hw, txq, true);
+}
+
+/**
+ * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface
+ * @force: schedule txq even if mac80211 does not have any buffered packets.
+ *
+ * The driver may set force=true if it has buffered packets for this TXQ
+ * internally.
+ */
+static inline void
+ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
+ bool force)
+{
+ __ieee80211_schedule_txq(hw, txq, force);
+}
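+
+/*
+ * Example (illustrative sketch): a wake_tx_queue() implementation running
+ * one scheduling round with the functions above. my_tx_frame() is a
+ * hypothetical helper that hands a frame to the hardware.
+ */
+static void my_wake_tx_queue(struct ieee80211_hw *hw,
+			     struct ieee80211_txq *txq)
+{
+	u8 ac = txq->ac;
+
+	ieee80211_txq_schedule_start(hw, ac);
+	while ((txq = ieee80211_next_txq(hw, ac))) {
+		struct sk_buff *skb;
+
+		/* rcu_read_lock() is already held in wake_tx_queue() */
+		while ((skb = ieee80211_tx_dequeue(hw, txq)))
+			my_tx_frame(hw, skb);
+
+		/* hand the TXQ back to the scheduler */
+		ieee80211_return_txq(hw, txq, false);
+	}
+	ieee80211_txq_schedule_end(hw, ac);	/* no-op, kept for symmetry */
+}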
+
+/**
+ * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
+ *
+ * This function is used to check whether the given txq is allowed to transmit
+ * by the airtime scheduler, and can be used by drivers to access the airtime
+ * fairness accounting without using the scheduling order enforced by
+ * next_txq().
+ *
+ * Returns %true if the airtime scheduler thinks the TXQ should be allowed to
+ * transmit, and %false if it should be throttled. This function can also have
+ * the side effect of rotating the TXQ in the scheduler rotation, which will
+ * eventually bring the deficit to positive and allow the station to transmit
+ * again.
+ *
+ * ieee80211_txq_may_transmit() also ensures that the TXQ list is kept in
+ * sync with the driver's own round-robin scheduler list, i.e. it rotates
+ * the TXQ list until the requested node becomes the first entry in it.
+ * Thus both the TXQ list and the driver's list stay in sync. If this
+ * function returns %true, the driver is expected to schedule packets
+ * for transmission, and then return the TXQ through ieee80211_return_txq().
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface
+ */
+bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+
+/**
+ * ieee80211_txq_get_depth - get pending frame/byte count of given txq
+ *
+ * The values are not guaranteed to be coherent with regard to each other, i.e.
+ * the txq state can change halfway through this function and the caller may
+ * end up with a "new" frame_cnt and an "old" byte_cnt or vice versa.
+ *
+ * @txq: pointer obtained from station or virtual interface
+ * @frame_cnt: pointer to store frame count
+ * @byte_cnt: pointer to store byte count
+ */
+void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
+ unsigned long *frame_cnt,
+ unsigned long *byte_cnt);
+
+/**
+ * ieee80211_nan_func_terminated - notify about NAN function termination.
+ *
+ * This function is used to notify mac80211 about NAN function termination.
+ * Note that this function cannot be called from hard IRQ context.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @inst_id: the local instance id
+ * @reason: termination reason (one of the NL80211_NAN_FUNC_TERM_REASON_*)
+ * @gfp: allocation flags
+ */
+void ieee80211_nan_func_terminated(struct ieee80211_vif *vif,
+ u8 inst_id,
+ enum nl80211_nan_func_term_reason reason,
+ gfp_t gfp);
+
+/**
+ * ieee80211_nan_func_match - notify about NAN function match event.
+ *
+ * This function is used to notify mac80211 about NAN function match. The
+ * cookie inside the match struct will be assigned by mac80211.
+ * Note that this function cannot be called from hard IRQ context.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @match: match event information
+ * @gfp: allocation flags
+ */
+void ieee80211_nan_func_match(struct ieee80211_vif *vif,
+ struct cfg80211_nan_match_params *match,
+ gfp_t gfp);
+
+/**
+ * ieee80211_calc_rx_airtime - calculate estimated transmission airtime for RX.
+ *
+ * This function calculates the estimated airtime usage of a frame based on the
+ * rate information in the RX status struct and the frame length.
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @status: &struct ieee80211_rx_status containing the transmission rate
+ * information.
+ * @len: frame length in bytes
+ */
+u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *status,
+ int len);
+
+/**
+ * ieee80211_calc_tx_airtime - calculate estimated transmission airtime for TX.
+ *
+ * This function calculates the estimated airtime usage of a frame based on the
+ * rate information in the TX info struct and the frame length.
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @info: &struct ieee80211_tx_info of the frame.
+ * @len: frame length in bytes
+ */
+u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
+ struct ieee80211_tx_info *info,
+ int len);
+/**
+ * ieee80211_set_hw_80211_encap - enable hardware encapsulation offloading.
+ *
+ * This function is used to notify mac80211 that a vif can be passed raw 802.3
+ * frames. The driver then needs to handle the 802.11 encapsulation inside the
+ * hardware or firmware.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @enable: indicate if the feature should be turned on or off
+ */
+bool ieee80211_set_hw_80211_encap(struct ieee80211_vif *vif, bool enable);
+
+/**
+ * ieee80211_get_fils_discovery_tmpl - Get FILS discovery template.
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: FILS discovery template. %NULL on error.
+ */
+struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_get_unsol_bcast_probe_resp_tmpl - Get unsolicited broadcast
+ * probe response template.
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: Unsolicited broadcast probe response template. %NULL on error.
+ */
+struct sk_buff *
+ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
+ * ieeee80211_obss_color_collision_notify - notify userland about a BSS color
+ * collision.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @color_bitmap: a 64 bit bitmap representing the colors that the local BSS is
+ * aware of.
+ * @gfp: allocation flags
+ */
+void
+ieeee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
+ u64 color_bitmap, gfp_t gfp);
+
+/**
+ * ieee80211_is_tx_data - check if frame is a data frame
+ *
+ * The function is used to check if a frame is a data frame. Frames with
+ * hardware encapsulation enabled are data frames.
+ *
+ * @skb: the frame to be transmitted.
+ */
+static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *) skb->data;
+
+ return info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP ||
+ ieee80211_is_data(hdr->frame_control);
+}
+
+/**
+ * ieee80211_set_active_links - set active links in client mode
+ * @vif: interface to set active links on
+ * @active_links: the new active links bitmap
+ *
+ * This changes the active links on an interface. The interface
+ * must be in client mode (in AP mode, all links are always active),
+ * and @active_links must be a subset of the vif's valid_links.
+ *
+ * If a link is switched off and another is switched on at the same
+ * time (e.g. active_links going from 0x1 to 0x10) then you will get
+ * a sequence of calls like
+ * - change_vif_links(0x11)
+ * - unassign_vif_chanctx(link_id=0)
+ * - change_sta_links(0x11) for each affected STA (the AP)
+ * (TDLS connections on now inactive links should be torn down)
+ * - remove group keys on the old link (link_id 0)
+ * - add new group keys (GTK/IGTK/BIGTK) on the new link (link_id 4)
+ * - change_sta_links(0x10) for each affected STA (the AP)
+ * - assign_vif_chanctx(link_id=4)
+ * - change_vif_links(0x10)
+ *
+ * Note: This function acquires some mac80211 locks and must not
+ * be called with any driver locks held that could cause a
+ * lock dependency inversion. Best call it without locks.
+ */
+int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links);
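+
+/*
+ * Example (illustrative sketch): since @active_links is a bitmap of link
+ * IDs, moving a client vif to a single link would look like this; the
+ * wrapper name is hypothetical.
+ */
+static int my_switch_to_link(struct ieee80211_vif *vif, unsigned int link_id)
+{
+	/* must be called without driver locks held, see note above */
+	return ieee80211_set_active_links(vif, BIT(link_id));
+}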
+
+/**
+ * ieee80211_set_active_links_async - asynchronously set active links
+ * @vif: interface to set active links on
+ * @active_links: the new active links bitmap
+ *
+ * See ieee80211_set_active_links() for more information, the only
+ * difference here is that the link change is triggered async and
+ * can be called in any context, but the link switch will only be
+ * completed after it returns.
+ */
+void ieee80211_set_active_links_async(struct ieee80211_vif *vif,
+ u16 active_links);
+
+#endif /* MAC80211_H */
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
new file mode 100644
index 000000000..bdac0ddbd
--- /dev/null
+++ b/include/net/mac802154.h
@@ -0,0 +1,520 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE802.15.4-2003 specification
+ *
+ * Copyright (C) 2007-2012 Siemens AG
+ */
+#ifndef NET_MAC802154_H
+#define NET_MAC802154_H
+
+#include <asm/unaligned.h>
+#include <net/af_ieee802154.h>
+#include <linux/ieee802154.h>
+#include <linux/skbuff.h>
+
+#include <net/cfg802154.h>
+
+/**
+ * enum ieee802154_hw_addr_filt_flags - hardware address filtering flags
+ *
+ * The following flags are used to indicate changed address settings from
+ * the stack to the hardware.
+ *
+ * @IEEE802154_AFILT_SADDR_CHANGED: Indicates that the short address
+ * changed.
+ *
+ * @IEEE802154_AFILT_IEEEADDR_CHANGED: Indicates that the extended address
+ * changed.
+ *
+ * @IEEE802154_AFILT_PANID_CHANGED: Indicates that the PAN id changed.
+ *
+ * @IEEE802154_AFILT_PANC_CHANGED: Indicates that the address filter will
+ * do frame address filtering as a PAN coordinator.
+ */
+enum ieee802154_hw_addr_filt_flags {
+ IEEE802154_AFILT_SADDR_CHANGED = BIT(0),
+ IEEE802154_AFILT_IEEEADDR_CHANGED = BIT(1),
+ IEEE802154_AFILT_PANID_CHANGED = BIT(2),
+ IEEE802154_AFILT_PANC_CHANGED = BIT(3),
+};
+
+/**
+ * struct ieee802154_hw_addr_filt - hardware address filtering settings
+ *
+ * @pan_id: pan_id which should be set to the hardware address filter.
+ *
+ * @short_addr: short_addr which should be set to the hardware address filter.
+ *
+ * @ieee_addr: extended address which should be set to the hardware address
+ * filter.
+ *
+ * @pan_coord: whether hardware address filtering should operate as a PAN
+ *	coordinator.
+ */
+struct ieee802154_hw_addr_filt {
+ __le16 pan_id;
+ __le16 short_addr;
+ __le64 ieee_addr;
+ bool pan_coord;
+};
+
+/**
+ * struct ieee802154_hw - ieee802154 hardware
+ *
+ * @extra_tx_headroom: headroom to reserve in each transmit skb for use by the
+ * driver (e.g. for transmit headers.)
+ *
+ * @flags: hardware flags, see &enum ieee802154_hw_flags
+ *
+ * @parent: parent device of the hardware.
+ *
+ * @priv: pointer to private area that was allocated for driver use along with
+ * this structure.
+ *
+ * @phy: This points to the &struct wpan_phy allocated for this 802.15.4 PHY.
+ */
+struct ieee802154_hw {
+ /* filled by the driver */
+ int extra_tx_headroom;
+ u32 flags;
+ struct device *parent;
+ void *priv;
+
+ /* filled by mac802154 core */
+ struct wpan_phy *phy;
+};
+
+/**
+ * enum ieee802154_hw_flags - hardware flags
+ *
+ * These flags are used to indicate hardware capabilities to
+ * the stack. Generally, flags here should have their meaning
+ * done in a way that the simplest hardware doesn't need setting
+ * any particular flags. There are some exceptions to this rule,
+ * however, so you are advised to review these flags carefully.
+ *
+ * @IEEE802154_HW_TX_OMIT_CKSUM: Indicates that the transmitter will add the
+ * FCS on its own.
+ *
+ * @IEEE802154_HW_LBT: Indicates that the transceiver supports listen before
+ * transmit.
+ *
+ * @IEEE802154_HW_CSMA_PARAMS: Indicates that the transceiver supports csma
+ * parameters (max_be, min_be, backoff exponents).
+ *
+ * @IEEE802154_HW_FRAME_RETRIES: Indicates that the transceiver supports ARET
+ * frame retries setting.
+ *
+ * @IEEE802154_HW_AFILT: Indicates that the transceiver supports hardware
+ * address filter setting.
+ *
+ * @IEEE802154_HW_PROMISCUOUS: Indicates that the transceiver supports
+ * promiscuous mode setting.
+ *
+ * @IEEE802154_HW_RX_OMIT_CKSUM: Indicates that the receiver omits the FCS.
+ *
+ * @IEEE802154_HW_RX_DROP_BAD_CKSUM: Indicates that the receiver will not
+ * filter frames with a bad checksum.
+ */
+enum ieee802154_hw_flags {
+ IEEE802154_HW_TX_OMIT_CKSUM = BIT(0),
+ IEEE802154_HW_LBT = BIT(1),
+ IEEE802154_HW_CSMA_PARAMS = BIT(2),
+ IEEE802154_HW_FRAME_RETRIES = BIT(3),
+ IEEE802154_HW_AFILT = BIT(4),
+ IEEE802154_HW_PROMISCUOUS = BIT(5),
+ IEEE802154_HW_RX_OMIT_CKSUM = BIT(6),
+ IEEE802154_HW_RX_DROP_BAD_CKSUM = BIT(7),
+};
+
+/* Indicates that the receiver omits the FCS and the transmitter adds the
+ * FCS on its own.
+ */
+#define IEEE802154_HW_OMIT_CKSUM (IEEE802154_HW_TX_OMIT_CKSUM | \
+ IEEE802154_HW_RX_OMIT_CKSUM)
+
+/* struct ieee802154_ops - callbacks from mac802154 to the driver
+ *
+ * This structure contains various callbacks that the driver may
+ * handle or, in some cases, must handle, for example to transmit
+ * a frame.
+ *
+ * start: Handler that 802.15.4 module calls for device initialization.
+ * This function is called before the first interface is attached.
+ *
+ * stop: Handler that 802.15.4 module calls for device cleanup.
+ * This function is called after the last interface is removed.
+ *
+ * xmit_sync:
+ * Handler that 802.15.4 module calls for each transmitted frame.
+ * skb contains the buffer starting from the IEEE 802.15.4 header.
+ * The low-level driver should send the frame based on available
+ * configuration. This is called by a workqueue and is useful for
+ * synchronous 802.15.4 drivers.
+ * This function should return zero or negative errno.
+ *
+ * WARNING:
+ * This will be deprecated soon. We no longer accept drivers with
+ * synchronous xmit callbacks.
+ *
+ * xmit_async:
+ * Handler that 802.15.4 module calls for each transmitted frame.
+ * skb contains the buffer starting from the IEEE 802.15.4 header.
+ * The low-level driver should send the frame based on available
+ * configuration.
+ * This function should return zero or negative errno.
+ *
+ * ed: Handler that 802.15.4 module calls for Energy Detection.
+ * This function should place the value for detected energy
+ * (usually device-dependent) in the level pointer and return
+ * either zero or negative errno. Called with pib_lock held.
+ *
+ * set_channel:
+ * Set the device for listening on the specified channel.
+ * Returns either zero, or negative errno. Called with pib_lock held.
+ *
+ * set_hw_addr_filt:
+ * Set the device for listening on the specified address.
+ * Returns either zero, or negative errno.
+ *
+ * set_txpower:
+ * Set radio transmit power in mBm. Called with pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_lbt:
+ * Enables or disables listen before talk on the device. Called with
+ * pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_cca_mode:
+ * Sets the CCA mode used by the device. Called with pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_cca_ed_level:
+ * Sets the CCA energy detection threshold in mBm. Called with pib_lock
+ * held.
+ * Returns either zero, or negative errno.
+ *
+ * set_csma_params:
+ * Sets the CSMA parameter set for the PHY. Called with pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_frame_retries:
+ * Sets the retransmission attempt limit. Called with pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_promiscuous_mode:
+ * Enables or disables promiscuous mode.
+ */
+struct ieee802154_ops {
+ struct module *owner;
+ int (*start)(struct ieee802154_hw *hw);
+ void (*stop)(struct ieee802154_hw *hw);
+ int (*xmit_sync)(struct ieee802154_hw *hw,
+ struct sk_buff *skb);
+ int (*xmit_async)(struct ieee802154_hw *hw,
+ struct sk_buff *skb);
+ int (*ed)(struct ieee802154_hw *hw, u8 *level);
+ int (*set_channel)(struct ieee802154_hw *hw, u8 page,
+ u8 channel);
+ int (*set_hw_addr_filt)(struct ieee802154_hw *hw,
+ struct ieee802154_hw_addr_filt *filt,
+ unsigned long changed);
+ int (*set_txpower)(struct ieee802154_hw *hw, s32 mbm);
+ int (*set_lbt)(struct ieee802154_hw *hw, bool on);
+ int (*set_cca_mode)(struct ieee802154_hw *hw,
+ const struct wpan_phy_cca *cca);
+ int (*set_cca_ed_level)(struct ieee802154_hw *hw, s32 mbm);
+ int (*set_csma_params)(struct ieee802154_hw *hw,
+ u8 min_be, u8 max_be, u8 retries);
+ int (*set_frame_retries)(struct ieee802154_hw *hw,
+ s8 retries);
+ int (*set_promiscuous_mode)(struct ieee802154_hw *hw,
+ const bool on);
+};
+
+/**
+ * ieee802154_get_fc_from_skb - get the frame control field from an skb
+ * @skb: skb where the frame control field will be taken from
+ */
+static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb)
+{
+ __le16 fc;
+
+ /* check if we can access the fc field at the skb_mac_header of the skb */
+ if (WARN_ON(!skb_mac_header_was_set(skb) ||
+ (skb_tail_pointer(skb) -
+ skb_mac_header(skb)) < IEEE802154_FC_LEN))
+ return cpu_to_le16(0);
+
+ memcpy(&fc, skb_mac_header(skb), IEEE802154_FC_LEN);
+ return fc;
+}
+
+/**
+ * ieee802154_skb_dst_pan - get the pointer to destination pan field
+ * @fc: mac header frame control field
+ * @skb: skb where the destination pan pointer will be taken from
+ */
+static inline unsigned char *ieee802154_skb_dst_pan(__le16 fc,
+ const struct sk_buff *skb)
+{
+ unsigned char *dst_pan;
+
+ switch (ieee802154_daddr_mode(fc)) {
+ case cpu_to_le16(IEEE802154_FCTL_ADDR_NONE):
+ dst_pan = NULL;
+ break;
+ case cpu_to_le16(IEEE802154_FCTL_DADDR_SHORT):
+ case cpu_to_le16(IEEE802154_FCTL_DADDR_EXTENDED):
+ dst_pan = skb_mac_header(skb) +
+ IEEE802154_FC_LEN +
+ IEEE802154_SEQ_LEN;
+ break;
+ default:
+ WARN_ONCE(1, "invalid addr mode detected");
+ dst_pan = NULL;
+ break;
+ }
+
+ return dst_pan;
+}
+
+/**
+ * ieee802154_skb_src_pan - get the pointer to source pan field
+ * @fc: mac header frame control field
+ * @skb: skb where the source pan pointer will be taken from
+ */
+static inline unsigned char *ieee802154_skb_src_pan(__le16 fc,
+ const struct sk_buff *skb)
+{
+ unsigned char *src_pan;
+
+ switch (ieee802154_saddr_mode(fc)) {
+ case cpu_to_le16(IEEE802154_FCTL_ADDR_NONE):
+ src_pan = NULL;
+ break;
+ case cpu_to_le16(IEEE802154_FCTL_SADDR_SHORT):
+ case cpu_to_le16(IEEE802154_FCTL_SADDR_EXTENDED):
+ /* if intra-pan and the source addr mode is not none,
+ * then the source pan id equals the destination pan id.
+ */
+ if (ieee802154_is_intra_pan(fc)) {
+ src_pan = ieee802154_skb_dst_pan(fc, skb);
+ break;
+ }
+
+ switch (ieee802154_daddr_mode(fc)) {
+ case cpu_to_le16(IEEE802154_FCTL_ADDR_NONE):
+ src_pan = skb_mac_header(skb) +
+ IEEE802154_FC_LEN +
+ IEEE802154_SEQ_LEN;
+ break;
+ case cpu_to_le16(IEEE802154_FCTL_DADDR_SHORT):
+ src_pan = skb_mac_header(skb) +
+ IEEE802154_FC_LEN +
+ IEEE802154_SEQ_LEN +
+ IEEE802154_PAN_ID_LEN +
+ IEEE802154_SHORT_ADDR_LEN;
+ break;
+ case cpu_to_le16(IEEE802154_FCTL_DADDR_EXTENDED):
+ src_pan = skb_mac_header(skb) +
+ IEEE802154_FC_LEN +
+ IEEE802154_SEQ_LEN +
+ IEEE802154_PAN_ID_LEN +
+ IEEE802154_EXTENDED_ADDR_LEN;
+ break;
+ default:
+ WARN_ONCE(1, "invalid addr mode detected");
+ src_pan = NULL;
+ break;
+ }
+ break;
+ default:
+ WARN_ONCE(1, "invalid addr mode detected");
+ src_pan = NULL;
+ break;
+ }
+
+ return src_pan;
+}
+
+/**
+ * ieee802154_skb_is_intra_pan_addressing - checks whether the mac addressing
+ *	indicates intra-pan communication
+ * @fc: mac header frame control field
+ * @skb: skb where the source and destination pan will be taken from
+ */
+static inline bool ieee802154_skb_is_intra_pan_addressing(__le16 fc,
+ const struct sk_buff *skb)
+{
+ unsigned char *dst_pan = ieee802154_skb_dst_pan(fc, skb),
+ *src_pan = ieee802154_skb_src_pan(fc, skb);
+
+ /* if either is NULL, this is not intra-pan addressing */
+ if (!dst_pan || !src_pan)
+ return false;
+
+ return !memcmp(dst_pan, src_pan, IEEE802154_PAN_ID_LEN);
+}
+
+/**
+ * ieee802154_be64_to_le64 - copies and converts be64 to le64
+ * @le64_dst: le64 destination pointer
+ * @be64_src: be64 source pointer
+ */
+static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
+{
+ put_unaligned_le64(get_unaligned_be64(be64_src), le64_dst);
+}
+
+/**
+ * ieee802154_le64_to_be64 - copies and converts le64 to be64
+ * @be64_dst: be64 destination pointer
+ * @le64_src: le64 source pointer
+ */
+static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
+{
+ put_unaligned_be64(get_unaligned_le64(le64_src), be64_dst);
+}
+
+/**
+ * ieee802154_le16_to_be16 - copies and converts le16 to be16
+ * @be16_dst: be16 destination pointer
+ * @le16_src: le16 source pointer
+ */
+static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src)
+{
+ put_unaligned_be16(get_unaligned_le16(le16_src), be16_dst);
+}
+
+/**
+ * ieee802154_be16_to_le16 - copies and converts be16 to le16
+ * @le16_dst: le16 destination pointer
+ * @be16_src: be16 source pointer
+ */
+static inline void ieee802154_be16_to_le16(void *le16_dst, const void *be16_src)
+{
+ put_unaligned_le16(get_unaligned_be16(be16_src), le16_dst);
+}
+
+/**
+ * ieee802154_alloc_hw - Allocate a new hardware device
+ *
+ * This must be called once for each hardware device. The returned pointer
+ * must be used to refer to this device when calling other functions.
+ * mac802154 allocates a private data area for the driver pointed to by
+ * @priv in &struct ieee802154_hw; the size of this area is given as
+ * @priv_data_len.
+ *
+ * @priv_data_len: length of private data
+ * @ops: callbacks for this device
+ *
+ * Return: A pointer to the new hardware device, or %NULL on error.
+ */
+struct ieee802154_hw *
+ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops);
+
+/**
+ * ieee802154_free_hw - free hardware descriptor
+ *
+ * This function frees everything that was allocated, including the
+ * private data for the driver. You must call ieee802154_unregister_hw()
+ * before calling this function.
+ *
+ * @hw: the hardware to free
+ */
+void ieee802154_free_hw(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_register_hw - Register hardware device
+ *
+ * You must call this function before any other functions in
+ * mac802154. Note that before the hardware can be registered, you
+ * need to fill in the contained wpan_phy's information.
+ *
+ * @hw: the device to register as returned by ieee802154_alloc_hw()
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+int ieee802154_register_hw(struct ieee802154_hw *hw);
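+
+/*
+ * Example (illustrative sketch): probe-time bring-up of an 802.15.4
+ * driver. struct my_priv, struct my_bus_device and the my_*() callbacks
+ * are hypothetical; a real driver also fills in hw->phy capabilities
+ * before registering.
+ */
+static const struct ieee802154_ops my_ops = {
+	.owner		= THIS_MODULE,
+	.start		= my_start,
+	.stop		= my_stop,
+	.xmit_async	= my_xmit_async,
+	.ed		= my_ed,
+	.set_channel	= my_set_channel,
+};
+
+static int my_probe(struct my_bus_device *bdev)
+{
+	struct ieee802154_hw *hw;
+	struct my_priv *priv;
+	int ret;
+
+	hw = ieee802154_alloc_hw(sizeof(*priv), &my_ops);
+	if (!hw)
+		return -ENOMEM;
+
+	priv = hw->priv;
+	priv->hw = hw;	/* stash a back-pointer in the driver state */
+	hw->parent = &bdev->dev;
+
+	ret = ieee802154_register_hw(hw);
+	if (ret)
+		ieee802154_free_hw(hw);
+	return ret;
+}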
+
+/**
+ * ieee802154_unregister_hw - Unregister a hardware device
+ *
+ * This function instructs mac802154 to free allocated resources
+ * and unregister netdevices from the networking subsystem.
+ *
+ * @hw: the hardware to unregister
+ */
+void ieee802154_unregister_hw(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_rx_irqsafe - receive frame
+ *
+ * Like ieee802154_rx() but can be called in IRQ context
+ * (internally defers to a tasklet.)
+ *
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac802154 after this call
+ * @lqi: link quality indicator
+ */
+void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
+ u8 lqi);
+/**
+ * ieee802154_wake_queue - wake ieee802154 queue
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ *
+ * Transceivers usually have either one transmit framebuffer or one framebuffer
+ * for both transmitting and receiving. Hence, the core currently only handles
+ * one frame at a time for each phy, which means we had to stop the queue to
+ * avoid new skbs arriving during the transmission. The queue then needs to be
+ * woken up after the operation.
+ *
+ * Drivers should use this function instead of netif_wake_queue.
+ */
+void ieee802154_wake_queue(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_stop_queue - stop ieee802154 queue
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ *
+ * Transceivers usually have either one transmit framebuffer or one framebuffer
+ * for both transmitting and receiving. Hence, the core currently only handles
+ * one frame at a time for each phy, which means we need to tell upper layers to
+ * stop giving us new skbs while we are busy with the transmitted one. The queue
+ * must then be stopped before transmitting.
+ *
+ * Drivers should use this function instead of netif_stop_queue.
+ */
+void ieee802154_stop_queue(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_xmit_complete - frame transmission complete
+ *
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * @skb: buffer for transmission
+ * @ifs_handling: indicate interframe space handling
+ */
+void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
+ bool ifs_handling);
+
+/**
+ * ieee802154_xmit_error - offloaded frame transmission failed
+ *
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * @skb: buffer for transmission
+ * @reason: error code
+ */
+void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb,
+ int reason);
+
+/**
+ * ieee802154_xmit_hw_error - frame could not be offloaded to the transmitter
+ * because of a hardware error (bus error, timeout, etc)
+ *
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * @skb: buffer for transmission
+ */
+void ieee802154_xmit_hw_error(struct ieee802154_hw *hw, struct sk_buff *skb);
+
+#endif /* NET_MAC802154_H */
diff --git a/include/net/macsec.h b/include/net/macsec.h
new file mode 100644
index 000000000..65c93959c
--- /dev/null
+++ b/include/net/macsec.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * MACsec netdev header, used for h/w accelerated implementations.
+ *
+ * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
+ */
+#ifndef _NET_MACSEC_H_
+#define _NET_MACSEC_H_
+
+#include <linux/u64_stats_sync.h>
+#include <uapi/linux/if_link.h>
+#include <uapi/linux/if_macsec.h>
+
+#define MACSEC_DEFAULT_PN_LEN 4
+#define MACSEC_XPN_PN_LEN 8
+
+#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
+
+#define MACSEC_SCI_LEN 8
+#define MACSEC_PORT_ES (htons(0x0001))
+
+#define MACSEC_TCI_VERSION 0x80
+#define MACSEC_TCI_ES 0x40 /* end station */
+#define MACSEC_TCI_SC 0x20 /* SCI present */
+#define MACSEC_TCI_SCB 0x10 /* epon */
+#define MACSEC_TCI_E 0x08 /* encryption */
+#define MACSEC_TCI_C 0x04 /* changed text */
+#define MACSEC_AN_MASK 0x03 /* association number */
+#define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C)
+
+#define MACSEC_DEFAULT_ICV_LEN 16
+
+typedef u64 __bitwise sci_t;
+typedef u32 __bitwise ssci_t;
+
+struct metadata_dst;
+
+typedef union salt {
+ struct {
+ u32 ssci;
+ u64 pn;
+ } __packed;
+ u8 bytes[MACSEC_SALT_LEN];
+} __packed salt_t;
+
+typedef union pn {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 lower;
+ u32 upper;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 upper;
+ u32 lower;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ };
+ u64 full64;
+} pn_t;
+
+/**
+ * struct macsec_key - SA key
+ * @id: user-provided key identifier
+ * @tfm: crypto struct, key storage
+ * @salt: salt used to generate IV in XPN cipher suites
+ */
+struct macsec_key {
+ u8 id[MACSEC_KEYID_LEN];
+ struct crypto_aead *tfm;
+ salt_t salt;
+};
+
+struct macsec_rx_sc_stats {
+ __u64 InOctetsValidated;
+ __u64 InOctetsDecrypted;
+ __u64 InPktsUnchecked;
+ __u64 InPktsDelayed;
+ __u64 InPktsOK;
+ __u64 InPktsInvalid;
+ __u64 InPktsLate;
+ __u64 InPktsNotValid;
+ __u64 InPktsNotUsingSA;
+ __u64 InPktsUnusedSA;
+};
+
+struct macsec_rx_sa_stats {
+ __u32 InPktsOK;
+ __u32 InPktsInvalid;
+ __u32 InPktsNotValid;
+ __u32 InPktsNotUsingSA;
+ __u32 InPktsUnusedSA;
+};
+
+struct macsec_tx_sa_stats {
+ __u32 OutPktsProtected;
+ __u32 OutPktsEncrypted;
+};
+
+struct macsec_tx_sc_stats {
+ __u64 OutPktsProtected;
+ __u64 OutPktsEncrypted;
+ __u64 OutOctetsProtected;
+ __u64 OutOctetsEncrypted;
+};
+
+struct macsec_dev_stats {
+ __u64 OutPktsUntagged;
+ __u64 InPktsUntagged;
+ __u64 OutPktsTooLong;
+ __u64 InPktsNoTag;
+ __u64 InPktsBadTag;
+ __u64 InPktsUnknownSCI;
+ __u64 InPktsNoSCI;
+ __u64 InPktsOverrun;
+};
+
+/**
+ * struct macsec_rx_sa - receive secure association
+ * @active: whether this SA is currently in use
+ * @next_pn: packet number expected for the next packet
+ * @lock: protects next_pn manipulations
+ * @key: key structure
+ * @ssci: short secure channel identifier
+ * @stats: per-SA stats
+ */
+struct macsec_rx_sa {
+ struct macsec_key key;
+ ssci_t ssci;
+ spinlock_t lock;
+ union {
+ pn_t next_pn_halves;
+ u64 next_pn;
+ };
+ refcount_t refcnt;
+ bool active;
+ struct macsec_rx_sa_stats __percpu *stats;
+ struct macsec_rx_sc *sc;
+ struct rcu_head rcu;
+};
+
+struct pcpu_rx_sc_stats {
+ struct macsec_rx_sc_stats stats;
+ struct u64_stats_sync syncp;
+};
+
+struct pcpu_tx_sc_stats {
+ struct macsec_tx_sc_stats stats;
+ struct u64_stats_sync syncp;
+};
+
+/**
+ * struct macsec_rx_sc - receive secure channel
+ * @sci: secure channel identifier for this SC
+ * @active: channel is active
+ * @sa: array of secure associations
+ * @stats: per-SC stats
+ */
+struct macsec_rx_sc {
+ struct macsec_rx_sc __rcu *next;
+ sci_t sci;
+ bool active;
+ struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
+ struct pcpu_rx_sc_stats __percpu *stats;
+ refcount_t refcnt;
+ struct rcu_head rcu_head;
+};
+
+/**
+ * struct macsec_tx_sa - transmit secure association
+ * @active: whether this SA is currently in use
+ * @next_pn: packet number to use for the next packet
+ * @lock: protects next_pn manipulations
+ * @key: key structure
+ * @ssci: short secure channel identifier
+ * @stats: per-SA stats
+ */
+struct macsec_tx_sa {
+ struct macsec_key key;
+ ssci_t ssci;
+ spinlock_t lock;
+ union {
+ pn_t next_pn_halves;
+ u64 next_pn;
+ };
+ refcount_t refcnt;
+ bool active;
+ struct macsec_tx_sa_stats __percpu *stats;
+ struct rcu_head rcu;
+};
+
+/**
+ * struct macsec_tx_sc - transmit secure channel
+ * @active: channel is active
+ * @encoding_sa: association number of the SA currently in use
+ * @encrypt: encrypt packets on transmit, or authenticate only
+ * @send_sci: always include the SCI in the SecTAG
+ * @end_station: set the End Station (ES) bit in the SecTAG
+ * @scb: single copy broadcast flag
+ * @sa: array of secure associations
+ * @stats: stats for this TXSC
+ * @md_dst: MACsec offload metadata dst
+ */
+struct macsec_tx_sc {
+ bool active;
+ u8 encoding_sa;
+ bool encrypt;
+ bool send_sci;
+ bool end_station;
+ bool scb;
+ struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
+ struct pcpu_tx_sc_stats __percpu *stats;
+ struct metadata_dst *md_dst;
+};
+
+/**
+ * struct macsec_secy - MACsec Security Entity
+ * @netdev: netdevice for this SecY
+ * @n_rx_sc: number of receive secure channels configured on this SecY
+ * @sci: secure channel identifier used for tx
+ * @key_len: length of keys used by the cipher suite
+ * @icv_len: length of ICV used by the cipher suite
+ * @validate_frames: validation mode
+ * @xpn: enable XPN for this SecY
+ * @operational: MAC_Operational flag
+ * @protect_frames: enable protection for this SecY
+ * @replay_protect: enable packet number checks on receive
+ * @replay_window: size of the replay window
+ * @tx_sc: transmit secure channel
+ * @rx_sc: linked list of receive secure channels
+ */
+struct macsec_secy {
+ struct net_device *netdev;
+ unsigned int n_rx_sc;
+ sci_t sci;
+ u16 key_len;
+ u16 icv_len;
+ enum macsec_validation_type validate_frames;
+ bool xpn;
+ bool operational;
+ bool protect_frames;
+ bool replay_protect;
+ u32 replay_window;
+ struct macsec_tx_sc tx_sc;
+ struct macsec_rx_sc __rcu *rx_sc;
+};
+
+/**
+ * struct macsec_context - MACsec context for hardware offloading
+ */
+struct macsec_context {
+ union {
+ struct net_device *netdev;
+ struct phy_device *phydev;
+ };
+ enum macsec_offload offload;
+
+ struct macsec_secy *secy;
+ struct macsec_rx_sc *rx_sc;
+ struct {
+ bool update_pn;
+ unsigned char assoc_num;
+ u8 key[MACSEC_MAX_KEY_LEN];
+ union {
+ struct macsec_rx_sa *rx_sa;
+ struct macsec_tx_sa *tx_sa;
+ };
+ } sa;
+ union {
+ struct macsec_tx_sc_stats *tx_sc_stats;
+ struct macsec_tx_sa_stats *tx_sa_stats;
+ struct macsec_rx_sc_stats *rx_sc_stats;
+ struct macsec_rx_sa_stats *rx_sa_stats;
+ struct macsec_dev_stats *dev_stats;
+ } stats;
+};
+
+/**
+ * struct macsec_ops - MACsec offloading operations
+ */
+struct macsec_ops {
+ /* Device wide */
+ int (*mdo_dev_open)(struct macsec_context *ctx);
+ int (*mdo_dev_stop)(struct macsec_context *ctx);
+ /* SecY */
+ int (*mdo_add_secy)(struct macsec_context *ctx);
+ int (*mdo_upd_secy)(struct macsec_context *ctx);
+ int (*mdo_del_secy)(struct macsec_context *ctx);
+ /* Security channels */
+ int (*mdo_add_rxsc)(struct macsec_context *ctx);
+ int (*mdo_upd_rxsc)(struct macsec_context *ctx);
+ int (*mdo_del_rxsc)(struct macsec_context *ctx);
+ /* Security associations */
+ int (*mdo_add_rxsa)(struct macsec_context *ctx);
+ int (*mdo_upd_rxsa)(struct macsec_context *ctx);
+ int (*mdo_del_rxsa)(struct macsec_context *ctx);
+ int (*mdo_add_txsa)(struct macsec_context *ctx);
+ int (*mdo_upd_txsa)(struct macsec_context *ctx);
+ int (*mdo_del_txsa)(struct macsec_context *ctx);
+ /* Statistics */
+ int (*mdo_get_dev_stats)(struct macsec_context *ctx);
+ int (*mdo_get_tx_sc_stats)(struct macsec_context *ctx);
+ int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
+ int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
+ int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
+};
+
+void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
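+/* Per IEEE 802.1AE, the SCI may be omitted from the SecTAG only when the
+ * peer can infer it; macsec_send_sci() below requests it when explicitly
+ * configured, or when more than one RX SC exists and neither the ES nor
+ * the SCB TCI bits apply.
+ */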
+static inline bool macsec_send_sci(const struct macsec_secy *secy)
+{
+ const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+
+ return tx_sc->send_sci ||
+ (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
+}
+
+#endif /* _NET_MACSEC_H_ */
diff --git a/include/net/mctp.h b/include/net/mctp.h
new file mode 100644
index 000000000..82800d521
--- /dev/null
+++ b/include/net/mctp.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Management Component Transport Protocol (MCTP)
+ *
+ * Copyright (c) 2021 Code Construct
+ * Copyright (c) 2021 Google
+ */
+
+#ifndef __NET_MCTP_H
+#define __NET_MCTP_H
+
+#include <linux/bits.h>
+#include <linux/mctp.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+
+/* MCTP packet definitions */
+struct mctp_hdr {
+ u8 ver;
+ u8 dest;
+ u8 src;
+ u8 flags_seq_tag;
+};
+
+#define MCTP_VER_MIN 1
+#define MCTP_VER_MAX 1
+
+/* Definitions for flags_seq_tag field */
+#define MCTP_HDR_FLAG_SOM BIT(7)
+#define MCTP_HDR_FLAG_EOM BIT(6)
+#define MCTP_HDR_FLAG_TO BIT(3)
+#define MCTP_HDR_FLAGS GENMASK(5, 3)
+#define MCTP_HDR_SEQ_SHIFT 4
+#define MCTP_HDR_SEQ_MASK GENMASK(1, 0)
+#define MCTP_HDR_TAG_SHIFT 0
+#define MCTP_HDR_TAG_MASK GENMASK(2, 0)
+
+#define MCTP_INITIAL_DEFAULT_NET 1
+
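+/* EID ranges per DMTF DSP0236: 0 is the NULL EID, 1-7 are reserved,
+ * 8-254 are valid endpoint IDs, and 255 is broadcast.
+ */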
+static inline bool mctp_address_unicast(mctp_eid_t eid)
+{
+ return eid >= 8 && eid < 255;
+}
+
+static inline bool mctp_address_broadcast(mctp_eid_t eid)
+{
+ return eid == 255;
+}
+
+static inline bool mctp_address_null(mctp_eid_t eid)
+{
+ return eid == 0;
+}
+
+static inline bool mctp_address_matches(mctp_eid_t match, mctp_eid_t eid)
+{
+ return match == eid || match == MCTP_ADDR_ANY;
+}
+
+static inline struct mctp_hdr *mctp_hdr(struct sk_buff *skb)
+{
+ return (struct mctp_hdr *)skb_network_header(skb);
+}
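+
+/* Illustrative only (not part of this header): packing flags_seq_tag from
+ * its components, using the masks defined above.
+ */
+static inline u8 __mctp_pack_flags_seq_tag(bool som, bool eom, u8 seq,
+ bool to, u8 tag)
+{
+ return (som ? MCTP_HDR_FLAG_SOM : 0) |
+ (eom ? MCTP_HDR_FLAG_EOM : 0) |
+ ((seq & MCTP_HDR_SEQ_MASK) << MCTP_HDR_SEQ_SHIFT) |
+ (to ? MCTP_HDR_FLAG_TO : 0) |
+ ((tag & MCTP_HDR_TAG_MASK) << MCTP_HDR_TAG_SHIFT);
+}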
+
+/* socket implementation */
+struct mctp_sock {
+ struct sock sk;
+
+ /* bind() params */
+ unsigned int bind_net;
+ mctp_eid_t bind_addr;
+ __u8 bind_type;
+
+ /* sendmsg()/recvmsg() uses struct sockaddr_mctp_ext */
+ bool addr_ext;
+
+ /* list of mctp_sk_key, for incoming tag lookup. updates protected
+ * by sk->net->keys_lock
+ */
+ struct hlist_head keys;
+
+ /* mechanism for expiring allocated keys; will release an allocated
+ * tag, and any netdev state for a request/response pairing
+ */
+ struct timer_list key_expiry;
+};
+
+/* Key for matching incoming packets to sockets or reassembly contexts.
+ * Packets are matched on (src,dest,tag).
+ *
+ * Lifetime / locking requirements:
+ *
+ * - individual key data (ie, the struct itself) is protected by key->lock;
+ * changes must be made with that lock held.
+ *
+ * - the lookup fields: peer_addr, local_addr and tag are set before the
+ * key is added to lookup lists, and never updated.
+ *
+ * - A ref to the key must be held (through key->refs) if a pointer to the
+ * key is to be accessed after key->lock is released.
+ *
+ * - a mctp_sk_key contains a reference to a struct sock; this is valid
+ * for the life of the key. On sock destruction (through unhash), the key is
+ * removed from lists (see below), and marked invalid.
+ *
+ * - these mctp_sk_keys appear on two lists:
+ * 1) the struct mctp_sock->keys list
+ * 2) the struct netns_mctp->keys list
+ *
+ * presence on these lists requires a (single) refcount to be held; both
+ * lists are updated as a single operation.
+ *
+ * Updates and lookups in either list are performed under the
+ * netns_mctp->keys lock. Lookup functions will need to lock the key and
+ * take a reference before unlocking the keys_lock. Consequently, the list's
+ * keys_lock *cannot* be acquired with the individual key->lock held.
+ *
+ * - a key may have a sk_buff attached as part of an in-progress message
+ * reassembly (->reasm_head). The reasm data is protected by the individual
+ * key->lock.
+ *
+ * - there are three destruction paths for a mctp_sk_key:
+ *
+ * - through socket unhash (see mctp_sk_unhash). This performs the list
+ * removal under keys_lock.
+ *
+ * - where a key is established to receive a reply message: after receiving
+ * the (complete) reply, or during reassembly errors. Here, we clean up
+ * the reassembly context (marking reasm_dead, to prevent another from
+ * starting), and remove the key from the netns & socket lists.
+ *
+ * - through an expiry timeout, on a per-socket timer
+ */
+struct mctp_sk_key {
+ mctp_eid_t peer_addr;
+ mctp_eid_t local_addr; /* MCTP_ADDR_ANY for local owned tags */
+ __u8 tag; /* incoming tag match; invert TO for local */
+
+ /* we hold a ref to sk when set */
+ struct sock *sk;
+
+ /* routing lookup list */
+ struct hlist_node hlist;
+
+ /* per-socket list */
+ struct hlist_node sklist;
+
+ /* lock protects against concurrent updates to the reassembly and
+ * expiry data below.
+ */
+ spinlock_t lock;
+
+ /* Keys are referenced during the output path, which may sleep */
+ refcount_t refs;
+
+ /* incoming fragment reassembly context */
+ struct sk_buff *reasm_head;
+ struct sk_buff **reasm_tailp;
+ bool reasm_dead;
+ u8 last_seq;
+
+ /* key validity */
+ bool valid;
+
+ /* expiry timeout; valid (above) cleared on expiry */
+ unsigned long expiry;
+
+ /* free to use for device flow state tracking. Initialised to
+ * zero on initial key creation
+ */
+ unsigned long dev_flow_state;
+ struct mctp_dev *dev;
+
+ /* a tag allocated with the SIOCMCTPALLOCTAG ioctl will not expire
+ * automatically on timeout or response; instead, SIOCMCTPDROPTAG
+ * is used to release it.
+ */
+ bool manual_alloc;
+};
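+
+/* Sketch of the lookup pattern implied by the rules above (illustrative
+ * only; helper names and field paths hypothetical, the real lookups live
+ * in net/mctp/route.c):
+ *
+ *	spin_lock(&net->mctp.keys_lock);
+ *	hlist_for_each_entry(key, &net->mctp.keys, hlist) {
+ *		if (!key_matches(key, ...))
+ *			continue;
+ *		spin_lock(&key->lock);
+ *		refcount_inc(&key->refs);
+ *		break;
+ *	}
+ *	spin_unlock(&net->mctp.keys_lock);
+ *	... use key; drop key->lock, then mctp_key_unref(key) when done ...
+ */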
+
+struct mctp_skb_cb {
+ unsigned int magic;
+ unsigned int net;
+ int ifindex; /* extended/direct addressing if set */
+ mctp_eid_t src;
+ unsigned char halen;
+ unsigned char haddr[MAX_ADDR_LEN];
+};
+
+/* skb control-block accessors with a little extra debugging for initial
+ * development.
+ *
+ * TODO: remove checks & mctp_skb_cb->magic; replace callers of __mctp_cb
+ * with mctp_cb().
+ *
+ * __mctp_cb() is only for the initial ingress code; we should see ->magic set
+ * at all times after this.
+ */
+static inline struct mctp_skb_cb *__mctp_cb(struct sk_buff *skb)
+{
+ struct mctp_skb_cb *cb = (void *)skb->cb;
+
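+ /* magic is ASCII "MCTP" */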
+ cb->magic = 0x4d435450;
+ return cb;
+}
+
+static inline struct mctp_skb_cb *mctp_cb(struct sk_buff *skb)
+{
+ struct mctp_skb_cb *cb = (void *)skb->cb;
+
+ BUILD_BUG_ON(sizeof(struct mctp_skb_cb) > sizeof(skb->cb));
+ WARN_ON(cb->magic != 0x4d435450);
+ return (void *)(skb->cb);
+}
+
+/* If CONFIG_MCTP_FLOWS, we may add one of these as a SKB extension,
+ * indicating the flow to the device driver.
+ */
+struct mctp_flow {
+ struct mctp_sk_key *key;
+};
+
+/* Route definition.
+ *
+ * These are held in the pernet->mctp.routes list, with RCU protection for
+ * removed routes. We hold a reference to the netdev; routes need to be
+ * dropped on NETDEV_UNREGISTER events.
+ *
+ * Updates to the route table are performed under rtnl; all reads under RCU,
+ * so routes cannot be referenced across an RCU grace period. Specifically, a
+ * caller cannot block between mctp_route_lookup() and mctp_route_release().
+ */
+struct mctp_route {
+ mctp_eid_t min, max;
+
+ struct mctp_dev *dev;
+ unsigned int mtu;
+ unsigned char type;
+ int (*output)(struct mctp_route *route,
+ struct sk_buff *skb);
+
+ struct list_head list;
+ refcount_t refs;
+ struct rcu_head rcu;
+};
+
+/* route interfaces */
+struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
+ mctp_eid_t daddr);
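+
+/* Illustrative use of the lookup under the rules above (a sketch;
+ * mctp_route_release() is not declared in this header):
+ *
+ *	rcu_read_lock();
+ *	rt = mctp_route_lookup(net, dnet, daddr);
+ *	if (rt)
+ *		... use rt without blocking, then mctp_route_release(rt) ...
+ *	rcu_read_unlock();
+ */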
+
+int mctp_local_output(struct sock *sk, struct mctp_route *rt,
+ struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag);
+
+void mctp_key_unref(struct mctp_sk_key *key);
+struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
+ mctp_eid_t daddr, mctp_eid_t saddr,
+ bool manual, u8 *tagp);
+
+/* routing <--> device interface */
+unsigned int mctp_default_net(struct net *net);
+int mctp_default_net_set(struct net *net, unsigned int index);
+int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr);
+int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr);
+void mctp_route_remove_dev(struct mctp_dev *mdev);
+
+/* neighbour definitions */
+enum mctp_neigh_source {
+ MCTP_NEIGH_STATIC,
+ MCTP_NEIGH_DISCOVER,
+};
+
+struct mctp_neigh {
+ struct mctp_dev *dev;
+ mctp_eid_t eid;
+ enum mctp_neigh_source source;
+
+ unsigned char ha[MAX_ADDR_LEN];
+
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+int mctp_neigh_init(void);
+void mctp_neigh_exit(void);
+
+/* ret_hwaddr may be NULL, otherwise must have space for MAX_ADDR_LEN */
+int mctp_neigh_lookup(struct mctp_dev *dev, mctp_eid_t eid,
+ void *ret_hwaddr);
+void mctp_neigh_remove_dev(struct mctp_dev *mdev);
+
+int mctp_routes_init(void);
+void mctp_routes_exit(void);
+
+void mctp_device_init(void);
+void mctp_device_exit(void);
+
+#endif /* __NET_MCTP_H */
diff --git a/include/net/mctpdevice.h b/include/net/mctpdevice.h
new file mode 100644
index 000000000..5c0d04b5c
--- /dev/null
+++ b/include/net/mctpdevice.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Management Component Transport Protocol (MCTP) - device
+ * definitions.
+ *
+ * Copyright (c) 2021 Code Construct
+ * Copyright (c) 2021 Google
+ */
+
+#ifndef __NET_MCTPDEVICE_H
+#define __NET_MCTPDEVICE_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/refcount.h>
+
+struct mctp_sk_key;
+
+struct mctp_dev {
+ struct net_device *dev;
+
+ refcount_t refs;
+
+ unsigned int net;
+
+ const struct mctp_netdev_ops *ops;
+
+ /* Only modified under RTNL. Reads have addrs_lock held */
+ u8 *addrs;
+ size_t num_addrs;
+ spinlock_t addrs_lock;
+
+ struct rcu_head rcu;
+};
+
+struct mctp_netdev_ops {
+ void (*release_flow)(struct mctp_dev *dev,
+ struct mctp_sk_key *key);
+};
+
+#define MCTP_INITIAL_DEFAULT_NET 1
+
+struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev);
+struct mctp_dev *__mctp_dev_get(const struct net_device *dev);
+
+int mctp_register_netdev(struct net_device *dev,
+ const struct mctp_netdev_ops *ops);
+void mctp_unregister_netdev(struct net_device *dev);
+
+void mctp_dev_hold(struct mctp_dev *mdev);
+void mctp_dev_put(struct mctp_dev *mdev);
+
+void mctp_dev_set_key(struct mctp_dev *dev, struct mctp_sk_key *key);
+void mctp_dev_release_key(struct mctp_dev *dev, struct mctp_sk_key *key);
+
+#endif /* __NET_MCTPDEVICE_H */
diff --git a/include/net/mip6.h b/include/net/mip6.h
new file mode 100644
index 000000000..67cd7e508
--- /dev/null
+++ b/include/net/mip6.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C)2003-2006 Helsinki University of Technology
+ * Copyright (C)2003-2006 USAGI/WIDE Project
+ */
+/*
+ * Authors:
+ * Noriaki TAKAMIYA @USAGI
+ * Masahide NAKAMURA @USAGI
+ * YOSHIFUJI Hideaki @USAGI
+ */
+#ifndef _NET_MIP6_H
+#define _NET_MIP6_H
+
+#include <linux/skbuff.h>
+#include <net/sock.h>
+
+/*
+ * Mobility Header
+ */
+struct ip6_mh {
+ __u8 ip6mh_proto;
+ __u8 ip6mh_hdrlen;
+ __u8 ip6mh_type;
+ __u8 ip6mh_reserved;
+ __u16 ip6mh_cksum;
+ /* Followed by type specific messages */
+ __u8 data[];
+} __packed;
+
+#define IP6_MH_TYPE_BRR 0 /* Binding Refresh Request */
+#define IP6_MH_TYPE_HOTI 1 /* HOTI Message */
+#define IP6_MH_TYPE_COTI 2 /* COTI Message */
+#define IP6_MH_TYPE_HOT 3 /* HOT Message */
+#define IP6_MH_TYPE_COT 4 /* COT Message */
+#define IP6_MH_TYPE_BU 5 /* Binding Update */
+#define IP6_MH_TYPE_BACK 6 /* Binding ACK */
+#define IP6_MH_TYPE_BERROR 7 /* Binding Error */
+#define IP6_MH_TYPE_MAX IP6_MH_TYPE_BERROR
+
+#endif
diff --git a/include/net/mld.h b/include/net/mld.h
new file mode 100644
index 000000000..c07359808
--- /dev/null
+++ b/include/net/mld.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_MLD_H
+#define LINUX_MLD_H
+
+#include <linux/in6.h>
+#include <linux/icmpv6.h>
+
+/* MLDv1 Query/Report/Done */
+struct mld_msg {
+ struct icmp6hdr mld_hdr;
+ struct in6_addr mld_mca;
+};
+
+#define mld_type mld_hdr.icmp6_type
+#define mld_code mld_hdr.icmp6_code
+#define mld_cksum mld_hdr.icmp6_cksum
+#define mld_maxdelay mld_hdr.icmp6_maxdelay
+#define mld_reserved mld_hdr.icmp6_dataun.un_data16[1]
+
+/* Multicast Listener Discovery version 2 headers */
+/* MLDv2 Report */
+struct mld2_grec {
+ __u8 grec_type;
+ __u8 grec_auxwords;
+ __be16 grec_nsrcs;
+ struct in6_addr grec_mca;
+ struct in6_addr grec_src[];
+};
+
+struct mld2_report {
+ struct icmp6hdr mld2r_hdr;
+ struct mld2_grec mld2r_grec[];
+};
+
+#define mld2r_type mld2r_hdr.icmp6_type
+#define mld2r_resv1 mld2r_hdr.icmp6_code
+#define mld2r_cksum mld2r_hdr.icmp6_cksum
+#define mld2r_resv2 mld2r_hdr.icmp6_dataun.un_data16[0]
+#define mld2r_ngrec mld2r_hdr.icmp6_dataun.un_data16[1]
+
+/* MLDv2 Query */
+struct mld2_query {
+ struct icmp6hdr mld2q_hdr;
+ struct in6_addr mld2q_mca;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 mld2q_qrv:3,
+ mld2q_suppress:1,
+ mld2q_resv2:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 mld2q_resv2:4,
+ mld2q_suppress:1,
+ mld2q_qrv:3;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 mld2q_qqic;
+ __be16 mld2q_nsrcs;
+ struct in6_addr mld2q_srcs[];
+};
+
+#define mld2q_type mld2q_hdr.icmp6_type
+#define mld2q_code mld2q_hdr.icmp6_code
+#define mld2q_cksum mld2q_hdr.icmp6_cksum
+#define mld2q_mrc mld2q_hdr.icmp6_maxdelay
+#define mld2q_resv1 mld2q_hdr.icmp6_dataun.un_data16[1]
+
+/* RFC3810, 5.1.3. Maximum Response Code:
+ *
+ * If Maximum Response Code >= 32768, Maximum Response Code represents a
+ * floating-point value as follows:
+ *
+ * 0 1 2 3 4 5 6 7 8 9 A B C D E F
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |1| exp | mant |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+#define MLDV2_MRC_EXP(value) (((value) >> 12) & 0x0007)
+#define MLDV2_MRC_MAN(value) ((value) & 0x0fff)
+
+/* RFC3810, 5.1.9. QQIC (Querier's Query Interval Code):
+ *
+ * If QQIC >= 128, QQIC represents a floating-point value as follows:
+ *
+ * 0 1 2 3 4 5 6 7
+ * +-+-+-+-+-+-+-+-+
+ * |1| exp | mant |
+ * +-+-+-+-+-+-+-+-+
+ */
+#define MLDV2_QQIC_EXP(value) (((value) >> 4) & 0x07)
+#define MLDV2_QQIC_MAN(value) ((value) & 0x0f)
+
+#define MLD_EXP_MIN_LIMIT 32768UL
+#define MLDV1_MRD_MAX_COMPAT (MLD_EXP_MIN_LIMIT - 1)
+
+#define MLD_MAX_QUEUE 8
+#define MLD_MAX_SKBS 32
+
+static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2)
+{
+ /* RFC3810, 5.1.3. Maximum Response Code */
+ unsigned long ret, mc_mrc = ntohs(mlh2->mld2q_mrc);
+
+ if (mc_mrc < MLD_EXP_MIN_LIMIT) {
+ ret = mc_mrc;
+ } else {
+ unsigned long mc_man, mc_exp;
+
+ mc_exp = MLDV2_MRC_EXP(mc_mrc);
+ mc_man = MLDV2_MRC_MAN(mc_mrc);
+
+ ret = (mc_man | 0x1000) << (mc_exp + 3);
+ }
+
+ return ret;
+}
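+
+/* Companion decode for the QQIC encoding per RFC3810, 5.1.9 (illustrative
+ * only; not part of this header). E.g. a QQIC of 0x80 (exp=0, mant=0)
+ * yields (0x10 << 3) = 128, the smallest encoded value.
+ */
+static inline unsigned long mldv2_qqi(u8 qqic)
+{
+ if (qqic < 128)
+ return qqic;
+
+ return (MLDV2_QQIC_MAN(qqic) | 0x10) << (MLDV2_QQIC_EXP(qqic) + 3);
+}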
+
+#endif
diff --git a/include/net/mpls.h b/include/net/mpls.h
new file mode 100644
index 000000000..0bb7944e7
--- /dev/null
+++ b/include/net/mpls.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014 Nicira, Inc.
+ */
+
+#ifndef _NET_MPLS_H
+#define _NET_MPLS_H 1
+
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/mpls.h>
+
+#define MPLS_HLEN 4
+
+struct mpls_shim_hdr {
+ __be32 label_stack_entry;
+};
+
+static inline bool eth_p_mpls(__be16 eth_type)
+{
+ return eth_type == htons(ETH_P_MPLS_UC) ||
+ eth_type == htons(ETH_P_MPLS_MC);
+}
+
+static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
+{
+ return (struct mpls_shim_hdr *)skb_network_header(skb);
+}
+
+static inline struct mpls_shim_hdr mpls_entry_encode(u32 label,
+ unsigned int ttl,
+ unsigned int tc,
+ bool bos)
+{
+ struct mpls_shim_hdr result;
+
+ result.label_stack_entry =
+ cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
+ (tc << MPLS_LS_TC_SHIFT) |
+ (bos ? (1 << MPLS_LS_S_SHIFT) : 0) |
+ (ttl << MPLS_LS_TTL_SHIFT));
+ return result;
+}
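+
+/* Example: mpls_entry_encode(100, 64, 0, true) returns the network-order
+ * label stack entry for label 100, TTL 64, TC 0, with the bottom-of-stack
+ * bit set.
+ */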
+
+#endif
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
new file mode 100644
index 000000000..0c71c2797
--- /dev/null
+++ b/include/net/mpls_iptunnel.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015 Cumulus Networks, Inc.
+ */
+
+#ifndef _NET_MPLS_IPTUNNEL_H
+#define _NET_MPLS_IPTUNNEL_H 1
+
+#include <linux/types.h>
+#include <net/lwtunnel.h>
+
+struct mpls_iptunnel_encap {
+ u8 labels;
+ u8 ttl_propagate;
+ u8 default_ttl;
+ u8 reserved1;
+ u32 label[];
+};
+
+static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
+{
+ return (struct mpls_iptunnel_encap *)lwtstate->data;
+}
+
+#endif
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
new file mode 100644
index 000000000..3c5c68618
--- /dev/null
+++ b/include/net/mptcp.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Multipath TCP
+ *
+ * Copyright (c) 2017 - 2019, Intel Corporation.
+ */
+
+#ifndef __NET_MPTCP_H
+#define __NET_MPTCP_H
+
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+
+struct mptcp_info;
+struct mptcp_sock;
+struct seq_file;
+
+/* MPTCP sk_buff extension data */
+struct mptcp_ext {
+ union {
+ u64 data_ack;
+ u32 data_ack32;
+ };
+ u64 data_seq;
+ u32 subflow_seq;
+ u16 data_len;
+ __sum16 csum;
+ u8 use_map:1,
+ dsn64:1,
+ data_fin:1,
+ use_ack:1,
+ ack64:1,
+ mpc_map:1,
+ frozen:1,
+ reset_transient:1;
+ u8 reset_reason:4,
+ csum_reqd:1,
+ infinite_map:1;
+};
+
+#define MPTCPOPT_HMAC_LEN 20
+#define MPTCP_RM_IDS_MAX 8
+
+struct mptcp_rm_list {
+ u8 ids[MPTCP_RM_IDS_MAX];
+ u8 nr;
+};
+
+struct mptcp_addr_info {
+ u8 id;
+ sa_family_t family;
+ __be16 port;
+ union {
+ struct in_addr addr;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ struct in6_addr addr6;
+#endif
+ };
+};
+
+struct mptcp_out_options {
+#if IS_ENABLED(CONFIG_MPTCP)
+ u16 suboptions;
+ struct mptcp_rm_list rm_list;
+ u8 join_id;
+ u8 backup;
+ u8 reset_reason:4,
+ reset_transient:1,
+ csum_reqd:1,
+ allow_join_id0:1;
+ union {
+ struct {
+ u64 sndr_key;
+ u64 rcvr_key;
+ u64 data_seq;
+ u32 subflow_seq;
+ u16 data_len;
+ __sum16 csum;
+ };
+ struct {
+ struct mptcp_addr_info addr;
+ u64 ahmac;
+ };
+ struct {
+ struct mptcp_ext ext_copy;
+ u64 fail_seq;
+ };
+ struct {
+ u32 nonce;
+ u32 token;
+ u64 thmac;
+ u8 hmac[MPTCPOPT_HMAC_LEN];
+ };
+ };
+#endif
+};
+
+#ifdef CONFIG_MPTCP
+void mptcp_init(void);
+
+static inline bool sk_is_mptcp(const struct sock *sk)
+{
+ return tcp_sk(sk)->is_mptcp;
+}
+
+static inline bool rsk_is_mptcp(const struct request_sock *req)
+{
+ return tcp_rsk(req)->is_mptcp;
+}
+
+static inline bool rsk_drop_req(const struct request_sock *req)
+{
+ return tcp_rsk(req)->is_mptcp && tcp_rsk(req)->drop_req;
+}
+
+void mptcp_space(const struct sock *ssk, int *space, int *full_space);
+bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
+ unsigned int *size, struct mptcp_out_options *opts);
+bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
+ struct mptcp_out_options *opts);
+bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
+ unsigned int *size, unsigned int remaining,
+ struct mptcp_out_options *opts);
+bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
+
+void mptcp_write_options(struct tcphdr *th, __be32 *ptr, struct tcp_sock *tp,
+ struct mptcp_out_options *opts);
+
+void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info);
+
+/* move the skb extension ownership, with the assumption that 'to' is
+ * newly allocated
+ */
+static inline void mptcp_skb_ext_move(struct sk_buff *to,
+ struct sk_buff *from)
+{
+ if (!skb_ext_exist(from, SKB_EXT_MPTCP))
+ return;
+
+ if (WARN_ON_ONCE(to->active_extensions))
+ skb_ext_put(to);
+
+ to->active_extensions = from->active_extensions;
+ to->extensions = from->extensions;
+ from->active_extensions = 0;
+}
+
+static inline void mptcp_skb_ext_copy(struct sk_buff *to,
+ struct sk_buff *from)
+{
+ struct mptcp_ext *from_ext;
+
+ from_ext = skb_ext_find(from, SKB_EXT_MPTCP);
+ if (!from_ext)
+ return;
+
+ from_ext->frozen = 1;
+ skb_ext_copy(to, from);
+}
+
+static inline bool mptcp_ext_matches(const struct mptcp_ext *to_ext,
+ const struct mptcp_ext *from_ext)
+{
+ /* MPTCP always clears the ext when adding it to the skb, so
+ * holes do not bother us here
+ */
+ return !from_ext ||
+ (to_ext && from_ext &&
+ !memcmp(from_ext, to_ext, sizeof(struct mptcp_ext)));
+}
+
+/* check if skbs can be collapsed.
+ * MPTCP collapse is allowed if neither @to nor @from carries an mptcp data
+ * mapping, or if the extension of @to is the same as @from.
+ * Collapsing is not possible if @to lacks an extension, but @from carries one.
+ */
+static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
+ const struct sk_buff *from)
+{
+ return mptcp_ext_matches(skb_ext_find(to, SKB_EXT_MPTCP),
+ skb_ext_find(from, SKB_EXT_MPTCP));
+}
+
+void mptcp_seq_show(struct seq_file *seq);
+int mptcp_subflow_init_cookie_req(struct request_sock *req,
+ const struct sock *sk_listener,
+ struct sk_buff *skb);
+struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
+ struct sock *sk_listener,
+ bool attach_listener);
+
+__be32 mptcp_get_reset_option(const struct sk_buff *skb);
+
+static inline __be32 mptcp_reset_option(const struct sk_buff *skb)
+{
+ if (skb_ext_exist(skb, SKB_EXT_MPTCP))
+ return mptcp_get_reset_option(skb);
+
+ return htonl(0u);
+}
+#else
+
+static inline void mptcp_init(void)
+{
+}
+
+static inline bool sk_is_mptcp(const struct sock *sk)
+{
+ return false;
+}
+
+static inline bool rsk_is_mptcp(const struct request_sock *req)
+{
+ return false;
+}
+
+static inline bool rsk_drop_req(const struct request_sock *req)
+{
+ return false;
+}
+
+static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
+ unsigned int *size,
+ struct mptcp_out_options *opts)
+{
+ return false;
+}
+
+static inline bool mptcp_synack_options(const struct request_sock *req,
+ unsigned int *size,
+ struct mptcp_out_options *opts)
+{
+ return false;
+}
+
+static inline bool mptcp_established_options(struct sock *sk,
+ struct sk_buff *skb,
+ unsigned int *size,
+ unsigned int remaining,
+ struct mptcp_out_options *opts)
+{
+ return false;
+}
+
+static inline bool mptcp_incoming_options(struct sock *sk,
+ struct sk_buff *skb)
+{
+ return true;
+}
+
+static inline void mptcp_skb_ext_move(struct sk_buff *to,
+ const struct sk_buff *from)
+{
+}
+
+static inline void mptcp_skb_ext_copy(struct sk_buff *to,
+ struct sk_buff *from)
+{
+}
+
+static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
+ const struct sk_buff *from)
+{
+ return true;
+}
+
+static inline void mptcp_space(const struct sock *ssk, int *s, int *fs) { }
+static inline void mptcp_seq_show(struct seq_file *seq) { }
+
+static inline int mptcp_subflow_init_cookie_req(struct request_sock *req,
+ const struct sock *sk_listener,
+ struct sk_buff *skb)
+{
+ return 0; /* TCP fallback */
+}
+
+static inline struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
+ struct sock *sk_listener,
+ bool attach_listener)
+{
+ return NULL;
+}
+
+static inline __be32 mptcp_reset_option(const struct sk_buff *skb) { return htonl(0u); }
+#endif /* CONFIG_MPTCP */
+
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+int mptcpv6_init(void);
+void mptcpv6_handle_mapped(struct sock *sk, bool mapped);
+#elif IS_ENABLED(CONFIG_IPV6)
+static inline int mptcpv6_init(void) { return 0; }
+static inline void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { }
+#endif
+
+#if defined(CONFIG_MPTCP) && defined(CONFIG_BPF_SYSCALL)
+struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk);
+#else
+static inline struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk) { return NULL; }
+#endif
+
+#if !IS_ENABLED(CONFIG_MPTCP)
+struct mptcp_sock { };
+#endif
+
+#endif /* __NET_MPTCP_H */
diff --git a/include/net/mrp.h b/include/net/mrp.h
new file mode 100644
index 000000000..b28915ffe
--- /dev/null
+++ b/include/net/mrp.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_MRP_H
+#define _NET_MRP_H
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#define MRP_END_MARK 0x0
+
+struct mrp_pdu_hdr {
+ u8 version;
+};
+
+struct mrp_msg_hdr {
+ u8 attrtype;
+ u8 attrlen;
+};
+
+struct mrp_vecattr_hdr {
+ __be16 lenflags;
+ unsigned char firstattrvalue[];
+#define MRP_VECATTR_HDR_LEN_MASK cpu_to_be16(0x1FFF)
+#define MRP_VECATTR_HDR_FLAG_LA cpu_to_be16(0x2000)
+};
+
+enum mrp_vecattr_event {
+ MRP_VECATTR_EVENT_NEW,
+ MRP_VECATTR_EVENT_JOIN_IN,
+ MRP_VECATTR_EVENT_IN,
+ MRP_VECATTR_EVENT_JOIN_MT,
+ MRP_VECATTR_EVENT_MT,
+ MRP_VECATTR_EVENT_LV,
+ __MRP_VECATTR_EVENT_MAX
+};
+
+struct mrp_skb_cb {
+ struct mrp_msg_hdr *mh;
+ struct mrp_vecattr_hdr *vah;
+ unsigned char attrvalue[];
+};
+
+static inline struct mrp_skb_cb *mrp_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct mrp_skb_cb) >
+ sizeof_field(struct sk_buff, cb));
+ return (struct mrp_skb_cb *)skb->cb;
+}
+
+enum mrp_applicant_state {
+ MRP_APPLICANT_INVALID,
+ MRP_APPLICANT_VO,
+ MRP_APPLICANT_VP,
+ MRP_APPLICANT_VN,
+ MRP_APPLICANT_AN,
+ MRP_APPLICANT_AA,
+ MRP_APPLICANT_QA,
+ MRP_APPLICANT_LA,
+ MRP_APPLICANT_AO,
+ MRP_APPLICANT_QO,
+ MRP_APPLICANT_AP,
+ MRP_APPLICANT_QP,
+ __MRP_APPLICANT_MAX
+};
+#define MRP_APPLICANT_MAX (__MRP_APPLICANT_MAX - 1)
+
+enum mrp_event {
+ MRP_EVENT_NEW,
+ MRP_EVENT_JOIN,
+ MRP_EVENT_LV,
+ MRP_EVENT_TX,
+ MRP_EVENT_R_NEW,
+ MRP_EVENT_R_JOIN_IN,
+ MRP_EVENT_R_IN,
+ MRP_EVENT_R_JOIN_MT,
+ MRP_EVENT_R_MT,
+ MRP_EVENT_R_LV,
+ MRP_EVENT_R_LA,
+ MRP_EVENT_REDECLARE,
+ MRP_EVENT_PERIODIC,
+ __MRP_EVENT_MAX
+};
+#define MRP_EVENT_MAX (__MRP_EVENT_MAX - 1)
+
+enum mrp_tx_action {
+ MRP_TX_ACTION_NONE,
+ MRP_TX_ACTION_S_NEW,
+ MRP_TX_ACTION_S_JOIN_IN,
+ MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
+ MRP_TX_ACTION_S_IN_OPTIONAL,
+ MRP_TX_ACTION_S_LV,
+};
+
+struct mrp_attr {
+ struct rb_node node;
+ enum mrp_applicant_state state;
+ u8 type;
+ u8 len;
+ unsigned char value[];
+};
+
+enum mrp_applications {
+ MRP_APPLICATION_MVRP,
+ __MRP_APPLICATION_MAX
+};
+#define MRP_APPLICATION_MAX (__MRP_APPLICATION_MAX - 1)
+
+struct mrp_application {
+ enum mrp_applications type;
+ unsigned int maxattr;
+ struct packet_type pkttype;
+ unsigned char group_address[ETH_ALEN];
+ u8 version;
+};
+
+struct mrp_applicant {
+ struct mrp_application *app;
+ struct net_device *dev;
+ struct timer_list join_timer;
+ struct timer_list periodic_timer;
+
+ spinlock_t lock;
+ struct sk_buff_head queue;
+ struct sk_buff *pdu;
+ struct rb_root mad;
+ struct rcu_head rcu;
+ bool active;
+};
+
+struct mrp_port {
+ struct mrp_applicant __rcu *applicants[MRP_APPLICATION_MAX + 1];
+ struct rcu_head rcu;
+};
+
+int mrp_register_application(struct mrp_application *app);
+void mrp_unregister_application(struct mrp_application *app);
+
+int mrp_init_applicant(struct net_device *dev, struct mrp_application *app);
+void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *app);
+
+int mrp_request_join(const struct net_device *dev,
+ const struct mrp_application *app,
+ const void *value, u8 len, u8 type);
+void mrp_request_leave(const struct net_device *dev,
+ const struct mrp_application *app,
+ const void *value, u8 len, u8 type);
+
+#endif /* _NET_MRP_H */
diff --git a/include/net/ncsi.h b/include/net/ncsi.h
new file mode 100644
index 000000000..08a50d9ac
--- /dev/null
+++ b/include/net/ncsi.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_NCSI_H
+#define __NET_NCSI_H
+
+#include <linux/types.h>
+
+/*
+ * The NCSI device states visible externally. More NCSI device states are
+ * visible only internally (in net/ncsi/internal.h). When the NCSI device
+ * is registered, it is in the ncsi_dev_state_registered state. The state
+ * ncsi_dev_state_start drives the selection of the active package and
+ * channel. After that, the state changes to ncsi_dev_state_functional.
+ *
+ * The state ncsi_dev_state_stop helps to shut down the currently active
+ * package and channel, while ncsi_dev_state_config helps to reconfigure
+ * them.
+ */
+enum {
+ ncsi_dev_state_registered = 0x0000,
+ ncsi_dev_state_functional = 0x0100,
+ ncsi_dev_state_probe = 0x0200,
+ ncsi_dev_state_config = 0x0300,
+ ncsi_dev_state_suspend = 0x0400,
+};
+
+struct ncsi_dev {
+ int state;
+ int link_up;
+ struct net_device *dev;
+ void (*handler)(struct ncsi_dev *ndev);
+};
+
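+/* Typical driver usage (a sketch; handler name hypothetical, ordering per
+ * the state description above): register at probe time with a handler
+ * that reacts to state changes, start the state machine when the
+ * interface opens, and tear down in reverse order:
+ *
+ *	nd = ncsi_register_dev(netdev, my_handler);    (at probe)
+ *	ncsi_start_dev(nd);                            (at ndo_open)
+ *	ncsi_stop_dev(nd);                             (at ndo_stop)
+ *	ncsi_unregister_dev(nd);                       (at remove)
+ */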
+#ifdef CONFIG_NET_NCSI
+int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
+int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
+struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
+ void (*notifier)(struct ncsi_dev *nd));
+int ncsi_start_dev(struct ncsi_dev *nd);
+void ncsi_stop_dev(struct ncsi_dev *nd);
+void ncsi_unregister_dev(struct ncsi_dev *nd);
+#else /* !CONFIG_NET_NCSI */
+static inline int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ return -EINVAL;
+}
+
+static inline int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ return -EINVAL;
+}
+
+static inline struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
+ void (*notifier)(struct ncsi_dev *nd))
+{
+ return NULL;
+}
+
+static inline int ncsi_start_dev(struct ncsi_dev *nd)
+{
+ return -ENOTTY;
+}
+
+static inline void ncsi_stop_dev(struct ncsi_dev *nd)
+{
+}
+
+static inline void ncsi_unregister_dev(struct ncsi_dev *nd)
+{
+}
+#endif /* CONFIG_NET_NCSI */
+
+#endif /* __NET_NCSI_H */
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
new file mode 100644
index 000000000..325a6fb65
--- /dev/null
+++ b/include/net/ndisc.h
@@ -0,0 +1,498 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NDISC_H
+#define _NDISC_H
+
+#include <net/ipv6_stubs.h>
+
+/*
+ * ICMP codes for neighbour discovery messages
+ */
+
+#define NDISC_ROUTER_SOLICITATION 133
+#define NDISC_ROUTER_ADVERTISEMENT 134
+#define NDISC_NEIGHBOUR_SOLICITATION 135
+#define NDISC_NEIGHBOUR_ADVERTISEMENT 136
+#define NDISC_REDIRECT 137
+
+/*
+ * Router type: cross-layer information from link-layer to
+ * IPv6 layer reported by certain link types (e.g., RFC4214).
+ */
+#define NDISC_NODETYPE_UNSPEC 0 /* unspecified (default) */
+#define NDISC_NODETYPE_HOST 1 /* host or unauthorized router */
+#define NDISC_NODETYPE_NODEFAULT 2 /* non-default router */
+#define NDISC_NODETYPE_DEFAULT 3 /* default router */
+
+/*
+ * ndisc options
+ */
+
+enum {
+ __ND_OPT_PREFIX_INFO_END = 0,
+ ND_OPT_SOURCE_LL_ADDR = 1, /* RFC2461 */
+ ND_OPT_TARGET_LL_ADDR = 2, /* RFC2461 */
+ ND_OPT_PREFIX_INFO = 3, /* RFC2461 */
+ ND_OPT_REDIRECT_HDR = 4, /* RFC2461 */
+ ND_OPT_MTU = 5, /* RFC2461 */
+ ND_OPT_NONCE = 14, /* RFC7527 */
+ __ND_OPT_ARRAY_MAX,
+ ND_OPT_ROUTE_INFO = 24, /* RFC4191 */
+ ND_OPT_RDNSS = 25, /* RFC5006 */
+ ND_OPT_DNSSL = 31, /* RFC6106 */
+ ND_OPT_6CO = 34, /* RFC6775 */
+ ND_OPT_CAPTIVE_PORTAL = 37, /* RFC7710 */
+ ND_OPT_PREF64 = 38, /* RFC8781 */
+ __ND_OPT_MAX
+};
+
+#define MAX_RTR_SOLICITATION_DELAY HZ
+
+#define ND_REACHABLE_TIME (30*HZ)
+#define ND_RETRANS_TIMER HZ
+
+#include <linux/compiler.h>
+#include <linux/icmpv6.h>
+#include <linux/in6.h>
+#include <linux/types.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/hash.h>
+
+#include <net/neighbour.h>
+
+/* Set to 3 to get tracing... */
+#define ND_DEBUG 1
+
+#define ND_PRINTK(val, level, fmt, ...) \
+do { \
+ if (val <= ND_DEBUG) \
+ net_##level##_ratelimited(fmt, ##__VA_ARGS__); \
+} while (0)
+
+struct ctl_table;
+struct inet6_dev;
+struct net_device;
+struct net_proto_family;
+struct sk_buff;
+struct prefix_info;
+
+extern struct neigh_table nd_tbl;
+
+struct nd_msg {
+ struct icmp6hdr icmph;
+ struct in6_addr target;
+ __u8 opt[];
+};
+
+struct rs_msg {
+ struct icmp6hdr icmph;
+ __u8 opt[];
+};
+
+struct ra_msg {
+ struct icmp6hdr icmph;
+ __be32 reachable_time;
+ __be32 retrans_timer;
+};
+
+struct rd_msg {
+ struct icmp6hdr icmph;
+ struct in6_addr target;
+ struct in6_addr dest;
+ __u8 opt[];
+};
+
+struct nd_opt_hdr {
+ __u8 nd_opt_type;
+ __u8 nd_opt_len;
+} __packed;
+
+/* ND options */
+struct ndisc_options {
+ struct nd_opt_hdr *nd_opt_array[__ND_OPT_ARRAY_MAX];
+#ifdef CONFIG_IPV6_ROUTE_INFO
+ struct nd_opt_hdr *nd_opts_ri;
+ struct nd_opt_hdr *nd_opts_ri_end;
+#endif
+ struct nd_opt_hdr *nd_useropts;
+ struct nd_opt_hdr *nd_useropts_end;
+#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
+ struct nd_opt_hdr *nd_802154_opt_array[ND_OPT_TARGET_LL_ADDR + 1];
+#endif
+};
+
+#define nd_opts_src_lladdr nd_opt_array[ND_OPT_SOURCE_LL_ADDR]
+#define nd_opts_tgt_lladdr nd_opt_array[ND_OPT_TARGET_LL_ADDR]
+#define nd_opts_pi nd_opt_array[ND_OPT_PREFIX_INFO]
+#define nd_opts_pi_end nd_opt_array[__ND_OPT_PREFIX_INFO_END]
+#define nd_opts_rh nd_opt_array[ND_OPT_REDIRECT_HDR]
+#define nd_opts_mtu nd_opt_array[ND_OPT_MTU]
+#define nd_opts_nonce nd_opt_array[ND_OPT_NONCE]
+#define nd_802154_opts_src_lladdr nd_802154_opt_array[ND_OPT_SOURCE_LL_ADDR]
+#define nd_802154_opts_tgt_lladdr nd_802154_opt_array[ND_OPT_TARGET_LL_ADDR]
+
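+/* Total option size: the 2-byte type/length header plus data, rounded up
+ * to a multiple of 8, since ND options are sized in units of 8 octets
+ * (RFC4861).
+ */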
+#define NDISC_OPT_SPACE(len) (((len)+2+7)&~7)
+
+struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
+ u8 *opt, int opt_len,
+ struct ndisc_options *ndopts);
+
+void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data,
+ int data_len, int pad);
+
+#define NDISC_OPS_REDIRECT_DATA_SPACE 2
+
+/*
+ * This structure defines the hooks for IPv6 neighbour discovery.
+ * The following hooks can be defined; unless noted otherwise, they are
+ * optional and can be filled with a null pointer.
+ *
+ * int (*is_useropt)(u8 nd_opt_type):
+ * This function is called when IPv6 decides which RA options to treat
+ * as userspace options. If this function returns 1, the option given by
+ * nd_opt_type will be handled as a userspace option, in addition to the
+ * IPv6 options.
+ *
+ * int (*parse_options)(const struct net_device *dev,
+ * struct nd_opt_hdr *nd_opt,
+ * struct ndisc_options *ndopts):
+ * This function is called while parsing ndisc options; it stores a
+ * pointer to each recognised option in ndopts. If it returns non-zero,
+ * it has handled the ndisc option; if it returns 0, the IPv6 ndisc
+ * option parser will handle that option.
+ *
+ * void (*update)(const struct net_device *dev, struct neighbour *n,
+ * u32 flags, u8 icmp6_type,
+ * const struct ndisc_options *ndopts):
+ * This function is called when IPv6 ndisc updates the neighbour cache
+ * entry. Additional options, previously parsed by the parse_options
+ * callback, are accessible via the ndopts parameter.
+ *
+ * int (*opt_addr_space)(const struct net_device *dev, u8 icmp6_type,
+ * struct neighbour *neigh, u8 *ha_buf,
+ * u8 **ha):
+ * This function is called when the necessary option space is
+ * calculated, before allocating an skb. The parameters neigh, ha_buf
+ * and ha are available for NDISC_REDIRECT messages only.
+ *
+ * void (*fill_addr_option)(const struct net_device *dev,
+ * struct sk_buff *skb, u8 icmp6_type,
+ * const u8 *ha):
+ * This function is called when the option fields are finally filled
+ * into the skb. NOTE: this callback must fill exactly the option
+ * space previously indicated by the opt_addr_space callback, so the
+ * decision to add such an option must not change between these two
+ * callbacks, e.g. it should be guarded by the interface's up state.
+ *
+ * void (*prefix_rcv_add_addr)(struct net *net, struct net_device *dev,
+ * const struct prefix_info *pinfo,
+ * struct inet6_dev *in6_dev,
+ * struct in6_addr *addr,
+ * int addr_type, u32 addr_flags,
+ * bool sllao, bool tokenized,
+ * __u32 valid_lft, u32 prefered_lft,
+ * bool dev_addr_generated):
+ * This function is called when an RA message is received with valid
+ * PIO option fields and an IPv6 address will be added to the interface
+ * for autoconfiguration. The parameter dev_addr_generated reports
+ * whether the address was based on dev->dev_addr. This can be used
+ * to add a second address if the link layer operates with two
+ * link-layer addresses, e.g. 802.15.4 6LoWPAN.
+ */
+struct ndisc_ops {
+ int (*is_useropt)(u8 nd_opt_type);
+ int (*parse_options)(const struct net_device *dev,
+ struct nd_opt_hdr *nd_opt,
+ struct ndisc_options *ndopts);
+ void (*update)(const struct net_device *dev, struct neighbour *n,
+ u32 flags, u8 icmp6_type,
+ const struct ndisc_options *ndopts);
+ int (*opt_addr_space)(const struct net_device *dev, u8 icmp6_type,
+ struct neighbour *neigh, u8 *ha_buf,
+ u8 **ha);
+ void (*fill_addr_option)(const struct net_device *dev,
+ struct sk_buff *skb, u8 icmp6_type,
+ const u8 *ha);
+ void (*prefix_rcv_add_addr)(struct net *net, struct net_device *dev,
+ const struct prefix_info *pinfo,
+ struct inet6_dev *in6_dev,
+ struct in6_addr *addr,
+ int addr_type, u32 addr_flags,
+ bool sllao, bool tokenized,
+ __u32 valid_lft, u32 prefered_lft,
+ bool dev_addr_generated);
+};
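+
+/* Minimal illustrative implementation (hypothetical driver; not part of
+ * this header), accepting one extra RA option as a userspace option:
+ *
+ *	static int my_is_useropt(u8 nd_opt_type)
+ *	{
+ *		return nd_opt_type == ND_OPT_CAPTIVE_PORTAL;
+ *	}
+ *
+ *	static const struct ndisc_ops my_ndisc_ops = {
+ *		.is_useropt = my_is_useropt,
+ *	};
+ *
+ *	...
+ *	dev->ndisc_ops = &my_ndisc_ops;
+ */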
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int ndisc_ops_is_useropt(const struct net_device *dev,
+ u8 nd_opt_type)
+{
+ if (dev->ndisc_ops && dev->ndisc_ops->is_useropt)
+ return dev->ndisc_ops->is_useropt(nd_opt_type);
+ else
+ return 0;
+}
+
+static inline int ndisc_ops_parse_options(const struct net_device *dev,
+ struct nd_opt_hdr *nd_opt,
+ struct ndisc_options *ndopts)
+{
+ if (dev->ndisc_ops && dev->ndisc_ops->parse_options)
+ return dev->ndisc_ops->parse_options(dev, nd_opt, ndopts);
+ else
+ return 0;
+}
+
+static inline void ndisc_ops_update(const struct net_device *dev,
+ struct neighbour *n, u32 flags,
+ u8 icmp6_type,
+ const struct ndisc_options *ndopts)
+{
+ if (dev->ndisc_ops && dev->ndisc_ops->update)
+ dev->ndisc_ops->update(dev, n, flags, icmp6_type, ndopts);
+}
+
+static inline int ndisc_ops_opt_addr_space(const struct net_device *dev,
+ u8 icmp6_type)
+{
+ if (dev->ndisc_ops && dev->ndisc_ops->opt_addr_space &&
+ icmp6_type != NDISC_REDIRECT)
+ return dev->ndisc_ops->opt_addr_space(dev, icmp6_type, NULL,
+ NULL, NULL);
+ else
+ return 0;
+}
+
+static inline int ndisc_ops_redirect_opt_addr_space(const struct net_device *dev,
+ struct neighbour *neigh,
+ u8 *ha_buf, u8 **ha)
+{
+ if (dev->ndisc_ops && dev->ndisc_ops->opt_addr_space)
+ return dev->ndisc_ops->opt_addr_space(dev, NDISC_REDIRECT,
+ neigh, ha_buf, ha);
+ else
+ return 0;
+}
+
+static inline void ndisc_ops_fill_addr_option(const struct net_device *dev,
+ struct sk_buff *skb,
+ u8 icmp6_type)
+{
+ if (dev->ndisc_ops && dev->ndisc_ops->fill_addr_option &&
+ icmp6_type != NDISC_REDIRECT)
+ dev->ndisc_ops->fill_addr_option(dev, skb, icmp6_type, NULL);
+}
+
+static inline void ndisc_ops_fill_redirect_addr_option(const struct net_device *dev,
+ struct sk_buff *skb,
+ const u8 *ha)
+{
+ if (dev->ndisc_ops && dev->ndisc_ops->fill_addr_option)
+ dev->ndisc_ops->fill_addr_option(dev, skb, NDISC_REDIRECT, ha);
+}
+
+static inline void ndisc_ops_prefix_rcv_add_addr(struct net *net,
+ struct net_device *dev,
+ const struct prefix_info *pinfo,
+ struct inet6_dev *in6_dev,
+ struct in6_addr *addr,
+ int addr_type, u32 addr_flags,
+ bool sllao, bool tokenized,
+ __u32 valid_lft,
+ u32 prefered_lft,
+ bool dev_addr_generated)
+{
+ if (dev->ndisc_ops && dev->ndisc_ops->prefix_rcv_add_addr)
+ dev->ndisc_ops->prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
+ addr, addr_type,
+ addr_flags, sllao,
+ tokenized, valid_lft,
+ prefered_lft,
+ dev_addr_generated);
+}
+#endif
+
+/*
+ * Return the padding between the option length and the start of the
+ * link addr. Currently only IP-over-InfiniBand needs this, although
+ * if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may
+ * also need a pad of 2.
+ */
+static inline int ndisc_addr_option_pad(unsigned short type)
+{
+ switch (type) {
+ case ARPHRD_INFINIBAND: return 2;
+ default: return 0;
+ }
+}
+
+static inline int __ndisc_opt_addr_space(unsigned char addr_len, int pad)
+{
+ return NDISC_OPT_SPACE(addr_len + pad);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int ndisc_opt_addr_space(struct net_device *dev, u8 icmp6_type)
+{
+ return __ndisc_opt_addr_space(dev->addr_len,
+ ndisc_addr_option_pad(dev->type)) +
+ ndisc_ops_opt_addr_space(dev, icmp6_type);
+}
+
+static inline int ndisc_redirect_opt_addr_space(struct net_device *dev,
+ struct neighbour *neigh,
+ u8 *ops_data_buf,
+ u8 **ops_data)
+{
+ return __ndisc_opt_addr_space(dev->addr_len,
+ ndisc_addr_option_pad(dev->type)) +
+ ndisc_ops_redirect_opt_addr_space(dev, neigh, ops_data_buf,
+ ops_data);
+}
+#endif
+
+static inline u8 *__ndisc_opt_addr_data(struct nd_opt_hdr *p,
+ unsigned char addr_len, int prepad)
+{
+ u8 *lladdr = (u8 *)(p + 1);
+ int lladdrlen = p->nd_opt_len << 3;
+ if (lladdrlen != __ndisc_opt_addr_space(addr_len, prepad))
+ return NULL;
+ return lladdr + prepad;
+}
+
+static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
+ struct net_device *dev)
+{
+ return __ndisc_opt_addr_data(p, dev->addr_len,
+ ndisc_addr_option_pad(dev->type));
+}
+
+static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, __u32 *hash_rnd)
+{
+ const u32 *p32 = pkey;
+
+ return (((p32[0] ^ hash32_ptr(dev)) * hash_rnd[0]) +
+ (p32[1] * hash_rnd[1]) +
+ (p32[2] * hash_rnd[2]) +
+ (p32[3] * hash_rnd[3]));
+}
+
+static inline struct neighbour *__ipv6_neigh_lookup_noref(struct net_device *dev, const void *pkey)
+{
+ return ___neigh_lookup_noref(&nd_tbl, neigh_key_eq128, ndisc_hashfn, pkey, dev);
+}
+
+static inline
+struct neighbour *__ipv6_neigh_lookup_noref_stub(struct net_device *dev,
+ const void *pkey)
+{
+ return ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
+ ndisc_hashfn, pkey, dev);
+}
+
+static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, const void *pkey)
+{
+ struct neighbour *n;
+
+ rcu_read_lock();
+ n = __ipv6_neigh_lookup_noref(dev, pkey);
+ if (n && !refcount_inc_not_zero(&n->refcnt))
+ n = NULL;
+ rcu_read_unlock();
+
+ return n;
+}
+
+static inline void __ipv6_confirm_neigh(struct net_device *dev,
+ const void *pkey)
+{
+ struct neighbour *n;
+
+ rcu_read_lock();
+ n = __ipv6_neigh_lookup_noref(dev, pkey);
+ neigh_confirm(n);
+ rcu_read_unlock();
+}
+
+static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
+ const void *pkey)
+{
+ struct neighbour *n;
+
+ rcu_read_lock();
+ n = __ipv6_neigh_lookup_noref_stub(dev, pkey);
+ neigh_confirm(n);
+ rcu_read_unlock();
+}
+
+/* uses ipv6_stub and is meant for use outside of IPv6 core */
+static inline struct neighbour *ip_neigh_gw6(struct net_device *dev,
+ const void *addr)
+{
+ struct neighbour *neigh;
+
+ neigh = __ipv6_neigh_lookup_noref_stub(dev, addr);
+ if (unlikely(!neigh))
+ neigh = __neigh_create(ipv6_stub->nd_tbl, addr, dev, false);
+
+ return neigh;
+}
+
+int ndisc_init(void);
+int ndisc_late_init(void);
+
+void ndisc_late_cleanup(void);
+void ndisc_cleanup(void);
+
+int ndisc_rcv(struct sk_buff *skb);
+
+struct sk_buff *ndisc_ns_create(struct net_device *dev, const struct in6_addr *solicit,
+ const struct in6_addr *saddr, u64 nonce);
+void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
+ const struct in6_addr *daddr, const struct in6_addr *saddr,
+ u64 nonce);
+
+void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
+ const struct in6_addr *saddr);
+
+void ndisc_send_rs(struct net_device *dev,
+ const struct in6_addr *saddr, const struct in6_addr *daddr);
+void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr,
+ const struct in6_addr *solicited_addr,
+ bool router, bool solicited, bool override, bool inc_opt);
+
+void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target);
+
+int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev,
+ int dir);
+
+void ndisc_update(const struct net_device *dev, struct neighbour *neigh,
+ const u8 *lladdr, u8 new, u32 flags, u8 icmp6_type,
+ struct ndisc_options *ndopts);
+
+/*
+ * IGMP
+ */
+int igmp6_init(void);
+int igmp6_late_init(void);
+
+void igmp6_cleanup(void);
+void igmp6_late_cleanup(void);
+
+void igmp6_event_query(struct sk_buff *skb);
+
+void igmp6_event_report(struct sk_buff *skb);
+
+#ifdef CONFIG_SYSCTL
+int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen);
+#endif
+
+void inet6_ifinfo_notify(int event, struct inet6_dev *idev);
+
+#endif
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
new file mode 100644
index 000000000..ccc4a0f8b
--- /dev/null
+++ b/include/net/neighbour.h
@@ -0,0 +1,605 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_NEIGHBOUR_H
+#define _NET_NEIGHBOUR_H
+
+#include <linux/neighbour.h>
+
+/*
+ * Generic neighbour manipulation
+ *
+ * Authors:
+ * Pedro Roque <roque@di.fc.ul.pt>
+ * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+ *
+ * Changes:
+ *
+ * Harald Welte: <laforge@gnumonks.org>
+ * - Add neighbour cache statistics like rtstat
+ */
+
+#include <linux/atomic.h>
+#include <linux/refcount.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/bitmap.h>
+
+#include <linux/err.h>
+#include <linux/sysctl.h>
+#include <linux/workqueue.h>
+#include <net/rtnetlink.h>
+
+/*
+ * NUD stands for "neighbor unreachability detection"
+ */
+
+#define NUD_IN_TIMER (NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
+#define NUD_VALID (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
+#define NUD_CONNECTED (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE)
+
+struct neighbour;
+
+enum {
+ NEIGH_VAR_MCAST_PROBES,
+ NEIGH_VAR_UCAST_PROBES,
+ NEIGH_VAR_APP_PROBES,
+ NEIGH_VAR_MCAST_REPROBES,
+ NEIGH_VAR_RETRANS_TIME,
+ NEIGH_VAR_BASE_REACHABLE_TIME,
+ NEIGH_VAR_DELAY_PROBE_TIME,
+ NEIGH_VAR_INTERVAL_PROBE_TIME_MS,
+ NEIGH_VAR_GC_STALETIME,
+ NEIGH_VAR_QUEUE_LEN_BYTES,
+ NEIGH_VAR_PROXY_QLEN,
+ NEIGH_VAR_ANYCAST_DELAY,
+ NEIGH_VAR_PROXY_DELAY,
+ NEIGH_VAR_LOCKTIME,
+#define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1)
+ /* Following are used as a second way to access one of the above */
+ NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
+ NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */
+ NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
+ /* Following are used by "default" only */
+ NEIGH_VAR_GC_INTERVAL,
+ NEIGH_VAR_GC_THRESH1,
+ NEIGH_VAR_GC_THRESH2,
+ NEIGH_VAR_GC_THRESH3,
+ NEIGH_VAR_MAX
+};
+
+struct neigh_parms {
+ possible_net_t net;
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+ struct list_head list;
+ int (*neigh_setup)(struct neighbour *);
+ struct neigh_table *tbl;
+
+ void *sysctl_table;
+
+ int dead;
+ refcount_t refcnt;
+ struct rcu_head rcu_head;
+
+ int reachable_time;
+ u32 qlen;
+ int data[NEIGH_VAR_DATA_MAX];
+ DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
+};
+
+static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
+{
+ set_bit(index, p->data_state);
+ p->data[index] = val;
+}
+
+#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])
+
+/* In ndo_neigh_setup, NEIGH_VAR_INIT should be used.
+ * In other cases, NEIGH_VAR_SET should be used.
+ */
+#define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val)
+#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
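+
+/* For example, NEIGH_VAR_SET(p, GC_STALETIME, 60 * HZ) expands to
+ * neigh_var_set(p, NEIGH_VAR_GC_STALETIME, 60 * HZ), which also marks
+ * the entry in p->data_state as explicitly set.
+ */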
+
+static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
+{
+ bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX);
+}
+
+static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
+{
+ bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
+}
+
+struct neigh_statistics {
+ unsigned long allocs; /* number of allocated neighs */
+ unsigned long destroys; /* number of destroyed neighs */
+ unsigned long hash_grows; /* number of hash resizes */
+
+ unsigned long res_failed; /* number of failed resolutions */
+
+ unsigned long lookups; /* number of lookups */
+ unsigned long hits; /* number of hits (among lookups) */
+
+ unsigned long rcv_probes_mcast; /* number of received mcast ipv6 */
+ unsigned long rcv_probes_ucast; /* number of received ucast ipv6 */
+
+ unsigned long periodic_gc_runs; /* number of periodic GC runs */
+ unsigned long forced_gc_runs; /* number of forced GC runs */
+
+ unsigned long unres_discards; /* number of unresolved drops */
+ unsigned long table_fulls; /* times even gc couldn't help */
+};
+
+#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
+
+struct neighbour {
+ struct neighbour __rcu *next;
+ struct neigh_table *tbl;
+ struct neigh_parms *parms;
+ unsigned long confirmed;
+ unsigned long updated;
+ rwlock_t lock;
+ refcount_t refcnt;
+ unsigned int arp_queue_len_bytes;
+ struct sk_buff_head arp_queue;
+ struct timer_list timer;
+ unsigned long used;
+ atomic_t probes;
+ u8 nud_state;
+ u8 type;
+ u8 dead;
+ u8 protocol;
+ u32 flags;
+ seqlock_t ha_lock;
+ unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8);
+ struct hh_cache hh;
+ int (*output)(struct neighbour *, struct sk_buff *);
+ const struct neigh_ops *ops;
+ struct list_head gc_list;
+ struct list_head managed_list;
+ struct rcu_head rcu;
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+ u8 primary_key[0];
+} __randomize_layout;
+
+struct neigh_ops {
+ int family;
+ void (*solicit)(struct neighbour *, struct sk_buff *);
+ void (*error_report)(struct neighbour *, struct sk_buff *);
+ int (*output)(struct neighbour *, struct sk_buff *);
+ int (*connected_output)(struct neighbour *, struct sk_buff *);
+};
+
+struct pneigh_entry {
+ struct pneigh_entry *next;
+ possible_net_t net;
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+ u32 flags;
+ u8 protocol;
+ u32 key[];
+};
+
+/*
+ * neighbour table manipulation
+ */
+
+#define NEIGH_NUM_HASH_RND 4
+
+struct neigh_hash_table {
+ struct neighbour __rcu **hash_buckets;
+ unsigned int hash_shift;
+ __u32 hash_rnd[NEIGH_NUM_HASH_RND];
+ struct rcu_head rcu;
+};
+
+
+struct neigh_table {
+ int family;
+ unsigned int entry_size;
+ unsigned int key_len;
+ __be16 protocol;
+ __u32 (*hash)(const void *pkey,
+ const struct net_device *dev,
+ __u32 *hash_rnd);
+ bool (*key_eq)(const struct neighbour *, const void *pkey);
+ int (*constructor)(struct neighbour *);
+ int (*pconstructor)(struct pneigh_entry *);
+ void (*pdestructor)(struct pneigh_entry *);
+ void (*proxy_redo)(struct sk_buff *skb);
+ int (*is_multicast)(const void *pkey);
+ bool (*allow_add)(const struct net_device *dev,
+ struct netlink_ext_ack *extack);
+ char *id;
+ struct neigh_parms parms;
+ struct list_head parms_list;
+ int gc_interval;
+ int gc_thresh1;
+ int gc_thresh2;
+ int gc_thresh3;
+ unsigned long last_flush;
+ struct delayed_work gc_work;
+ struct delayed_work managed_work;
+ struct timer_list proxy_timer;
+ struct sk_buff_head proxy_queue;
+ atomic_t entries;
+ atomic_t gc_entries;
+ struct list_head gc_list;
+ struct list_head managed_list;
+ rwlock_t lock;
+ unsigned long last_rand;
+ struct neigh_statistics __percpu *stats;
+ struct neigh_hash_table __rcu *nht;
+ struct pneigh_entry **phash_buckets;
+};
+
+enum {
+ NEIGH_ARP_TABLE = 0,
+ NEIGH_ND_TABLE = 1,
+ NEIGH_DN_TABLE = 2,
+ NEIGH_NR_TABLES,
+ NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
+};
+
+static inline int neigh_parms_family(struct neigh_parms *p)
+{
+ return p->tbl->family;
+}
+
+#define NEIGH_PRIV_ALIGN sizeof(long long)
+#define NEIGH_ENTRY_SIZE(size) ALIGN((size), NEIGH_PRIV_ALIGN)
+
+static inline void *neighbour_priv(const struct neighbour *n)
+{
+ return (char *)n + n->tbl->entry_size;
+}
+
+/* flags for neigh_update() */
+#define NEIGH_UPDATE_F_OVERRIDE BIT(0)
+#define NEIGH_UPDATE_F_WEAK_OVERRIDE BIT(1)
+#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER BIT(2)
+#define NEIGH_UPDATE_F_USE BIT(3)
+#define NEIGH_UPDATE_F_MANAGED BIT(4)
+#define NEIGH_UPDATE_F_EXT_LEARNED BIT(5)
+#define NEIGH_UPDATE_F_ISROUTER BIT(6)
+#define NEIGH_UPDATE_F_ADMIN BIT(7)
+
+/* In-kernel representation for NDA_FLAGS_EXT flags: */
+#define NTF_OLD_MASK 0xff
+#define NTF_EXT_SHIFT 8
+#define NTF_EXT_MASK (NTF_EXT_MANAGED)
+
+#define NTF_MANAGED (NTF_EXT_MANAGED << NTF_EXT_SHIFT)
+
+extern const struct nla_policy nda_policy[];
+
+static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
+{
+ return *(const u32 *)n->primary_key == *(const u32 *)pkey;
+}
+
+static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey)
+{
+ const u32 *n32 = (const u32 *)n->primary_key;
+ const u32 *p32 = pkey;
+
+ return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
+ (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
+}
+
+static inline struct neighbour *___neigh_lookup_noref(
+ struct neigh_table *tbl,
+ bool (*key_eq)(const struct neighbour *n, const void *pkey),
+ __u32 (*hash)(const void *pkey,
+ const struct net_device *dev,
+ __u32 *hash_rnd),
+ const void *pkey,
+ struct net_device *dev)
+{
+ struct neigh_hash_table *nht = rcu_dereference(tbl->nht);
+ struct neighbour *n;
+ u32 hash_val;
+
+ hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+ for (n = rcu_dereference(nht->hash_buckets[hash_val]);
+ n != NULL;
+ n = rcu_dereference(n->next)) {
+ if (n->dev == dev && key_eq(n, pkey))
+ return n;
+ }
+
+ return NULL;
+}
+
+static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
+ const void *pkey,
+ struct net_device *dev)
+{
+ return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
+}
+
+static inline void neigh_confirm(struct neighbour *n)
+{
+ if (n) {
+ unsigned long now = jiffies;
+
+ /* avoid dirtying neighbour */
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
+ }
+}
+
+void neigh_table_init(int index, struct neigh_table *tbl);
+int neigh_table_clear(int index, struct neigh_table *tbl);
+struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev);
+struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev, bool want_ref);
+static inline struct neighbour *neigh_create(struct neigh_table *tbl,
+ const void *pkey,
+ struct net_device *dev)
+{
+ return __neigh_create(tbl, pkey, dev, true);
+}
+void neigh_destroy(struct neighbour *neigh);
+int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
+ const bool immediate_ok);
+int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
+ u32 nlmsg_pid);
+void __neigh_set_probe_once(struct neighbour *neigh);
+bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl);
+void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
+int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev);
+int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
+struct neighbour *neigh_event_ns(struct neigh_table *tbl,
+ u8 *lladdr, void *saddr,
+ struct net_device *dev);
+
+struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
+ struct neigh_table *tbl);
+void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
+
+static inline
+struct net *neigh_parms_net(const struct neigh_parms *parms)
+{
+ return read_pnet(&parms->net);
+}
+
+unsigned long neigh_rand_reach_time(unsigned long base);
+
+void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
+ struct sk_buff *skb);
+struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
+ const void *key, struct net_device *dev,
+ int creat);
+struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
+ const void *key, struct net_device *dev);
+int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
+ struct net_device *dev);
+
+static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
+{
+ return read_pnet(&pneigh->net);
+}
+
+void neigh_app_ns(struct neighbour *n);
+void neigh_for_each(struct neigh_table *tbl,
+ void (*cb)(struct neighbour *, void *), void *cookie);
+void __neigh_for_each_release(struct neigh_table *tbl,
+ int (*cb)(struct neighbour *));
+int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
+void pneigh_for_each(struct neigh_table *tbl,
+ void (*cb)(struct pneigh_entry *));
+
+struct neigh_seq_state {
+ struct seq_net_private p;
+ struct neigh_table *tbl;
+ struct neigh_hash_table *nht;
+ void *(*neigh_sub_iter)(struct neigh_seq_state *state,
+ struct neighbour *n, loff_t *pos);
+ unsigned int bucket;
+ unsigned int flags;
+#define NEIGH_SEQ_NEIGH_ONLY 0x00000001
+#define NEIGH_SEQ_IS_PNEIGH 0x00000002
+#define NEIGH_SEQ_SKIP_NOARP 0x00000004
+};
+void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
+ unsigned int);
+void *neigh_seq_next(struct seq_file *, void *, loff_t *);
+void neigh_seq_stop(struct seq_file *, void *);
+
+int neigh_proc_dointvec(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
+ void *buffer,
+ size_t *lenp, loff_t *ppos);
+int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+
+int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
+ proc_handler *proc_handler);
+void neigh_sysctl_unregister(struct neigh_parms *p);
+
+static inline void __neigh_parms_put(struct neigh_parms *parms)
+{
+ refcount_dec(&parms->refcnt);
+}
+
+static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
+{
+ refcount_inc(&parms->refcnt);
+ return parms;
+}
+
+/*
+ * Neighbour references
+ */
+
+static inline void neigh_release(struct neighbour *neigh)
+{
+ if (refcount_dec_and_test(&neigh->refcnt))
+ neigh_destroy(neigh);
+}
+
+static inline struct neighbour *neigh_clone(struct neighbour *neigh)
+{
+ if (neigh)
+ refcount_inc(&neigh->refcnt);
+ return neigh;
+}
+
+#define neigh_hold(n) refcount_inc(&(n)->refcnt)
+
+static __always_inline int neigh_event_send_probe(struct neighbour *neigh,
+ struct sk_buff *skb,
+ const bool immediate_ok)
+{
+ unsigned long now = jiffies;
+
+ if (READ_ONCE(neigh->used) != now)
+ WRITE_ONCE(neigh->used, now);
+ if (!(READ_ONCE(neigh->nud_state) & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)))
+ return __neigh_event_send(neigh, skb, immediate_ok);
+ return 0;
+}
+
+static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+{
+ return neigh_event_send_probe(neigh, skb, true);
+}
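+
+/* Usage sketch (editorial illustration): a resolving output path hands
+ * the skb to neigh_event_send() and only transmits when it returns 0,
+ * i.e. the entry is usable; on nonzero the skb was consumed by the
+ * resolution machinery. The transmit step below is hypothetical.
+ *
+ *   if (!neigh_event_send(neigh, skb)) {
+ *           ... fill the link-layer header and transmit ...
+ *   }
+ */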
+
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
+{
+ unsigned int seq, hh_alen;
+
+ do {
+ seq = read_seqbegin(&hh->hh_lock);
+ hh_alen = HH_DATA_ALIGN(ETH_HLEN);
+ memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN);
+ } while (read_seqretry(&hh->hh_lock, seq));
+ return 0;
+}
+#endif
+
+static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
+{
+ unsigned int hh_alen = 0;
+ unsigned int seq;
+ unsigned int hh_len;
+
+ do {
+ seq = read_seqbegin(&hh->hh_lock);
+ hh_len = READ_ONCE(hh->hh_len);
+ if (likely(hh_len <= HH_DATA_MOD)) {
+ hh_alen = HH_DATA_MOD;
+
+ /* skb_push() would proceed silently if we have room for
+ * the unaligned size but not for the aligned size:
+ * check headroom explicitly.
+ */
+ if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
+ /* this is inlined by gcc */
+ memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
+ HH_DATA_MOD);
+ }
+ } else {
+ hh_alen = HH_DATA_ALIGN(hh_len);
+
+ if (likely(skb_headroom(skb) >= hh_alen)) {
+ memcpy(skb->data - hh_alen, hh->hh_data,
+ hh_alen);
+ }
+ }
+ } while (read_seqretry(&hh->hh_lock, seq));
+
+ if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ __skb_push(skb, hh_len);
+ return dev_queue_xmit(skb);
+}
+
+static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
+ bool skip_cache)
+{
+ const struct hh_cache *hh = &n->hh;
+
+ /* n->nud_state and hh->hh_len could be changed under us.
+ * neigh_hh_output() is taking care of the race later.
+ */
+ if (!skip_cache &&
+ (READ_ONCE(n->nud_state) & NUD_CONNECTED) &&
+ READ_ONCE(hh->hh_len))
+ return neigh_hh_output(hh, skb);
+
+ return READ_ONCE(n->output)(n, skb);
+}
+
+static inline struct neighbour *
+__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
+{
+ struct neighbour *n = neigh_lookup(tbl, pkey, dev);
+
+ if (n || !creat)
+ return n;
+
+ n = neigh_create(tbl, pkey, dev);
+ return IS_ERR(n) ? NULL : n;
+}
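+
+/* Usage sketch (editorial illustration): lookup-or-create, dropping the
+ * reference once done; "tbl", "ip" and "dev" are hypothetical.
+ *
+ *   struct neighbour *n = __neigh_lookup(tbl, &ip, dev, 1);
+ *
+ *   if (n) {
+ *           ... use n ...
+ *           neigh_release(n);
+ *   }
+ */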
+
+static inline struct neighbour *
+__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev)
+{
+ struct neighbour *n = neigh_lookup(tbl, pkey, dev);
+
+ if (n)
+ return n;
+
+ return neigh_create(tbl, pkey, dev);
+}
+
+struct neighbour_cb {
+ unsigned long sched_next;
+ unsigned int flags;
+};
+
+#define LOCALLY_ENQUEUED 0x1
+
+#define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
+
+static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
+ const struct net_device *dev)
+{
+ unsigned int seq;
+
+ do {
+ seq = read_seqbegin(&n->ha_lock);
+ memcpy(dst, n->ha, dev->addr_len);
+ } while (read_seqretry(&n->ha_lock, seq));
+}
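+
+/* Usage sketch (editorial illustration): snapshot a consistent copy of
+ * the hardware address without holding the entry's lock across the use;
+ * MAX_ADDR_LEN comes from <linux/netdevice.h>.
+ *
+ *   char ha[MAX_ADDR_LEN];
+ *
+ *   neigh_ha_snapshot(ha, n, n->dev);
+ */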
+
+static inline void neigh_update_is_router(struct neighbour *neigh, u32 flags,
+ int *notify)
+{
+ u8 ndm_flags = 0;
+
+ ndm_flags |= (flags & NEIGH_UPDATE_F_ISROUTER) ? NTF_ROUTER : 0;
+ if ((neigh->flags ^ ndm_flags) & NTF_ROUTER) {
+ if (ndm_flags & NTF_ROUTER)
+ neigh->flags |= NTF_ROUTER;
+ else
+ neigh->flags &= ~NTF_ROUTER;
+ *notify = 1;
+ }
+}
+#endif
diff --git a/include/net/net_debug.h b/include/net/net_debug.h
new file mode 100644
index 000000000..1e74684cb
--- /dev/null
+++ b/include/net/net_debug.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_NET_DEBUG_H
+#define _LINUX_NET_DEBUG_H
+
+#include <linux/bug.h>
+#include <linux/kern_levels.h>
+
+struct net_device;
+
+__printf(3, 4) __cold
+void netdev_printk(const char *level, const struct net_device *dev,
+ const char *format, ...);
+__printf(2, 3) __cold
+void netdev_emerg(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_alert(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_crit(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_err(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_warn(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_notice(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_info(const struct net_device *dev, const char *format, ...);
+
+#define netdev_level_once(level, dev, fmt, ...) \
+do { \
+ static bool __section(".data.once") __print_once; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ netdev_printk(level, dev, fmt, ##__VA_ARGS__); \
+ } \
+} while (0)
+
+#define netdev_emerg_once(dev, fmt, ...) \
+ netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
+#define netdev_alert_once(dev, fmt, ...) \
+ netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
+#define netdev_crit_once(dev, fmt, ...) \
+ netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
+#define netdev_err_once(dev, fmt, ...) \
+ netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
+#define netdev_warn_once(dev, fmt, ...) \
+ netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
+#define netdev_notice_once(dev, fmt, ...) \
+ netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
+#define netdev_info_once(dev, fmt, ...) \
+ netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
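+
+/* Usage sketch (editorial illustration): emit a message once per boot
+ * instead of flooding the log; the format string is hypothetical.
+ *
+ *   netdev_warn_once(dev, "hardware checksum failed, disabling\n");
+ */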
+
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
+#define netdev_dbg(__dev, format, args...) \
+do { \
+ dynamic_netdev_dbg(__dev, format, ##args); \
+} while (0)
+#elif defined(DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args)
+#else
+#define netdev_dbg(__dev, format, args...) \
+({ \
+ if (0) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args); \
+})
+#endif
+
+#if defined(VERBOSE_DEBUG)
+#define netdev_vdbg netdev_dbg
+#else
+
+#define netdev_vdbg(dev, format, args...) \
+({ \
+ if (0) \
+ netdev_printk(KERN_DEBUG, dev, format, ##args); \
+ 0; \
+})
+#endif
+
+/* netif printk helpers, similar to netdev_printk */
+
+#define netif_printk(priv, type, level, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_printk(level, (dev), fmt, ##args); \
+} while (0)
+
+#define netif_level(level, priv, type, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_##level(dev, fmt, ##args); \
+} while (0)
+
+#define netif_emerg(priv, type, dev, fmt, args...) \
+ netif_level(emerg, priv, type, dev, fmt, ##args)
+#define netif_alert(priv, type, dev, fmt, args...) \
+ netif_level(alert, priv, type, dev, fmt, ##args)
+#define netif_crit(priv, type, dev, fmt, args...) \
+ netif_level(crit, priv, type, dev, fmt, ##args)
+#define netif_err(priv, type, dev, fmt, args...) \
+ netif_level(err, priv, type, dev, fmt, ##args)
+#define netif_warn(priv, type, dev, fmt, args...) \
+ netif_level(warn, priv, type, dev, fmt, ##args)
+#define netif_notice(priv, type, dev, fmt, args...) \
+ netif_level(notice, priv, type, dev, fmt, ##args)
+#define netif_info(priv, type, dev, fmt, args...) \
+ netif_level(info, priv, type, dev, fmt, ##args)
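+
+/* Usage sketch (editorial illustration): "priv" is assumed to be a
+ * driver private struct whose ->msg_enable field backs the
+ * netif_msg_<type>() tests from <linux/netdevice.h>; the line is
+ * printed only when the driver's NETIF_MSG_LINK bit is set.
+ *
+ *   netif_info(priv, link, netdev, "link up, %u Mbps\n", speed);
+ */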
+
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
+#define netif_dbg(priv, type, netdev, format, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ dynamic_netdev_dbg(netdev, format, ##args); \
+} while (0)
+#elif defined(DEBUG)
+#define netif_dbg(priv, type, dev, format, args...) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
+#else
+#define netif_dbg(priv, type, dev, format, args...) \
+({ \
+ if (0) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
+ 0; \
+})
+#endif
+
+/* if @cond then downgrade to debug, else print at @level */
+#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \
+ do { \
+ if (cond) \
+ netif_dbg(priv, type, netdev, fmt, ##args); \
+ else \
+ netif_ ## level(priv, type, netdev, fmt, ##args); \
+ } while (0)
+
+#if defined(VERBOSE_DEBUG)
+#define netif_vdbg netif_dbg
+#else
+#define netif_vdbg(priv, type, dev, format, args...) \
+({ \
+ if (0) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
+ 0; \
+})
+#endif
+
+#if defined(CONFIG_DEBUG_NET)
+#define DEBUG_NET_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
+#else
+#define DEBUG_NET_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#endif
+
+#endif /* _LINUX_NET_DEBUG_H */
diff --git a/include/net/net_failover.h b/include/net/net_failover.h
new file mode 100644
index 000000000..b12a1c469
--- /dev/null
+++ b/include/net/net_failover.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _NET_FAILOVER_H
+#define _NET_FAILOVER_H
+
+#include <net/failover.h>
+
+/* failover state */
+struct net_failover_info {
+ /* primary netdev with same MAC */
+ struct net_device __rcu *primary_dev;
+
+ /* standby netdev */
+ struct net_device __rcu *standby_dev;
+
+ /* primary netdev stats */
+ struct rtnl_link_stats64 primary_stats;
+
+ /* standby netdev stats */
+ struct rtnl_link_stats64 standby_stats;
+
+ /* aggregated stats */
+ struct rtnl_link_stats64 failover_stats;
+
+ /* spinlock while updating stats */
+ spinlock_t stats_lock;
+};
+
+struct failover *net_failover_create(struct net_device *standby_dev);
+void net_failover_destroy(struct failover *failover);
+
+#define FAILOVER_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_HIGHDMA | NETIF_F_LRO)
+
+#define FAILOVER_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
+
+#endif /* _NET_FAILOVER_H */
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
new file mode 100644
index 000000000..8c3587d5c
--- /dev/null
+++ b/include/net/net_namespace.h
@@ -0,0 +1,530 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Operations on the network namespace
+ */
+#ifndef __NET_NET_NAMESPACE_H
+#define __NET_NET_NAMESPACE_H
+
+#include <linux/atomic.h>
+#include <linux/refcount.h>
+#include <linux/workqueue.h>
+#include <linux/list.h>
+#include <linux/sysctl.h>
+#include <linux/uidgid.h>
+
+#include <net/flow.h>
+#include <net/netns/core.h>
+#include <net/netns/mib.h>
+#include <net/netns/unix.h>
+#include <net/netns/packet.h>
+#include <net/netns/ipv4.h>
+#include <net/netns/ipv6.h>
+#include <net/netns/nexthop.h>
+#include <net/netns/ieee802154_6lowpan.h>
+#include <net/netns/sctp.h>
+#include <net/netns/netfilter.h>
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#include <net/netns/conntrack.h>
+#endif
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+#include <net/netns/flow_table.h>
+#endif
+#include <net/netns/nftables.h>
+#include <net/netns/xfrm.h>
+#include <net/netns/mpls.h>
+#include <net/netns/can.h>
+#include <net/netns/xdp.h>
+#include <net/netns/smc.h>
+#include <net/netns/bpf.h>
+#include <net/netns/mctp.h>
+#include <net/net_trackers.h>
+#include <linux/ns_common.h>
+#include <linux/idr.h>
+#include <linux/skbuff.h>
+#include <linux/notifier.h>
+
+struct user_namespace;
+struct proc_dir_entry;
+struct net_device;
+struct sock;
+struct ctl_table_header;
+struct net_generic;
+struct uevent_sock;
+struct netns_ipvs;
+struct bpf_prog;
+
+
+#define NETDEV_HASHBITS 8
+#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
+
+struct net {
+ /* First cache line can be often dirtied.
+ * Do not place here read-mostly fields.
+ */
+ refcount_t passive; /* To decide when the network
+ * namespace should be freed.
+ */
+ spinlock_t rules_mod_lock;
+
+ atomic_t dev_unreg_count;
+
+ unsigned int dev_base_seq; /* protected by rtnl_mutex */
+ int ifindex;
+
+ spinlock_t nsid_lock;
+ atomic_t fnhe_genid;
+
+ struct list_head list; /* list of network namespaces */
+ struct list_head exit_list; /* Used to link dead nets so that
+ * pernet exit methods can be called
+ * on them (pernet_ops_rwsem read
+ * locked), or to unregister pernet
+ * ops (pernet_ops_rwsem write
+ * locked).
+ */
+ struct llist_node cleanup_list; /* namespaces on death row */
+
+#ifdef CONFIG_KEYS
+ struct key_tag *key_domain; /* Key domain of operation tag */
+#endif
+ struct user_namespace *user_ns; /* Owning user namespace */
+ struct ucounts *ucounts;
+ struct idr netns_ids;
+
+ struct ns_common ns;
+ struct ref_tracker_dir refcnt_tracker;
+
+ struct list_head dev_base_head;
+ struct proc_dir_entry *proc_net;
+ struct proc_dir_entry *proc_net_stat;
+
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_set sysctls;
+#endif
+
+ struct sock *rtnl; /* rtnetlink socket */
+ struct sock *genl_sock;
+
+ struct uevent_sock *uevent_sock; /* uevent socket */
+
+ struct hlist_head *dev_name_head;
+ struct hlist_head *dev_index_head;
+ struct raw_notifier_head netdev_chain;
+
+ /* Note that @hash_mix can be read millions of times per second;
+ * it is critical that it is on a read_mostly cache line.
+ */
+ u32 hash_mix;
+
+ struct net_device *loopback_dev; /* The loopback */
+
+ /* core fib_rules */
+ struct list_head rules_ops;
+
+ struct netns_core core;
+ struct netns_mib mib;
+ struct netns_packet packet;
+#if IS_ENABLED(CONFIG_UNIX)
+ struct netns_unix unx;
+#endif
+ struct netns_nexthop nexthop;
+ struct netns_ipv4 ipv4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct netns_ipv6 ipv6;
+#endif
+#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
+ struct netns_ieee802154_lowpan ieee802154_lowpan;
+#endif
+#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
+ struct netns_sctp sctp;
+#endif
+#ifdef CONFIG_NETFILTER
+ struct netns_nf nf;
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ struct netns_ct ct;
+#endif
+#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
+ struct netns_nftables nft;
+#endif
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ struct netns_ft ft;
+#endif
+#endif
+#ifdef CONFIG_WEXT_CORE
+ struct sk_buff_head wext_nlevents;
+#endif
+ struct net_generic __rcu *gen;
+
+ /* Used to store attached BPF programs */
+ struct netns_bpf bpf;
+
+ /* Note : following structs are cache line aligned */
+#ifdef CONFIG_XFRM
+ struct netns_xfrm xfrm;
+#endif
+
+ u64 net_cookie; /* written once */
+
+#if IS_ENABLED(CONFIG_IP_VS)
+ struct netns_ipvs *ipvs;
+#endif
+#if IS_ENABLED(CONFIG_MPLS)
+ struct netns_mpls mpls;
+#endif
+#if IS_ENABLED(CONFIG_CAN)
+ struct netns_can can;
+#endif
+#ifdef CONFIG_XDP_SOCKETS
+ struct netns_xdp xdp;
+#endif
+#if IS_ENABLED(CONFIG_MCTP)
+ struct netns_mctp mctp;
+#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
+ struct sock *crypto_nlsk;
+#endif
+ struct sock *diag_nlsk;
+#if IS_ENABLED(CONFIG_SMC)
+ struct netns_smc smc;
+#endif
+} __randomize_layout;
+
+#include <linux/seq_file_net.h>
+
+/* Init's network namespace */
+extern struct net init_net;
+
+#ifdef CONFIG_NET_NS
+struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
+ struct net *old_net);
+
+void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
+
+void net_ns_barrier(void);
+
+struct ns_common *get_net_ns(struct ns_common *ns);
+struct net *get_net_ns_by_fd(int fd);
+#else /* CONFIG_NET_NS */
+#include <linux/sched.h>
+#include <linux/nsproxy.h>
+static inline struct net *copy_net_ns(unsigned long flags,
+ struct user_namespace *user_ns, struct net *old_net)
+{
+ if (flags & CLONE_NEWNET)
+ return ERR_PTR(-EINVAL);
+ return old_net;
+}
+
+static inline void net_ns_get_ownership(const struct net *net,
+ kuid_t *uid, kgid_t *gid)
+{
+ *uid = GLOBAL_ROOT_UID;
+ *gid = GLOBAL_ROOT_GID;
+}
+
+static inline void net_ns_barrier(void) {}
+
+static inline struct ns_common *get_net_ns(struct ns_common *ns)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct net *get_net_ns_by_fd(int fd)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_NET_NS */
+
+
+extern struct list_head net_namespace_list;
+
+struct net *get_net_ns_by_pid(pid_t pid);
+
+#ifdef CONFIG_SYSCTL
+void ipx_register_sysctl(void);
+void ipx_unregister_sysctl(void);
+#else
+#define ipx_register_sysctl()
+#define ipx_unregister_sysctl()
+#endif
+
+#ifdef CONFIG_NET_NS
+void __put_net(struct net *net);
+
+/* Try using get_net_track() instead */
+static inline struct net *get_net(struct net *net)
+{
+ refcount_inc(&net->ns.count);
+ return net;
+}
+
+static inline struct net *maybe_get_net(struct net *net)
+{
+ /* Used when we know struct net exists but we
+ * aren't guaranteed a previous reference count
+ * exists. If the reference count is zero this
+ * function fails and returns NULL.
+ */
+ if (!refcount_inc_not_zero(&net->ns.count))
+ net = NULL;
+ return net;
+}
+
+/* Try using put_net_track() instead */
+static inline void put_net(struct net *net)
+{
+ if (refcount_dec_and_test(&net->ns.count))
+ __put_net(net);
+}
+
+static inline
+int net_eq(const struct net *net1, const struct net *net2)
+{
+ return net1 == net2;
+}
+
+static inline int check_net(const struct net *net)
+{
+ return refcount_read(&net->ns.count) != 0;
+}
+
+void net_drop_ns(void *);
+
+#else
+
+static inline struct net *get_net(struct net *net)
+{
+ return net;
+}
+
+static inline void put_net(struct net *net)
+{
+}
+
+static inline struct net *maybe_get_net(struct net *net)
+{
+ return net;
+}
+
+static inline
+int net_eq(const struct net *net1, const struct net *net2)
+{
+ return 1;
+}
+
+static inline int check_net(const struct net *net)
+{
+ return 1;
+}
+
+#define net_drop_ns NULL
+#endif
+
+
+static inline void netns_tracker_alloc(struct net *net,
+ netns_tracker *tracker, gfp_t gfp)
+{
+#ifdef CONFIG_NET_NS_REFCNT_TRACKER
+ ref_tracker_alloc(&net->refcnt_tracker, tracker, gfp);
+#endif
+}
+
+static inline void netns_tracker_free(struct net *net,
+ netns_tracker *tracker)
+{
+#ifdef CONFIG_NET_NS_REFCNT_TRACKER
+ ref_tracker_free(&net->refcnt_tracker, tracker);
+#endif
+}
+
+static inline struct net *get_net_track(struct net *net,
+ netns_tracker *tracker, gfp_t gfp)
+{
+ get_net(net);
+ netns_tracker_alloc(net, tracker, gfp);
+ return net;
+}
+
+static inline void put_net_track(struct net *net, netns_tracker *tracker)
+{
+ netns_tracker_free(net, tracker);
+ put_net(net);
+}
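+
+/* Usage sketch (editorial illustration): long-lived objects that hold a
+ * netns reference pair get_net_track()/put_net_track() so that
+ * CONFIG_NET_NS_REFCNT_TRACKER builds can pinpoint a leaked reference;
+ * "struct foo" is hypothetical.
+ *
+ *   struct foo {
+ *           struct net *net;
+ *           netns_tracker ns_tracker;
+ *   };
+ *
+ *   foo->net = get_net_track(net, &foo->ns_tracker, GFP_KERNEL);
+ *   ...
+ *   put_net_track(foo->net, &foo->ns_tracker);
+ */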
+
+typedef struct {
+#ifdef CONFIG_NET_NS
+ struct net *net;
+#endif
+} possible_net_t;
+
+static inline void write_pnet(possible_net_t *pnet, struct net *net)
+{
+#ifdef CONFIG_NET_NS
+ pnet->net = net;
+#endif
+}
+
+static inline struct net *read_pnet(const possible_net_t *pnet)
+{
+#ifdef CONFIG_NET_NS
+ return pnet->net;
+#else
+ return &init_net;
+#endif
+}
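+
+/* Usage sketch (editorial illustration): embedding possible_net_t costs
+ * no storage on !CONFIG_NET_NS builds, where read_pnet() simply returns
+ * &init_net; "struct bar" is hypothetical.
+ *
+ *   struct bar {
+ *           possible_net_t net;
+ *   };
+ *
+ *   write_pnet(&bar->net, net);
+ *   struct net *n = read_pnet(&bar->net);
+ */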
+
+/* Protected by net_rwsem */
+#define for_each_net(VAR) \
+ list_for_each_entry(VAR, &net_namespace_list, list)
+#define for_each_net_continue_reverse(VAR) \
+ list_for_each_entry_continue_reverse(VAR, &net_namespace_list, list)
+#define for_each_net_rcu(VAR) \
+ list_for_each_entry_rcu(VAR, &net_namespace_list, list)
+
+#ifdef CONFIG_NET_NS
+#define __net_init
+#define __net_exit
+#define __net_initdata
+#define __net_initconst
+#else
+#define __net_init __init
+#define __net_exit __ref
+#define __net_initdata __initdata
+#define __net_initconst __initconst
+#endif
+
+int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
+int peernet2id(const struct net *net, struct net *peer);
+bool peernet_has_id(const struct net *net, struct net *peer);
+struct net *get_net_ns_by_id(const struct net *net, int id);
+
+struct pernet_operations {
+ struct list_head list;
+ /*
+ * The methods below are called without any exclusive locks.
+ * More than one net may be constructed and destructed
+ * in parallel on several cpus. Every pernet_operations
+ * instance has to keep all other pernet_operations in mind
+ * and introduce locking if they share common resources.
+ *
+ * The only time they are called with an exclusive lock is
+ * from register_pernet_subsys(), unregister_pernet_subsys(),
+ * register_pernet_device() and unregister_pernet_device().
+ *
+ * Exit methods using blocking RCU primitives, such as
+ * synchronize_rcu(), should be implemented via exit_batch.
+ * Then, destruction of a group of nets requires a single
+ * synchronize_rcu() related to these pernet_operations,
+ * instead of a separate synchronize_rcu() for every net.
+ * Please avoid synchronize_rcu() altogether where possible.
+ *
+ * Note that a combination of pre_exit() and exit() can
+ * be used, since a synchronize_rcu() is guaranteed between
+ * the calls.
+ */
+ int (*init)(struct net *net);
+ void (*pre_exit)(struct net *net);
+ void (*exit)(struct net *net);
+ void (*exit_batch)(struct list_head *net_exit_list);
+ unsigned int *id;
+ size_t size;
+};
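+
+/* Usage sketch (editorial illustration): the common pattern pairs .id
+ * and .size with net_generic() (from <net/netns/generic.h>) so every
+ * namespace gets its own private area; all "foo" names are
+ * hypothetical.
+ *
+ *   static unsigned int foo_net_id;
+ *
+ *   struct foo_net {
+ *           int counter;
+ *   };
+ *
+ *   static int __net_init foo_net_init(struct net *net)
+ *   {
+ *           struct foo_net *fn = net_generic(net, foo_net_id);
+ *
+ *           fn->counter = 0;
+ *           return 0;
+ *   }
+ *
+ *   static struct pernet_operations foo_net_ops = {
+ *           .init = foo_net_init,
+ *           .id   = &foo_net_id,
+ *           .size = sizeof(struct foo_net),
+ *   };
+ *
+ * Registered from module init with register_pernet_subsys(&foo_net_ops).
+ */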
+
+/*
+ * Use these carefully. If you implement a network device and it
+ * needs per network namespace operations, use device pernet operations;
+ * otherwise use pernet subsys operations.
+ *
+ * Network interfaces need to be removed from a dying netns _before_
+ * subsys notifiers can be called, as most of the network code cleanup
+ * (which is done from subsys notifiers) runs with the assumption that
+ * dev_remove_pack has been called so no new packets will arrive during
+ * and after the cleanup functions have been called. dev_remove_pack
+ * is not per namespace so instead the guarantee of no more packets
+ * arriving in a network namespace is provided by ensuring that all
+ * network devices and all sockets have left the network namespace
+ * before the cleanup methods are called.
+ *
+ * For the longest time the ipv4 icmp code was registered as a pernet
+ * device, which caused kernel oopses and panics during network
+ * namespace cleanup. So please don't get this wrong.
+ */
+int register_pernet_subsys(struct pernet_operations *);
+void unregister_pernet_subsys(struct pernet_operations *);
+int register_pernet_device(struct pernet_operations *);
+void unregister_pernet_device(struct pernet_operations *);
+
+struct ctl_table;
+
+#ifdef CONFIG_SYSCTL
+int net_sysctl_init(void);
+struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
+ struct ctl_table *table);
+void unregister_net_sysctl_table(struct ctl_table_header *header);
+#else
+static inline int net_sysctl_init(void) { return 0; }
+static inline struct ctl_table_header *register_net_sysctl(struct net *net,
+ const char *path, struct ctl_table *table)
+{
+ return NULL;
+}
+static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
+{
+}
+#endif
+
+static inline int rt_genid_ipv4(const struct net *net)
+{
+ return atomic_read(&net->ipv4.rt_genid);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int rt_genid_ipv6(const struct net *net)
+{
+ return atomic_read(&net->ipv6.fib6_sernum);
+}
+#endif
+
+static inline void rt_genid_bump_ipv4(struct net *net)
+{
+ atomic_inc(&net->ipv4.rt_genid);
+}
+
+extern void (*__fib6_flush_trees)(struct net *net);
+static inline void rt_genid_bump_ipv6(struct net *net)
+{
+ if (__fib6_flush_trees)
+ __fib6_flush_trees(net);
+}
+
+#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
+static inline struct netns_ieee802154_lowpan *
+net_ieee802154_lowpan(struct net *net)
+{
+ return &net->ieee802154_lowpan;
+}
+#endif
+
+/* For callers who don't really care about whether it's IPv4 or IPv6 */
+static inline void rt_genid_bump_all(struct net *net)
+{
+ rt_genid_bump_ipv4(net);
+ rt_genid_bump_ipv6(net);
+}
+
+static inline int fnhe_genid(const struct net *net)
+{
+ return atomic_read(&net->fnhe_genid);
+}
+
+static inline void fnhe_genid_bump(struct net *net)
+{
+ atomic_inc(&net->fnhe_genid);
+}
+
+#ifdef CONFIG_NET
+void net_ns_init(void);
+#else
+static inline void net_ns_init(void) {}
+#endif
+
+#endif /* __NET_NET_NAMESPACE_H */
diff --git a/include/net/net_ratelimit.h b/include/net/net_ratelimit.h
new file mode 100644
index 000000000..93c1bd513
--- /dev/null
+++ b/include/net/net_ratelimit.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_NET_RATELIMIT_H
+#define _LINUX_NET_RATELIMIT_H
+
+#include <linux/ratelimit.h>
+
+extern struct ratelimit_state net_ratelimit_state;
+
+#endif /* _LINUX_NET_RATELIMIT_H */
diff --git a/include/net/net_trackers.h b/include/net/net_trackers.h
new file mode 100644
index 000000000..d94c76cf1
--- /dev/null
+++ b/include/net/net_trackers.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_NET_TRACKERS_H
+#define __NET_NET_TRACKERS_H
+#include <linux/ref_tracker.h>
+
+#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
+typedef struct ref_tracker *netdevice_tracker;
+#else
+typedef struct {} netdevice_tracker;
+#endif
+
+#ifdef CONFIG_NET_NS_REFCNT_TRACKER
+typedef struct ref_tracker *netns_tracker;
+#else
+typedef struct {} netns_tracker;
+#endif
+
+#endif /* __NET_NET_TRACKERS_H */
diff --git a/include/net/netevent.h b/include/net/netevent.h
new file mode 100644
index 000000000..1be3757a8
--- /dev/null
+++ b/include/net/netevent.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_EVENT_H
+#define _NET_EVENT_H
+
+/*
+ * Generic netevent notifiers
+ *
+ * Authors:
+ * Tom Tucker <tom@opengridcomputing.com>
+ * Steve Wise <swise@opengridcomputing.com>
+ *
+ * Changes:
+ */
+
+struct dst_entry;
+struct neighbour;
+struct notifier_block;
+
+struct netevent_redirect {
+ struct dst_entry *old;
+ struct dst_entry *new;
+ struct neighbour *neigh;
+ const void *daddr;
+};
+
+enum netevent_notif_type {
+ NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */
+ NETEVENT_REDIRECT, /* arg is struct netevent_redirect ptr */
+ NETEVENT_DELAY_PROBE_TIME_UPDATE, /* arg is struct neigh_parms ptr */
+ NETEVENT_IPV4_MPATH_HASH_UPDATE, /* arg is struct net ptr */
+ NETEVENT_IPV6_MPATH_HASH_UPDATE, /* arg is struct net ptr */
+ NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE, /* arg is struct net ptr */
+};
+
+int register_netevent_notifier(struct notifier_block *nb);
+int unregister_netevent_notifier(struct notifier_block *nb);
+int call_netevent_notifiers(unsigned long val, void *v);
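+
+/* Usage sketch (editorial illustration): a subscriber dispatches on the
+ * event type; the per-event argument type is documented in the enum
+ * above. All "foo" names are hypothetical.
+ *
+ *   static int foo_netevent(struct notifier_block *nb,
+ *                           unsigned long event, void *arg)
+ *   {
+ *           if (event == NETEVENT_NEIGH_UPDATE) {
+ *                   struct neighbour *n = arg;
+ *                   ... react to the updated entry ...
+ *           }
+ *           return NOTIFY_DONE;
+ *   }
+ *
+ *   static struct notifier_block foo_nb = {
+ *           .notifier_call = foo_netevent,
+ *   };
+ *
+ *   register_netevent_notifier(&foo_nb);
+ */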
+
+#endif
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
new file mode 100644
index 000000000..371696ec1
--- /dev/null
+++ b/include/net/netfilter/br_netfilter.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BR_NETFILTER_H_
+#define _BR_NETFILTER_H_
+
+#include <linux/netfilter.h>
+
+#include "../../../net/bridge/br_private.h"
+
+static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ struct nf_bridge_info *b = skb_ext_add(skb, SKB_EXT_BRIDGE_NF);
+
+ if (b)
+ memset(b, 0, sizeof(*b));
+
+ return b;
+#else
+ return NULL;
+#endif
+}
+
+void nf_bridge_update_protocol(struct sk_buff *skb);
+
+int br_nf_hook_thresh(unsigned int hook, struct net *net, struct sock *sk,
+ struct sk_buff *skb, struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct net *, struct sock *,
+ struct sk_buff *));
+
+unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb);
+
+static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
+{
+ unsigned int len = nf_bridge_encap_header_len(skb);
+
+ skb_push(skb, len);
+ skb->network_header -= len;
+}
+
+int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb);
+
+static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ struct net_bridge_port *port;
+
+ port = br_port_get_rcu(dev);
+ return port ? &port->br->fake_rtable : NULL;
+#else
+ return NULL;
+#endif
+}
+
+struct net_device *setup_pre_routing(struct sk_buff *skb,
+ const struct net *net);
+
+#if IS_ENABLED(CONFIG_IPV6)
+int br_validate_ipv6(struct net *net, struct sk_buff *skb);
+unsigned int br_nf_pre_routing_ipv6(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state);
+#else
+static inline int br_validate_ipv6(struct net *net, struct sk_buff *skb)
+{
+ return -1;
+}
+
+static inline unsigned int
+br_nf_pre_routing_ipv6(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ return NF_ACCEPT;
+}
+#endif
+
+#endif /* _BR_NETFILTER_H_ */
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
new file mode 100644
index 000000000..2c8c2b023
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IPv4 support for nf_conntrack.
+ *
+ * 23 Mar 2004: Yasuyuki Kozakai @ USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - move L3 protocol dependent part from include/linux/netfilter_ipv4/
+ * ip_conntrack.h
+ */
+
+#ifndef _NF_CONNTRACK_IPV4_H
+#define _NF_CONNTRACK_IPV4_H
+
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre;
+#endif
+
+#endif /*_NF_CONNTRACK_IPV4_H*/
diff --git a/include/net/netfilter/ipv4/nf_defrag_ipv4.h b/include/net/netfilter/ipv4/nf_defrag_ipv4.h
new file mode 100644
index 000000000..7fda9ce9f
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_defrag_ipv4.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_DEFRAG_IPV4_H
+#define _NF_DEFRAG_IPV4_H
+
+struct net;
+int nf_defrag_ipv4_enable(struct net *net);
+void nf_defrag_ipv4_disable(struct net *net);
+
+#endif /* _NF_DEFRAG_IPV4_H */
diff --git a/include/net/netfilter/ipv4/nf_dup_ipv4.h b/include/net/netfilter/ipv4/nf_dup_ipv4.h
new file mode 100644
index 000000000..a2bc16cdb
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_dup_ipv4.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_DUP_IPV4_H_
+#define _NF_DUP_IPV4_H_
+
+#include <linux/skbuff.h>
+#include <uapi/linux/in.h>
+
+void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
+ const struct in_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV4_H_ */
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
new file mode 100644
index 000000000..c653fcb88
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _IPV4_NF_REJECT_H
+#define _IPV4_NF_REJECT_H
+
+#include <linux/skbuff.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/netfilter/nf_reject.h>
+
+void nf_send_unreach(struct sk_buff *skb_in, int code, int hook);
+void nf_send_reset(struct net *net, struct sock *, struct sk_buff *oldskb,
+ int hook);
+const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
+ struct tcphdr *_oth, int hook);
+struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
+ const struct sk_buff *oldskb,
+ __u8 protocol, int ttl);
+void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
+ const struct tcphdr *oth);
+
+struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code);
+struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook);
+
+#endif /* _IPV4_NF_REJECT_H */
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
new file mode 100644
index 000000000..e95483192
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_CONNTRACK_IPV6_H
+#define _NF_CONNTRACK_IPV6_H
+
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
+
+#endif /* _NF_CONNTRACK_IPV6_H*/
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
new file mode 100644
index 000000000..ceadf8ba2
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_DEFRAG_IPV6_H
+#define _NF_DEFRAG_IPV6_H
+
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+int nf_defrag_ipv6_enable(struct net *net);
+void nf_defrag_ipv6_disable(struct net *net);
+
+int nf_ct_frag6_init(void);
+void nf_ct_frag6_cleanup(void);
+int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user);
+
+struct inet_frags_ctl;
+
+struct nft_ct_frag6_pernet {
+ struct ctl_table_header *nf_frag_frags_hdr;
+ struct fqdir *fqdir;
+};
+
+#endif /* _NF_DEFRAG_IPV6_H */
diff --git a/include/net/netfilter/ipv6/nf_dup_ipv6.h b/include/net/netfilter/ipv6/nf_dup_ipv6.h
new file mode 100644
index 000000000..f6312bb04
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_dup_ipv6.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_DUP_IPV6_H_
+#define _NF_DUP_IPV6_H_
+
+#include <linux/skbuff.h>
+
+void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
+ const struct in6_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV6_H_ */
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
new file mode 100644
index 000000000..d729344ba
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _IPV6_NF_REJECT_H
+#define _IPV6_NF_REJECT_H
+
+#include <linux/icmpv6.h>
+#include <net/netfilter/nf_reject.h>
+
+void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
+ unsigned int hooknum);
+void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ int hook);
+const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
+ struct tcphdr *otcph,
+ unsigned int *otcplen, int hook);
+struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
+ const struct sk_buff *oldskb,
+ __u8 protocol, int hoplimit);
+void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
+ const struct sk_buff *oldskb,
+ const struct tcphdr *oth, unsigned int otcplen);
+
+struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook);
+struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code);
+
+#endif /* _IPV6_NF_REJECT_H */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
new file mode 100644
index 000000000..6a2019aaa
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Connection state tracking for netfilter. This is separated from,
+ * but required by, the (future) NAT layer; it can also be used by an iptables
+ * extension.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - generalize L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack.h
+ */
+
+#ifndef _NF_CONNTRACK_H
+#define _NF_CONNTRACK_H
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <linux/netfilter/nf_conntrack_dccp.h>
+#include <linux/netfilter/nf_conntrack_sctp.h>
+#include <linux/netfilter/nf_conntrack_proto_gre.h>
+
+#include <net/netfilter/nf_conntrack_tuple.h>
+
+struct nf_ct_udp {
+ unsigned long stream_ts;
+};
+
+/* per conntrack: protocol private data */
+union nf_conntrack_proto {
+ /* insert conntrack proto private data here */
+ struct nf_ct_dccp dccp;
+ struct ip_ct_sctp sctp;
+ struct ip_ct_tcp tcp;
+ struct nf_ct_udp udp;
+ struct nf_ct_gre gre;
+ unsigned int tmpl_padto;
+};
+
+union nf_conntrack_expect_proto {
+ /* insert expect proto private data here */
+};
+
+struct nf_conntrack_net_ecache {
+ struct delayed_work dwork;
+ spinlock_t dying_lock;
+ struct hlist_nulls_head dying_list;
+};
+
+struct nf_conntrack_net {
+ /* only used when new connection is allocated: */
+ atomic_t count;
+ unsigned int expect_count;
+
+ /* only used from work queues, configuration plane, and so on: */
+ unsigned int users4;
+ unsigned int users6;
+ unsigned int users_bridge;
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *sysctl_header;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ struct nf_conntrack_net_ecache ecache;
+#endif
+};
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+
+struct nf_conn {
+ /* Usage count in here is 1 for hash table, 1 per skb,
+ * plus 1 for any connection(s) we are `master' for
+ *
+ * Hint: SKBs address this struct and its refcnt via skb->_nfct and
+ * the helpers nf_conntrack_get() and nf_conntrack_put().
+ * nf_ct_put() is equivalent to nf_conntrack_put() (both decrement
+ * the refcnt), except that the latter uses internal indirection and
+ * does not result in a conntrack module dependency.
+ * Beware: nf_ct_get() is different and does not increment the
+ * refcnt.
+ */
+ struct nf_conntrack ct_general;
+
+ spinlock_t lock;
+ /* jiffies32 when this ct is considered dead */
+ u32 timeout;
+
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ struct nf_conntrack_zone zone;
+#endif
+ /* XXX should I move this to the tail ? - Y.K */
+ /* These are my tuples; original and reply */
+ struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];
+
+ /* Have we seen traffic both ways yet? (bitset) */
+ unsigned long status;
+
+ possible_net_t ct_net;
+
+#if IS_ENABLED(CONFIG_NF_NAT)
+ struct hlist_node nat_bysource;
+#endif
+ /* all members below initialized via memset */
+ struct { } __nfct_init_offset;
+
+ /* If we were expected by an expectation, this will be it */
+ struct nf_conn *master;
+
+#if defined(CONFIG_NF_CONNTRACK_MARK)
+ u_int32_t mark;
+#endif
+
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+ u_int32_t secmark;
+#endif
+
+ /* Extensions */
+ struct nf_ct_ext *ext;
+
+ /* Storage reserved for other modules, must be the last member */
+ union nf_conntrack_proto proto;
+};
+
+static inline struct nf_conn *
+nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
+{
+ return container_of(hash, struct nf_conn,
+ tuplehash[hash->tuple.dst.dir]);
+}
+
+static inline u_int16_t nf_ct_l3num(const struct nf_conn *ct)
+{
+ return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+}
+
+static inline u_int8_t nf_ct_protonum(const struct nf_conn *ct)
+{
+ return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
+}
+
+#define nf_ct_tuple(ct, dir) (&(ct)->tuplehash[dir].tuple)
+
+/* get master conntrack via master expectation */
+#define master_ct(conntr) (conntr->master)
+
+extern struct net init_net;
+
+static inline struct net *nf_ct_net(const struct nf_conn *ct)
+{
+ return read_pnet(&ct->ct_net);
+}
+
+/* Alter reply tuple (maybe alter helper). */
+void nf_conntrack_alter_reply(struct nf_conn *ct,
+ const struct nf_conntrack_tuple *newreply);
+
+/* Is this tuple taken? (ignoring any belonging to the given
+ conntrack). */
+int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_conntrack);
+
+/* Return conntrack_info and tuple hash for given skb. */
+static inline struct nf_conn *
+nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
+{
+ unsigned long nfct = skb_get_nfct(skb);
+
+ *ctinfo = nfct & NFCT_INFOMASK;
+ return (struct nf_conn *)(nfct & NFCT_PTRMASK);
+}
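+
+/* Usage sketch (editorial illustration): a netfilter hook fetching the
+ * conntrack attached to an skb; CTINFO2DIR() and the IP_CT_DIR_*
+ * constants come from the conntrack common headers.
+ *
+ *   enum ip_conntrack_info ctinfo;
+ *   struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+ *
+ *   if (ct && nf_ct_is_confirmed(ct) &&
+ *       CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
+ *           ... the flow has seen reply traffic ...
+ */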
+
+void nf_ct_destroy(struct nf_conntrack *nfct);
+
+/* decrement reference count on a conntrack */
+static inline void nf_ct_put(struct nf_conn *ct)
+{
+ if (ct && refcount_dec_and_test(&ct->ct_general.use))
+ nf_ct_destroy(&ct->ct_general);
+}
+
+/* Protocol module loading */
+int nf_ct_l3proto_try_module_get(unsigned short l3proto);
+void nf_ct_l3proto_module_put(unsigned short l3proto);
+
+/* load module; enable/disable conntrack in this namespace */
+int nf_ct_netns_get(struct net *net, u8 nfproto);
+void nf_ct_netns_put(struct net *net, u8 nfproto);
+
+/*
+ * Allocate a hashtable of hlist_head (if nulls == 0),
+ * or hlist_nulls_head (if nulls == 1)
+ */
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
+
+int nf_conntrack_hash_check_insert(struct nf_conn *ct);
+bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
+
+bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
+ u_int16_t l3num, struct net *net,
+ struct nf_conntrack_tuple *tuple);
+
+void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb,
+ u32 extra_jiffies, bool do_acct);
+
+/* Refresh conntrack for this many jiffies and do accounting */
+static inline void nf_ct_refresh_acct(struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb,
+ u32 extra_jiffies)
+{
+ __nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, true);
+}
+
+/* Refresh conntrack for this many jiffies */
+static inline void nf_ct_refresh(struct nf_conn *ct,
+ const struct sk_buff *skb,
+ u32 extra_jiffies)
+{
+ __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, false);
+}
+
+/* kill conntrack and do accounting */
+bool nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb);
+
+/* kill conntrack without accounting */
+static inline bool nf_ct_kill(struct nf_conn *ct)
+{
+ return nf_ct_delete(ct, 0, 0);
+}
+
+struct nf_ct_iter_data {
+ struct net *net;
+ void *data;
+ u32 portid;
+ int report;
+};
+
+/* Iterate over all conntracks: if iter returns true, it's deleted. */
+void nf_ct_iterate_cleanup_net(int (*iter)(struct nf_conn *i, void *data),
+ const struct nf_ct_iter_data *iter_data);
+
+/* Also sets unconfirmed conntracks as dying. Only use in module exit path. */
+void nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data),
+ void *data);
+
+struct nf_conntrack_zone;
+
+void nf_conntrack_free(struct nf_conn *ct);
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
+ const struct nf_conntrack_tuple *orig,
+ const struct nf_conntrack_tuple *repl,
+ gfp_t gfp);
+
+static inline int nf_ct_is_template(const struct nf_conn *ct)
+{
+ return test_bit(IPS_TEMPLATE_BIT, &ct->status);
+}
+
+/* It's confirmed if it is, or has been, in the hash table. */
+static inline int nf_ct_is_confirmed(const struct nf_conn *ct)
+{
+ return test_bit(IPS_CONFIRMED_BIT, &ct->status);
+}
+
+static inline int nf_ct_is_dying(const struct nf_conn *ct)
+{
+ return test_bit(IPS_DYING_BIT, &ct->status);
+}
+
+/* Packet is received from loopback */
+static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
+{
+ return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
+}
+
+#define nfct_time_stamp ((u32)(jiffies))
+
+/* jiffies until ct expires, 0 if already expired */
+static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
+{
+ s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
+
+ return max(timeout, 0);
+}
+
+static inline bool nf_ct_is_expired(const struct nf_conn *ct)
+{
+ return (__s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0;
+}
+
+/* use after obtaining a reference count */
+static inline bool nf_ct_should_gc(const struct nf_conn *ct)
+{
+ return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
+ !nf_ct_is_dying(ct);
+}
+
+#define NF_CT_DAY (86400 * HZ)
+
+/* Set an arbitrary timeout large enough not to ever expire; this saves
+ * us a check for the IPS_OFFLOAD_BIT from the packet path via
+ * nf_ct_is_expired().
+ */
+static inline void nf_ct_offload_timeout(struct nf_conn *ct)
+{
+ if (nf_ct_expires(ct) < NF_CT_DAY / 2)
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
+}
+
+struct kernel_param;
+
+int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp);
+int nf_conntrack_hash_resize(unsigned int hashsize);
+
+extern struct hlist_nulls_head *nf_conntrack_hash;
+extern unsigned int nf_conntrack_htable_size;
+extern seqcount_spinlock_t nf_conntrack_generation;
+extern unsigned int nf_conntrack_max;
+
+/* must be called with rcu read lock held */
+static inline void
+nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
+{
+ struct hlist_nulls_head *hptr;
+ unsigned int sequence, hsz;
+
+ do {
+ sequence = read_seqcount_begin(&nf_conntrack_generation);
+ hsz = nf_conntrack_htable_size;
+ hptr = nf_conntrack_hash;
+ } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+
+ *hash = hptr;
+ *hsize = hsz;
+}
+
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
+ gfp_t flags);
+void nf_ct_tmpl_free(struct nf_conn *tmpl);
+
+u32 nf_ct_get_id(const struct nf_conn *ct);
+u32 nf_conntrack_count(const struct net *net);
+
+static inline void
+nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
+{
+ skb_set_nfct(skb, (unsigned long)ct | info);
+}
+
+extern unsigned int nf_conntrack_net_id;
+
+static inline struct nf_conntrack_net *nf_ct_pernet(const struct net *net)
+{
+ return net_generic(net, nf_conntrack_net_id);
+}
+
+#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
+#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
+#define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))
+
+#define MODULE_ALIAS_NFCT_HELPER(helper) \
+ MODULE_ALIAS("nfct-helper-" helper)
+
+#endif /* _NF_CONNTRACK_H */
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
new file mode 100644
index 000000000..4b2b7f891
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (C) 2008 Krzysztof Piotr Oledzki <ole@ans.pl>
+ */
+
+#ifndef _NF_CONNTRACK_ACCT_H
+#define _NF_CONNTRACK_ACCT_H
+#include <net/net_namespace.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+struct nf_conn_counter {
+ atomic64_t packets;
+ atomic64_t bytes;
+};
+
+struct nf_conn_acct {
+ struct nf_conn_counter counter[IP_CT_DIR_MAX];
+};
+
+static inline
+struct nf_conn_acct *nf_conn_acct_find(const struct nf_conn *ct)
+{
+ return nf_ct_ext_find(ct, NF_CT_EXT_ACCT);
+}
+
+static inline
+struct nf_conn_acct *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ struct net *net = nf_ct_net(ct);
+ struct nf_conn_acct *acct;
+
+ if (!net->ct.sysctl_acct)
+ return NULL;
+
+ acct = nf_ct_ext_add(ct, NF_CT_EXT_ACCT, gfp);
+ if (!acct)
+ pr_debug("failed to add accounting extension area");
+
+
+ return acct;
+#else
+ return NULL;
+#endif
+}
+
+/* Check if connection tracking accounting is enabled */
+static inline bool nf_ct_acct_enabled(struct net *net)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ return net->ct.sysctl_acct != 0;
+#else
+ return false;
+#endif
+}
+
+/* Enable/disable connection tracking accounting */
+static inline void nf_ct_set_acct(struct net *net, bool enable)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ net->ct.sysctl_acct = enable;
+#endif
+}
+
+void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
+ unsigned int bytes);
+
+static inline void nf_ct_acct_update(struct nf_conn *ct, u32 dir,
+ unsigned int bytes)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ nf_ct_acct_add(ct, dir, 1, bytes);
+#endif
+}
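+
+/* Usage sketch (editorial illustration): per-direction accounting from
+ * a forwarding path; this compiles to a no-op when conntrack is
+ * disabled.
+ *
+ *   nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
+ */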
+
+void nf_conntrack_acct_pernet_init(struct net *net);
+
+void nf_conntrack_acct_fini(void);
+
+#endif /* _NF_CONNTRACK_ACCT_H */
diff --git a/include/net/netfilter/nf_conntrack_act_ct.h b/include/net/netfilter/nf_conntrack_act_ct.h
new file mode 100644
index 000000000..e5f2f0b73
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_act_ct.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _NF_CONNTRACK_ACT_CT_H
+#define _NF_CONNTRACK_ACT_CT_H
+
+#include <net/netfilter/nf_conntrack.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+struct nf_conn_act_ct_ext {
+ int ifindex[IP_CT_DIR_MAX];
+};
+
+static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ return nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
+#else
+ return NULL;
+#endif
+}
+
+static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ struct nf_conn_act_ct_ext *act_ct_ext;
+
+ act_ct_ext = nf_conn_act_ct_ext_find(ct);
+ if (dev_net(skb->dev) == &init_net && act_ct_ext)
+ act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
+#endif
+}
+
+static inline struct nf_conn_act_ct_ext *
+nf_conn_act_ct_ext_add(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
+
+ if (act_ct)
+ return act_ct;
+
+ act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
+ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
+ return act_ct;
+#else
+ return NULL;
+#endif
+}
+
+#endif /* _NF_CONNTRACK_ACT_CT_H */
diff --git a/include/net/netfilter/nf_conntrack_bpf.h b/include/net/netfilter/nf_conntrack_bpf.h
new file mode 100644
index 000000000..2d0da478c
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_bpf.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _NF_CONNTRACK_BPF_H
+#define _NF_CONNTRACK_BPF_H
+
+#include <linux/kconfig.h>
+#include <net/netfilter/nf_conntrack.h>
+
+struct nf_conn___init {
+ struct nf_conn ct;
+};
+
+#if (IS_BUILTIN(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
+ (IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
+
+extern int register_nf_conntrack_bpf(void);
+extern void cleanup_nf_conntrack_bpf(void);
+
+#else
+
+static inline int register_nf_conntrack_bpf(void)
+{
+ return 0;
+}
+
+static inline void cleanup_nf_conntrack_bpf(void)
+{
+}
+
+#endif
+
+#if (IS_BUILTIN(CONFIG_NF_NAT) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
+ (IS_MODULE(CONFIG_NF_NAT) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
+
+extern int register_nf_nat_bpf(void);
+
+#else
+
+static inline int register_nf_nat_bpf(void)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _NF_CONNTRACK_BPF_H */
diff --git a/include/net/netfilter/nf_conntrack_bridge.h b/include/net/netfilter/nf_conntrack_bridge.h
new file mode 100644
index 000000000..c564281ed
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_bridge.h
@@ -0,0 +1,19 @@
+#ifndef NF_CONNTRACK_BRIDGE_
+#define NF_CONNTRACK_BRIDGE_
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <uapi/linux/if_ether.h>
+
+struct nf_hook_ops;
+
+struct nf_ct_bridge_info {
+ struct nf_hook_ops *ops;
+ unsigned int ops_size;
+ struct module *me;
+};
+
+void nf_ct_bridge_register(struct nf_ct_bridge_info *info);
+void nf_ct_bridge_unregister(struct nf_ct_bridge_info *info);
+
+#endif
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
new file mode 100644
index 000000000..a36f87af4
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header is used to share core functionality between the
+ * standalone connection tracking module, and the compatibility layer's use
+ * of connection tracking.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - generalize L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack_core.h
+ */
+
+#ifndef _NF_CONNTRACK_CORE_H
+#define _NF_CONNTRACK_CORE_H
+
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+
+unsigned int nf_conntrack_in(struct sk_buff *skb,
+ const struct nf_hook_state *state);
+
+int nf_conntrack_init_net(struct net *net);
+void nf_conntrack_cleanup_net(struct net *net);
+void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
+
+void nf_conntrack_proto_pernet_init(struct net *net);
+
+int nf_conntrack_proto_init(void);
+void nf_conntrack_proto_fini(void);
+
+int nf_conntrack_init_start(void);
+void nf_conntrack_cleanup_start(void);
+
+void nf_conntrack_init_end(void);
+void nf_conntrack_cleanup_end(void);
+
+bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
+ const struct nf_conntrack_tuple *orig);
+
+/* Find a connection corresponding to a tuple. */
+struct nf_conntrack_tuple_hash *
+nf_conntrack_find_get(struct net *net,
+ const struct nf_conntrack_zone *zone,
+ const struct nf_conntrack_tuple *tuple);
+
+int __nf_conntrack_confirm(struct sk_buff *skb);
+
+/* Confirm a connection: returns NF_DROP if packet must be dropped. */
+static inline int nf_conntrack_confirm(struct sk_buff *skb)
+{
+ struct nf_conn *ct = (struct nf_conn *)skb_nfct(skb);
+ int ret = NF_ACCEPT;
+
+ if (ct) {
+ if (!nf_ct_is_confirmed(ct)) {
+ ret = __nf_conntrack_confirm(skb);
+
+ if (ret == NF_ACCEPT)
+ ct = (struct nf_conn *)skb_nfct(skb);
+ }
+
+ if (ret == NF_ACCEPT && nf_ct_ecache_exist(ct))
+ nf_ct_deliver_cached_events(ct);
+ }
+ return ret;
+}
+
+unsigned int nf_confirm(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo);
+
+void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_l4proto *proto);
+
+#define CONNTRACK_LOCKS 1024
+
+extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
+void nf_conntrack_lock(spinlock_t *lock);
+
+extern spinlock_t nf_conntrack_expect_lock;
+
+/* ctnetlink code shared by both ctnetlink and nf_conntrack_bpf */
+
+static inline void __nf_ct_set_timeout(struct nf_conn *ct, u64 timeout)
+{
+ if (timeout > INT_MAX)
+ timeout = INT_MAX;
+
+ if (nf_ct_is_confirmed(ct))
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
+ else
+ ct->timeout = (u32)timeout;
+}
+
+int __nf_ct_change_timeout(struct nf_conn *ct, u64 cta_timeout);
+void __nf_ct_change_status(struct nf_conn *ct, unsigned long on, unsigned long off);
+int nf_ct_change_status_common(struct nf_conn *ct, unsigned int status);
+
+#endif /* _NF_CONNTRACK_CORE_H */
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
new file mode 100644
index 000000000..e227d997f
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -0,0 +1,40 @@
+#ifndef _NF_CONNTRACK_COUNT_H
+#define _NF_CONNTRACK_COUNT_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+
+struct nf_conncount_data;
+
+struct nf_conncount_list {
+ spinlock_t list_lock;
+ u32 last_gc; /* jiffies at most recent gc */
+ struct list_head head; /* connections with the same filtering key */
+ unsigned int count; /* length of list */
+};
+
+struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
+ unsigned int keylen);
+void nf_conncount_destroy(struct net *net, unsigned int family,
+ struct nf_conncount_data *data);
+
+unsigned int nf_conncount_count(struct net *net,
+ struct nf_conncount_data *data,
+ const u32 *key,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone);
+
+int nf_conncount_add(struct net *net, struct nf_conncount_list *list,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone);
+
+void nf_conncount_list_init(struct nf_conncount_list *list);
+
+bool nf_conncount_gc_list(struct net *net,
+ struct nf_conncount_list *list);
+
+void nf_conncount_cache_free(struct nf_conncount_list *list);
+
+#endif
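
For context, a connlimit-style user of this API allocates one nf_conncount_data object per rule and asks, per packet, how many tracked connections share a key. The sketch below is illustrative; the single-word source-address key and the keylen argument's layout are assumptions for the example:

static struct nf_conncount_data *demo_data;

static int demo_setup(struct net *net)
{
	demo_data = nf_conncount_init(net, NFPROTO_IPV4, sizeof(u32));
	return PTR_ERR_OR_ZERO(demo_data);	/* ERR_PTR on failure */
}

static unsigned int demo_count(struct net *net, __be32 saddr,
			       const struct nf_conntrack_tuple *tuple,
			       const struct nf_conntrack_zone *zone)
{
	u32 key[1] = { (__force u32)saddr };	/* must match keylen */

	return nf_conncount_count(net, demo_data, key, tuple, zone);
}

static void demo_teardown(struct net *net)
{
	nf_conncount_destroy(net, NFPROTO_IPV4, demo_data);
}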
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
new file mode 100644
index 000000000..0c1dac318
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * connection tracking event cache.
+ */
+
+#ifndef _NF_CONNTRACK_ECACHE_H
+#define _NF_CONNTRACK_ECACHE_H
+#include <net/netfilter/nf_conntrack.h>
+
+#include <net/net_namespace.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+enum nf_ct_ecache_state {
+ NFCT_ECACHE_DESTROY_FAIL, /* tried but failed to send destroy event */
+ NFCT_ECACHE_DESTROY_SENT, /* sent destroy event after failure */
+};
+
+struct nf_conntrack_ecache {
+ unsigned long cache; /* bitops want long */
+ u16 ctmask; /* bitmask of ct events to be delivered */
+ u16 expmask; /* bitmask of expect events to be delivered */
+ u32 missed; /* missed events */
+ u32 portid; /* netlink portid of destroyer */
+};
+
+static inline struct nf_conntrack_ecache *
+nf_ct_ecache_find(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ return nf_ct_ext_find(ct, NF_CT_EXT_ECACHE);
+#else
+ return NULL;
+#endif
+}
+
+static inline bool nf_ct_ecache_exist(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ return nf_ct_ext_exist(ct, NF_CT_EXT_ECACHE);
+#else
+ return false;
+#endif
+}
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+
+/* This structure is passed to the event handler */
+struct nf_ct_event {
+ struct nf_conn *ct;
+ u32 portid;
+ int report;
+};
+
+struct nf_exp_event {
+ struct nf_conntrack_expect *exp;
+ u32 portid;
+ int report;
+};
+
+struct nf_ct_event_notifier {
+ int (*ct_event)(unsigned int events, const struct nf_ct_event *item);
+ int (*exp_event)(unsigned int events, const struct nf_exp_event *item);
+};
+
+void nf_conntrack_register_notifier(struct net *net,
+ const struct nf_ct_event_notifier *nb);
+void nf_conntrack_unregister_notifier(struct net *net);
+
+void nf_ct_deliver_cached_events(struct nf_conn *ct);
+int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
+ u32 portid, int report);
+
+bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp);
+#else
+
+static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct)
+{
+}
+
+static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
+ struct nf_conn *ct,
+ u32 portid,
+ int report)
+{
+ return 0;
+}
+
+static inline bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
+{
+ return false;
+}
+#endif
+
+static inline void
+nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ struct net *net = nf_ct_net(ct);
+ struct nf_conntrack_ecache *e;
+
+ if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
+ return;
+
+ e = nf_ct_ecache_find(ct);
+ if (e == NULL)
+ return;
+
+ set_bit(event, &e->cache);
+#endif
+}
+
+static inline int
+nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
+ u32 portid, int report)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ if (nf_ct_ecache_exist(ct))
+ return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
+#endif
+ return 0;
+}
+
+static inline int
+nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ if (nf_ct_ecache_exist(ct))
+ return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
+ struct nf_conntrack_expect *exp,
+ u32 portid, int report);
+
+void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state);
+
+void nf_conntrack_ecache_pernet_init(struct net *net);
+void nf_conntrack_ecache_pernet_fini(struct net *net);
+
+struct nf_conntrack_net_ecache *nf_conn_pernet_ecache(const struct net *net);
+
+static inline bool nf_conntrack_ecache_dwork_pending(const struct net *net)
+{
+ return net->ct.ecache_dwork_pending;
+}
+#else /* CONFIG_NF_CONNTRACK_EVENTS */
+
+static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
+ struct nf_conntrack_expect *exp,
+ u32 portid,
+ int report)
+{
+}
+
+static inline void nf_conntrack_ecache_work(struct net *net,
+ enum nf_ct_ecache_state s)
+{
+}
+
+static inline void nf_conntrack_ecache_pernet_init(struct net *net)
+{
+}
+
+static inline void nf_conntrack_ecache_pernet_fini(struct net *net)
+{
+}
+
+static inline bool nf_conntrack_ecache_dwork_pending(const struct net *net)
+{
+	return false;
+}
+#endif /* CONFIG_NF_CONNTRACK_EVENTS */
+#endif /*_NF_CONNTRACK_ECACHE_H*/
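
A consumer of these events fills in struct nf_ct_event_notifier and registers it per network namespace, as ctnetlink does. A minimal hedged sketch (module and error plumbing elided):

static int demo_ct_event(unsigned int events, const struct nf_ct_event *item)
{
	/* events is a bitmask of enum ip_conntrack_events */
	if (events & (1 << IPCT_DESTROY))
		pr_info("conntrack %p destroyed\n", item->ct);

	return 0;
}

static const struct nf_ct_event_notifier demo_notifier = {
	.ct_event = demo_ct_event,
};

/* On init: nf_conntrack_register_notifier(net, &demo_notifier);
 * on exit: nf_conntrack_unregister_notifier(net); */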
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
new file mode 100644
index 000000000..0855b60fb
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * connection tracking expectations.
+ */
+
+#ifndef _NF_CONNTRACK_EXPECT_H
+#define _NF_CONNTRACK_EXPECT_H
+
+#include <linux/refcount.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+
+extern unsigned int nf_ct_expect_hsize;
+extern unsigned int nf_ct_expect_max;
+extern struct hlist_head *nf_ct_expect_hash;
+
+struct nf_conntrack_expect {
+ /* Conntrack expectation list member */
+ struct hlist_node lnode;
+
+ /* Hash member */
+ struct hlist_node hnode;
+
+ /* We expect this tuple, with the following mask */
+ struct nf_conntrack_tuple tuple;
+ struct nf_conntrack_tuple_mask mask;
+
+ /* Function to call after setup and insertion */
+ void (*expectfn)(struct nf_conn *new,
+ struct nf_conntrack_expect *this);
+
+ /* Helper to assign to new connection */
+ struct nf_conntrack_helper *helper;
+
+ /* The conntrack of the master connection */
+ struct nf_conn *master;
+
+ /* Timer function; deletes the expectation. */
+ struct timer_list timeout;
+
+ /* Usage count. */
+ refcount_t use;
+
+ /* Flags */
+ unsigned int flags;
+
+ /* Expectation class */
+ unsigned int class;
+
+#if IS_ENABLED(CONFIG_NF_NAT)
+ union nf_inet_addr saved_addr;
+ /* This is the original per-proto part, used to map the
+ * expected connection the way the recipient expects. */
+ union nf_conntrack_man_proto saved_proto;
+ /* Direction relative to the master connection. */
+ enum ip_conntrack_dir dir;
+#endif
+
+ struct rcu_head rcu;
+};
+
+static inline struct net *nf_ct_exp_net(struct nf_conntrack_expect *exp)
+{
+ return nf_ct_net(exp->master);
+}
+
+#define NF_CT_EXP_POLICY_NAME_LEN 16
+
+struct nf_conntrack_expect_policy {
+ unsigned int max_expected;
+ unsigned int timeout;
+ char name[NF_CT_EXP_POLICY_NAME_LEN];
+};
+
+#define NF_CT_EXPECT_CLASS_DEFAULT 0
+#define NF_CT_EXPECT_MAX_CNT 255
+
+/* Allow to reuse expectations with the same tuples from different master
+ * conntracks.
+ */
+#define NF_CT_EXP_F_SKIP_MASTER 0x1
+
+int nf_conntrack_expect_pernet_init(struct net *net);
+void nf_conntrack_expect_pernet_fini(struct net *net);
+
+int nf_conntrack_expect_init(void);
+void nf_conntrack_expect_fini(void);
+
+struct nf_conntrack_expect *
+__nf_ct_expect_find(struct net *net,
+ const struct nf_conntrack_zone *zone,
+ const struct nf_conntrack_tuple *tuple);
+
+struct nf_conntrack_expect *
+nf_ct_expect_find_get(struct net *net,
+ const struct nf_conntrack_zone *zone,
+ const struct nf_conntrack_tuple *tuple);
+
+struct nf_conntrack_expect *
+nf_ct_find_expectation(struct net *net,
+ const struct nf_conntrack_zone *zone,
+ const struct nf_conntrack_tuple *tuple);
+
+void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
+ u32 portid, int report);
+static inline void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
+{
+ nf_ct_unlink_expect_report(exp, 0, 0);
+}
+
+void nf_ct_remove_expectations(struct nf_conn *ct);
+void nf_ct_unexpect_related(struct nf_conntrack_expect *exp);
+bool nf_ct_remove_expect(struct nf_conntrack_expect *exp);
+
+void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data), void *data);
+void nf_ct_expect_iterate_net(struct net *net,
+ bool (*iter)(struct nf_conntrack_expect *e, void *data),
+ void *data, u32 portid, int report);
+
+/* Allocate space for an expectation: this is mandatory before calling
+   nf_ct_expect_related. You will have to call nf_ct_expect_put() afterwards. */
+struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me);
+void nf_ct_expect_init(struct nf_conntrack_expect *, unsigned int, u_int8_t,
+ const union nf_inet_addr *,
+ const union nf_inet_addr *,
+ u_int8_t, const __be16 *, const __be16 *);
+void nf_ct_expect_put(struct nf_conntrack_expect *exp);
+int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
+ u32 portid, int report, unsigned int flags);
+static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect,
+ unsigned int flags)
+{
+ return nf_ct_expect_related_report(expect, 0, 0, flags);
+}
+
+#endif /*_NF_CONNTRACK_EXPECT_H*/
+
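The alloc/init/related/put sequence above is the canonical way a helper installs an expectation once it has parsed a forthcoming data connection out of the control channel, as the FTP helper does. A hedged sketch; the TCP protocol and port argument are placeholders:

static int demo_expect_data_conn(struct nf_conn *ct, __be16 port)
{
	struct nf_conntrack_expect *exp;
	int ret;

	exp = nf_ct_expect_alloc(ct);
	if (!exp)
		return -ENOMEM;

	/* expect src -> dst of the original direction, any source port */
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
			  nf_ct_l3num(ct),
			  &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
			  &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3,
			  IPPROTO_TCP, NULL, &port);

	ret = nf_ct_expect_related(exp, 0);
	nf_ct_expect_put(exp);		/* drop the allocation reference */
	return ret;
}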
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
new file mode 100644
index 000000000..0b247248b
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_CONNTRACK_EXTEND_H
+#define _NF_CONNTRACK_EXTEND_H
+
+#include <linux/slab.h>
+
+#include <net/netfilter/nf_conntrack.h>
+
+enum nf_ct_ext_id {
+ NF_CT_EXT_HELPER,
+#if IS_ENABLED(CONFIG_NF_NAT)
+ NF_CT_EXT_NAT,
+#endif
+ NF_CT_EXT_SEQADJ,
+ NF_CT_EXT_ACCT,
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ NF_CT_EXT_ECACHE,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ NF_CT_EXT_TSTAMP,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ NF_CT_EXT_TIMEOUT,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+ NF_CT_EXT_LABELS,
+#endif
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ NF_CT_EXT_SYNPROXY,
+#endif
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ NF_CT_EXT_ACT_CT,
+#endif
+ NF_CT_EXT_NUM,
+};
+
+/* Extensions: optional stuff which isn't permanently in struct. */
+struct nf_ct_ext {
+ u8 offset[NF_CT_EXT_NUM];
+ u8 len;
+ unsigned int gen_id;
+ char data[] __aligned(8);
+};
+
+static inline bool __nf_ct_ext_exist(const struct nf_ct_ext *ext, u8 id)
+{
+ return !!ext->offset[id];
+}
+
+static inline bool nf_ct_ext_exist(const struct nf_conn *ct, u8 id)
+{
+ return (ct->ext && __nf_ct_ext_exist(ct->ext, id));
+}
+
+void *__nf_ct_ext_find(const struct nf_ct_ext *ext, u8 id);
+
+static inline void *nf_ct_ext_find(const struct nf_conn *ct, u8 id)
+{
+ struct nf_ct_ext *ext = ct->ext;
+
+ if (!ext || !__nf_ct_ext_exist(ext, id))
+ return NULL;
+
+ if (unlikely(ext->gen_id))
+ return __nf_ct_ext_find(ext, id);
+
+ return (void *)ct->ext + ct->ext->offset[id];
+}
+
+/* Add this type, returns pointer to data or NULL. */
+void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);
+
+/* ext genid. If ext->gen_id != nf_conntrack_ext_genid, extensions cannot
+ * be used anymore unless the conntrack has the CONFIRMED bit set.
+ */
+extern atomic_t nf_conntrack_ext_genid;
+void nf_ct_ext_bump_genid(void);
+
+#endif /* _NF_CONNTRACK_EXTEND_H */
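
Extensions are added while the conntrack is still unconfirmed and fetched later through the offset table kept in struct nf_ct_ext. A hedged sketch, using the accounting extension purely as an example id:

static void *demo_add_then_find(struct nf_conn *ct)
{
	/* fails if memory is tight or the conntrack is already confirmed */
	if (!nf_ct_ext_add(ct, NF_CT_EXT_ACCT, GFP_ATOMIC))
		return NULL;

	return nf_ct_ext_find(ct, NF_CT_EXT_ACCT);
}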
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
new file mode 100644
index 000000000..9939c366f
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * connection tracking helpers.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - generalize L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack_helper.h
+ */
+
+#ifndef _NF_CONNTRACK_HELPER_H
+#define _NF_CONNTRACK_HELPER_H
+#include <linux/refcount.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+#define NF_NAT_HELPER_PREFIX "ip_nat_"
+#define NF_NAT_HELPER_NAME(name) NF_NAT_HELPER_PREFIX name
+#define MODULE_ALIAS_NF_NAT_HELPER(name) \
+ MODULE_ALIAS(NF_NAT_HELPER_NAME(name))
+
+struct module;
+
+enum nf_ct_helper_flags {
+ NF_CT_HELPER_F_USERSPACE = (1 << 0),
+ NF_CT_HELPER_F_CONFIGURED = (1 << 1),
+};
+
+#define NF_CT_HELPER_NAME_LEN 16
+
+struct nf_conntrack_helper {
+ struct hlist_node hnode; /* Internal use. */
+
+ char name[NF_CT_HELPER_NAME_LEN]; /* name of the module */
+ refcount_t refcnt;
+ struct module *me; /* pointer to self */
+ const struct nf_conntrack_expect_policy *expect_policy;
+
+ /* Tuple of things we will help (compared against server response) */
+ struct nf_conntrack_tuple tuple;
+
+ /* Function to call when data passes; return verdict, or -1 to
+ invalidate. */
+ int (*help)(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info conntrackinfo);
+
+ void (*destroy)(struct nf_conn *ct);
+
+ int (*from_nlattr)(struct nlattr *attr, struct nf_conn *ct);
+ int (*to_nlattr)(struct sk_buff *skb, const struct nf_conn *ct);
+ unsigned int expect_class_max;
+
+ unsigned int flags;
+
+ /* For user-space helpers: */
+ unsigned int queue_num;
+ /* length of userspace private data stored in nf_conn_help->data */
+ u16 data_len;
+ /* name of NAT helper module */
+ char nat_mod_name[NF_CT_HELPER_NAME_LEN];
+};
+
+/* Must be kept in sync with the classes defined by helpers */
+#define NF_CT_MAX_EXPECT_CLASSES 4
+
+/* nf_conn feature for connections that have a helper */
+struct nf_conn_help {
+	/* Helper, if any */
+ struct nf_conntrack_helper __rcu *helper;
+
+ struct hlist_head expectations;
+
+ /* Current number of expected connections */
+ u8 expecting[NF_CT_MAX_EXPECT_CLASSES];
+
+ /* private helper information. */
+ char data[32] __aligned(8);
+};
+
+#define NF_CT_HELPER_BUILD_BUG_ON(structsize) \
+ BUILD_BUG_ON((structsize) > sizeof_field(struct nf_conn_help, data))
+
+struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name,
+ u16 l3num, u8 protonum);
+
+struct nf_conntrack_helper *nf_conntrack_helper_try_module_get(const char *name,
+ u16 l3num,
+ u8 protonum);
+void nf_conntrack_helper_put(struct nf_conntrack_helper *helper);
+
+void nf_ct_helper_init(struct nf_conntrack_helper *helper,
+ u16 l3num, u16 protonum, const char *name,
+ u16 default_port, u16 spec_port, u32 id,
+ const struct nf_conntrack_expect_policy *exp_pol,
+ u32 expect_class_max,
+ int (*help)(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo),
+ int (*from_nlattr)(struct nlattr *attr,
+ struct nf_conn *ct),
+ struct module *module);
+
+int nf_conntrack_helper_register(struct nf_conntrack_helper *);
+void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
+
+int nf_conntrack_helpers_register(struct nf_conntrack_helper *, unsigned int);
+void nf_conntrack_helpers_unregister(struct nf_conntrack_helper *,
+ unsigned int);
+
+struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp);
+
+int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
+ gfp_t flags);
+
+void nf_ct_helper_destroy(struct nf_conn *ct);
+
+static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
+{
+ return nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
+}
+
+static inline void *nfct_help_data(const struct nf_conn *ct)
+{
+ struct nf_conn_help *help;
+
+ help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
+
+ return (void *)help->data;
+}
+
+void nf_conntrack_helper_pernet_init(struct net *net);
+
+int nf_conntrack_helper_init(void);
+void nf_conntrack_helper_fini(void);
+
+int nf_conntrack_broadcast_help(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int timeout);
+
+struct nf_ct_helper_expectfn {
+ struct list_head head;
+ const char *name;
+ void (*expectfn)(struct nf_conn *ct, struct nf_conntrack_expect *exp);
+};
+
+__printf(3,4)
+void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
+ const char *fmt, ...);
+
+void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n);
+void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n);
+struct nf_ct_helper_expectfn *
+nf_ct_helper_expectfn_find_by_name(const char *name);
+struct nf_ct_helper_expectfn *
+nf_ct_helper_expectfn_find_by_symbol(const void *symbol);
+
+extern struct hlist_head *nf_ct_helper_hash;
+extern unsigned int nf_ct_helper_hsize;
+
+struct nf_conntrack_nat_helper {
+ struct list_head list;
+ char mod_name[NF_CT_HELPER_NAME_LEN]; /* module name */
+ struct module *module; /* pointer to self */
+};
+
+#define NF_CT_NAT_HELPER_INIT(name) \
+ { \
+ .mod_name = NF_NAT_HELPER_NAME(name), \
+ .module = THIS_MODULE \
+ }
+
+void nf_nat_helper_register(struct nf_conntrack_nat_helper *nat);
+void nf_nat_helper_unregister(struct nf_conntrack_nat_helper *nat);
+int nf_nat_helper_try_module_get(const char *name, u16 l3num,
+ u8 protonum);
+void nf_nat_helper_put(struct nf_conntrack_helper *helper);
+void nf_ct_set_auto_assign_helper_warned(struct net *net);
+#endif /*_NF_CONNTRACK_HELPER_H*/
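
Putting the registration API together: an in-tree helper typically initializes its struct with nf_ct_helper_init() and registers it once at module init. The sketch below is illustrative; the "demo" name, port 2121 and the one-slot expect policy are placeholders:

static struct nf_conntrack_expect_policy demo_policy = {
	.max_expected	= 1,
	.timeout	= 5 * 60,	/* seconds */
};

static int demo_help(struct sk_buff *skb, unsigned int protoff,
		     struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	/* parse the payload and set up expectations here */
	return NF_ACCEPT;
}

static struct nf_conntrack_helper demo_helper;

static int __init demo_helper_init(void)
{
	nf_ct_helper_init(&demo_helper, AF_INET, IPPROTO_TCP, "demo",
			  2121, 0, 0, &demo_policy, 1,
			  demo_help, NULL, THIS_MODULE);

	return nf_conntrack_helpers_register(&demo_helper, 1);
}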
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
new file mode 100644
index 000000000..1f47bef51
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header for use in defining a given L4 protocol for connection tracking.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - generalized L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack_protocol.h
+ */
+
+#ifndef _NF_CONNTRACK_L4PROTO_H
+#define _NF_CONNTRACK_L4PROTO_H
+#include <linux/netlink.h>
+#include <net/netlink.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netns/generic.h>
+
+struct seq_file;
+
+struct nf_conntrack_l4proto {
+ /* L4 Protocol number. */
+ u_int8_t l4proto;
+
+ /* Resolve clashes on insertion races. */
+ bool allow_clash;
+
+ /* protoinfo nlattr size, closes a hole */
+ u16 nlattr_size;
+
+ /* called by gc worker if table is full */
+ bool (*can_early_drop)(const struct nf_conn *ct);
+
+	/* convert protoinfo to nfnetlink attributes */
+ int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
+ struct nf_conn *ct, bool destroy);
+
+ /* convert nfnetlink attributes to protoinfo */
+ int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct);
+
+ int (*tuple_to_nlattr)(struct sk_buff *skb,
+ const struct nf_conntrack_tuple *t);
+ /* Calculate tuple nlattr size */
+ unsigned int (*nlattr_tuple_size)(void);
+ int (*nlattr_to_tuple)(struct nlattr *tb[],
+ struct nf_conntrack_tuple *t,
+ u_int32_t flags);
+ const struct nla_policy *nla_policy;
+
+ struct {
+ int (*nlattr_to_obj)(struct nlattr *tb[],
+ struct net *net, void *data);
+ int (*obj_to_nlattr)(struct sk_buff *skb, const void *data);
+
+ u16 obj_size;
+ u16 nlattr_max;
+ const struct nla_policy *nla_policy;
+ } ctnl_timeout;
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
+ /* Print out the private part of the conntrack. */
+ void (*print_conntrack)(struct seq_file *s, struct nf_conn *);
+#endif
+};
+
+bool icmp_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct net *net,
+ struct nf_conntrack_tuple *tuple);
+
+bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct net *net,
+ struct nf_conntrack_tuple *tuple);
+
+bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig);
+bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig);
+
+int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state,
+ u8 l4proto,
+ union nf_inet_addr *outer_daddr);
+
+int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state);
+
+int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state);
+
+int nf_conntrack_icmp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+
+int nf_conntrack_icmpv6_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+
+int nf_conntrack_udp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+int nf_conntrack_udplite_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+int nf_conntrack_tcp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+int nf_conntrack_dccp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+int nf_conntrack_gre_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+
+void nf_conntrack_generic_init_net(struct net *net);
+void nf_conntrack_tcp_init_net(struct net *net);
+void nf_conntrack_udp_init_net(struct net *net);
+void nf_conntrack_gre_init_net(struct net *net);
+void nf_conntrack_dccp_init_net(struct net *net);
+void nf_conntrack_sctp_init_net(struct net *net);
+void nf_conntrack_icmp_init_net(struct net *net);
+void nf_conntrack_icmpv6_init_net(struct net *net);
+
+/* Existing built-in generic protocol */
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
+
+#define MAX_NF_CT_PROTO IPPROTO_UDPLITE
+
+const struct nf_conntrack_l4proto *nf_ct_l4proto_find(u8 l4proto);
+
+/* Generic netlink helpers */
+int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
+ const struct nf_conntrack_tuple *tuple);
+int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
+ struct nf_conntrack_tuple *t,
+ u_int32_t flags);
+unsigned int nf_ct_port_nlattr_tuple_size(void);
+extern const struct nla_policy nf_ct_port_nla_policy[];
+
+#ifdef CONFIG_SYSCTL
+__printf(4, 5) __cold
+void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
+ const struct nf_conn *ct,
+ const struct nf_hook_state *state,
+ const char *fmt, ...);
+__printf(4, 5) __cold
+void nf_l4proto_log_invalid(const struct sk_buff *skb,
+ const struct nf_hook_state *state,
+ u8 protonum,
+ const char *fmt, ...);
+#else
+static inline __printf(4, 5) __cold
+void nf_l4proto_log_invalid(const struct sk_buff *skb,
+ const struct nf_hook_state *state,
+ u8 protonum,
+ const char *fmt, ...) {}
+static inline __printf(4, 5) __cold
+void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
+ const struct nf_conn *ct,
+ const struct nf_hook_state *state,
+ const char *fmt, ...) { }
+#endif /* CONFIG_SYSCTL */
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+static inline struct nf_generic_net *nf_generic_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.generic;
+}
+
+static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.tcp;
+}
+
+static inline struct nf_udp_net *nf_udp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.udp;
+}
+
+static inline struct nf_icmp_net *nf_icmp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmp;
+}
+
+static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmpv6;
+}
+
+/* Caller must check nf_ct_protonum(ct) is IPPROTO_TCP before calling. */
+static inline void nf_ct_set_tcp_be_liberal(struct nf_conn *ct)
+{
+ ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+ ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+}
+
+/* Caller must check nf_ct_protonum(ct) is IPPROTO_TCP before calling. */
+static inline bool nf_conntrack_tcp_established(const struct nf_conn *ct)
+{
+ return ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED &&
+ test_bit(IPS_ASSURED_BIT, &ct->status);
+}
+#endif
+
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.dccp;
+}
+#endif
+
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.sctp;
+}
+#endif
+
+#ifdef CONFIG_NF_CT_PROTO_GRE
+static inline struct nf_gre_net *nf_gre_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.gre;
+}
+#endif
+
+#endif /*_NF_CONNTRACK_L4PROTO_H*/
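
Callers resolve the per-protocol tracker by protocol number and may then use the TCP convenience helpers declared above. A hedged sketch:

static bool demo_tcp_is_established(const struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4;

	l4 = nf_ct_l4proto_find(nf_ct_protonum(ct));
	if (!l4 || l4->l4proto != IPPROTO_TCP)
		return false;

	/* safe: the protocol number was checked just above */
	return nf_conntrack_tcp_established(ct);
}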
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
new file mode 100644
index 000000000..66bab6c60
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _NF_CONNTRACK_LABELS_H
+#define _NF_CONNTRACK_LABELS_H
+
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <linux/types.h>
+#include <net/net_namespace.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <uapi/linux/netfilter/xt_connlabel.h>
+
+#define NF_CT_LABELS_MAX_SIZE ((XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE)
+
+struct nf_conn_labels {
+ unsigned long bits[NF_CT_LABELS_MAX_SIZE / sizeof(long)];
+};
+
+/* Can't use nf_ct_ext_find(), flow dissector cannot use symbols
+ * exported by nf_conntrack module.
+ */
+static inline struct nf_conn_labels *nf_ct_labels_find(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+ struct nf_ct_ext *ext = ct->ext;
+
+ if (!ext || !__nf_ct_ext_exist(ext, NF_CT_EXT_LABELS))
+ return NULL;
+
+ return (void *)ct->ext + ct->ext->offset[NF_CT_EXT_LABELS];
+#else
+ return NULL;
+#endif
+}
+
+static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+ struct net *net = nf_ct_net(ct);
+
+ if (net->ct.labels_used == 0)
+ return NULL;
+
+ return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC);
+#else
+ return NULL;
+#endif
+}
+
+int nf_connlabels_replace(struct nf_conn *ct,
+ const u32 *data, const u32 *mask, unsigned int words);
+
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+int nf_conntrack_labels_init(void);
+int nf_connlabels_get(struct net *net, unsigned int bit);
+void nf_connlabels_put(struct net *net);
+#else
+static inline int nf_connlabels_get(struct net *net, unsigned int bit) { return 0; }
+static inline void nf_connlabels_put(struct net *net) {}
+#endif
+
+#endif /* _NF_CONNTRACK_LABELS_H */
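
The connlabel flow mirrors what xt_connlabel does: reserve the highest bit in use with nf_connlabels_get(), attach the extension, then flip bits through nf_connlabels_replace(). A hedged sketch; bit 3 is arbitrary and error unwinding past the replace call is elided:

static int demo_set_label(struct net *net, struct nf_conn *ct)
{
	unsigned int bit = 3;
	u32 value = 1u << bit, mask = value;	/* touch only this bit */
	int err;

	err = nf_connlabels_get(net, bit);
	if (err)
		return err;

	if (!nf_ct_labels_ext_add(ct)) {
		nf_connlabels_put(net);
		return -ENOSPC;
	}

	return nf_connlabels_replace(ct, &value, &mask, 1);
}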
diff --git a/include/net/netfilter/nf_conntrack_seqadj.h b/include/net/netfilter/nf_conntrack_seqadj.h
new file mode 100644
index 000000000..883c414b7
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_seqadj.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_CONNTRACK_SEQADJ_H
+#define _NF_CONNTRACK_SEQADJ_H
+
+#include <net/netfilter/nf_conntrack_extend.h>
+
+/**
+ * struct nf_ct_seqadj - sequence number adjustment information
+ *
+ * @correction_pos: position of the last TCP sequence number modification
+ * @offset_before: sequence number offset before last modification
+ * @offset_after: sequence number offset after last modification
+ */
+struct nf_ct_seqadj {
+ u32 correction_pos;
+ s32 offset_before;
+ s32 offset_after;
+};
+
+struct nf_conn_seqadj {
+ struct nf_ct_seqadj seq[IP_CT_DIR_MAX];
+};
+
+static inline struct nf_conn_seqadj *nfct_seqadj(const struct nf_conn *ct)
+{
+ return nf_ct_ext_find(ct, NF_CT_EXT_SEQADJ);
+}
+
+static inline struct nf_conn_seqadj *nfct_seqadj_ext_add(struct nf_conn *ct)
+{
+ return nf_ct_ext_add(ct, NF_CT_EXT_SEQADJ, GFP_ATOMIC);
+}
+
+int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ s32 off);
+int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ __be32 seq, s32 off);
+void nf_ct_tcp_seqadj_set(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, s32 off);
+
+int nf_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, unsigned int protoff);
+s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir, u32 seq);
+
+#endif /* _NF_CONNTRACK_SEQADJ_H */
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
new file mode 100644
index 000000000..6a3ab081e
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_CONNTRACK_SYNPROXY_H
+#define _NF_CONNTRACK_SYNPROXY_H
+
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netns/generic.h>
+
+struct nf_conn_synproxy {
+ u32 isn;
+ u32 its;
+ u32 tsoff;
+};
+
+static inline struct nf_conn_synproxy *nfct_synproxy(const struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ return nf_ct_ext_find(ct, NF_CT_EXT_SYNPROXY);
+#else
+ return NULL;
+#endif
+}
+
+static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ return nf_ct_ext_add(ct, NF_CT_EXT_SYNPROXY, GFP_ATOMIC);
+#else
+ return NULL;
+#endif
+}
+
+static inline bool nf_ct_add_synproxy(struct nf_conn *ct,
+ const struct nf_conn *tmpl)
+{
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ if (tmpl && nfct_synproxy(tmpl)) {
+ if (!nfct_seqadj_ext_add(ct))
+ return false;
+
+ if (!nfct_synproxy_ext_add(ct))
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+#endif /* _NF_CONNTRACK_SYNPROXY_H */
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
new file mode 100644
index 000000000..9fdaba911
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_CONNTRACK_TIMEOUT_H
+#define _NF_CONNTRACK_TIMEOUT_H
+
+#include <net/net_namespace.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <linux/refcount.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+#define CTNL_TIMEOUT_NAME_MAX 32
+
+struct nf_ct_timeout {
+ __u16 l3num;
+ const struct nf_conntrack_l4proto *l4proto;
+ char data[];
+};
+
+struct nf_conn_timeout {
+ struct nf_ct_timeout __rcu *timeout;
+};
+
+static inline unsigned int *
+nf_ct_timeout_data(const struct nf_conn_timeout *t)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ struct nf_ct_timeout *timeout;
+
+ timeout = rcu_dereference(t->timeout);
+ if (timeout == NULL)
+ return NULL;
+
+ return (unsigned int *)timeout->data;
+#else
+ return NULL;
+#endif
+}
+
+static inline
+struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ return nf_ct_ext_find(ct, NF_CT_EXT_TIMEOUT);
+#else
+ return NULL;
+#endif
+}
+
+static inline
+struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
+ struct nf_ct_timeout *timeout,
+ gfp_t gfp)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ struct nf_conn_timeout *timeout_ext;
+
+ timeout_ext = nf_ct_ext_add(ct, NF_CT_EXT_TIMEOUT, gfp);
+ if (timeout_ext == NULL)
+ return NULL;
+
+ rcu_assign_pointer(timeout_ext->timeout, timeout);
+
+ return timeout_ext;
+#else
+ return NULL;
+#endif
+}
+
+static inline unsigned int *nf_ct_timeout_lookup(const struct nf_conn *ct)
+{
+ unsigned int *timeouts = NULL;
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ struct nf_conn_timeout *timeout_ext;
+
+ timeout_ext = nf_ct_timeout_find(ct);
+ if (timeout_ext)
+ timeouts = nf_ct_timeout_data(timeout_ext);
+#endif
+ return timeouts;
+}
+
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout);
+int nf_ct_set_timeout(struct net *net, struct nf_conn *ct, u8 l3num, u8 l4num,
+ const char *timeout_name);
+void nf_ct_destroy_timeout(struct nf_conn *ct);
+#else
+static inline int nf_ct_set_timeout(struct net *net, struct nf_conn *ct,
+ u8 l3num, u8 l4num,
+ const char *timeout_name)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void nf_ct_destroy_timeout(struct nf_conn *ct)
+{
+}
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
+
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+struct nf_ct_timeout_hooks {
+ struct nf_ct_timeout *(*timeout_find_get)(struct net *net, const char *name);
+ void (*timeout_put)(struct nf_ct_timeout *timeout);
+};
+
+extern const struct nf_ct_timeout_hooks __rcu *nf_ct_timeout_hook;
+#endif
+
+#endif /* _NF_CONNTRACK_TIMEOUT_H */
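
A protocol tracker consumes these policies by preferring the per-connection timeout data and falling back to its per-netns defaults otherwise; this is the pattern behind nf_ct_timeout_lookup(). A hedged sketch with an assumed default table:

static unsigned int demo_pick_timeout(const struct nf_conn *ct,
				      const unsigned int *default_timeouts,
				      unsigned int state)
{
	const unsigned int *timeouts = nf_ct_timeout_lookup(ct);

	if (!timeouts)
		timeouts = default_timeouts;	/* per-netns defaults */

	return timeouts[state];
}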
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
new file mode 100644
index 000000000..57138d974
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_timestamp.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_CONNTRACK_TSTAMP_H
+#define _NF_CONNTRACK_TSTAMP_H
+
+#include <net/net_namespace.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+struct nf_conn_tstamp {
+ u_int64_t start;
+ u_int64_t stop;
+};
+
+static inline
+struct nf_conn_tstamp *nf_conn_tstamp_find(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ return nf_ct_ext_find(ct, NF_CT_EXT_TSTAMP);
+#else
+ return NULL;
+#endif
+}
+
+static inline
+struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ struct net *net = nf_ct_net(ct);
+
+ if (!net->ct.sysctl_tstamp)
+ return NULL;
+
+ return nf_ct_ext_add(ct, NF_CT_EXT_TSTAMP, gfp);
+#else
+ return NULL;
+#endif
+}
+
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+void nf_conntrack_tstamp_pernet_init(struct net *net);
+#else
+static inline void nf_conntrack_tstamp_pernet_init(struct net *net) {}
+#endif /* CONFIG_NF_CONNTRACK_TIMESTAMP */
+
+#endif /* _NF_CONNTRACK_TSTAMP_H */
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
new file mode 100644
index 000000000..9334371c9
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions and Declarations for tuple.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - generalize L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack_tuple.h
+ */
+
+#ifndef _NF_CONNTRACK_TUPLE_H
+#define _NF_CONNTRACK_TUPLE_H
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <linux/list_nulls.h>
+
+/* A `tuple' is a structure containing the information to uniquely
+   identify a connection, i.e. if two packets have the same tuple, they
+ are in the same connection; if not, they are not.
+
+ We divide the structure along "manipulatable" and
+ "non-manipulatable" lines, for the benefit of the NAT code.
+*/
+
+#define NF_CT_TUPLE_L3SIZE ARRAY_SIZE(((union nf_inet_addr *)NULL)->all)
+
+/* The manipulable part of the tuple. */
+struct nf_conntrack_man {
+ union nf_inet_addr u3;
+ union nf_conntrack_man_proto u;
+ /* Layer 3 protocol */
+ u_int16_t l3num;
+};
+
+/* This contains the information to distinguish a connection. */
+struct nf_conntrack_tuple {
+ struct nf_conntrack_man src;
+
+ /* These are the parts of the tuple which are fixed. */
+ struct {
+ union nf_inet_addr u3;
+ union {
+ /* Add other protocols here. */
+ __be16 all;
+
+ struct {
+ __be16 port;
+ } tcp;
+ struct {
+ __be16 port;
+ } udp;
+ struct {
+ u_int8_t type, code;
+ } icmp;
+ struct {
+ __be16 port;
+ } dccp;
+ struct {
+ __be16 port;
+ } sctp;
+ struct {
+ __be16 key;
+ } gre;
+ } u;
+
+ /* The protocol. */
+ u_int8_t protonum;
+
+ /* The direction (for tuplehash) */
+ u_int8_t dir;
+ } dst;
+};
+
+struct nf_conntrack_tuple_mask {
+ struct {
+ union nf_inet_addr u3;
+ union nf_conntrack_man_proto u;
+ } src;
+};
+
+static inline void nf_ct_dump_tuple_ip(const struct nf_conntrack_tuple *t)
+{
+#ifdef DEBUG
+ printk("tuple %p: %u %pI4:%hu -> %pI4:%hu\n",
+ t, t->dst.protonum,
+ &t->src.u3.ip, ntohs(t->src.u.all),
+ &t->dst.u3.ip, ntohs(t->dst.u.all));
+#endif
+}
+
+static inline void nf_ct_dump_tuple_ipv6(const struct nf_conntrack_tuple *t)
+{
+#ifdef DEBUG
+ printk("tuple %p: %u %pI6 %hu -> %pI6 %hu\n",
+ t, t->dst.protonum,
+ t->src.u3.all, ntohs(t->src.u.all),
+ t->dst.u3.all, ntohs(t->dst.u.all));
+#endif
+}
+
+static inline void nf_ct_dump_tuple(const struct nf_conntrack_tuple *t)
+{
+ switch (t->src.l3num) {
+ case AF_INET:
+ nf_ct_dump_tuple_ip(t);
+ break;
+ case AF_INET6:
+ nf_ct_dump_tuple_ipv6(t);
+ break;
+ }
+}
+
+/* If we're the first tuple, it's the original dir. */
+#define NF_CT_DIRECTION(h) \
+ ((enum ip_conntrack_dir)(h)->tuple.dst.dir)
+
+/* Connections have two entries in the hash table: one for each way */
+struct nf_conntrack_tuple_hash {
+ struct hlist_nulls_node hnnode;
+ struct nf_conntrack_tuple tuple;
+};
+
+static inline bool __nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1,
+ const struct nf_conntrack_tuple *t2)
+{
+ return (nf_inet_addr_cmp(&t1->src.u3, &t2->src.u3) &&
+ t1->src.u.all == t2->src.u.all &&
+ t1->src.l3num == t2->src.l3num);
+}
+
+static inline bool __nf_ct_tuple_dst_equal(const struct nf_conntrack_tuple *t1,
+ const struct nf_conntrack_tuple *t2)
+{
+ return (nf_inet_addr_cmp(&t1->dst.u3, &t2->dst.u3) &&
+ t1->dst.u.all == t2->dst.u.all &&
+ t1->dst.protonum == t2->dst.protonum);
+}
+
+static inline bool nf_ct_tuple_equal(const struct nf_conntrack_tuple *t1,
+ const struct nf_conntrack_tuple *t2)
+{
+ return __nf_ct_tuple_src_equal(t1, t2) &&
+ __nf_ct_tuple_dst_equal(t1, t2);
+}
+
+static inline bool
+nf_ct_tuple_mask_equal(const struct nf_conntrack_tuple_mask *m1,
+ const struct nf_conntrack_tuple_mask *m2)
+{
+ return (nf_inet_addr_cmp(&m1->src.u3, &m2->src.u3) &&
+ m1->src.u.all == m2->src.u.all);
+}
+
+static inline bool
+nf_ct_tuple_src_mask_cmp(const struct nf_conntrack_tuple *t1,
+ const struct nf_conntrack_tuple *t2,
+ const struct nf_conntrack_tuple_mask *mask)
+{
+ int count;
+
+ for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
+ if ((t1->src.u3.all[count] ^ t2->src.u3.all[count]) &
+ mask->src.u3.all[count])
+ return false;
+ }
+
+ if ((t1->src.u.all ^ t2->src.u.all) & mask->src.u.all)
+ return false;
+
+ if (t1->src.l3num != t2->src.l3num ||
+ t1->dst.protonum != t2->dst.protonum)
+ return false;
+
+ return true;
+}
+
+static inline bool
+nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple_mask *mask)
+{
+ return nf_ct_tuple_src_mask_cmp(t, tuple, mask) &&
+ __nf_ct_tuple_dst_equal(t, tuple);
+}
+
+#endif /* _NF_CONNTRACK_TUPLE_H */
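
The masked comparison above is what expectation lookup is built on: the source half of the packet's tuple is compared under a mask while the destination half must match exactly. A hedged sketch; note that l3num and protonum are always compared, so a zeroed mask yields a dst-plus-protocol match:

static bool demo_dst_only_match(const struct nf_conntrack_tuple *pkt,
				const struct nf_conntrack_tuple *expected)
{
	struct nf_conntrack_tuple_mask wild;

	memset(&wild, 0, sizeof(wild));	/* ignore the source entirely */

	return nf_ct_tuple_mask_cmp(pkt, expected, &wild);
}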
diff --git a/include/net/netfilter/nf_conntrack_zones.h b/include/net/netfilter/nf_conntrack_zones.h
new file mode 100644
index 000000000..48dbadb96
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_zones.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_CONNTRACK_ZONES_H
+#define _NF_CONNTRACK_ZONES_H
+
+#include <linux/netfilter/nf_conntrack_zones_common.h>
+#include <net/netfilter/nf_conntrack.h>
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ return &ct->zone;
+#else
+ return &nf_ct_zone_dflt;
+#endif
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags)
+{
+ zone->id = id;
+ zone->flags = flags;
+ zone->dir = dir;
+
+ return zone;
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_tmpl(const struct nf_conn *tmpl, const struct sk_buff *skb,
+ struct nf_conntrack_zone *tmp)
+{
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ if (!tmpl)
+ return &nf_ct_zone_dflt;
+
+ if (tmpl->zone.flags & NF_CT_FLAG_MARK)
+ return nf_ct_zone_init(tmp, skb->mark, tmpl->zone.dir, 0);
+#endif
+ return nf_ct_zone(tmpl);
+}
+
+static inline void nf_ct_zone_add(struct nf_conn *ct,
+ const struct nf_conntrack_zone *zone)
+{
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ ct->zone = *zone;
+#endif
+}
+
+static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
+ enum ip_conntrack_dir dir)
+{
+ return zone->dir & (1 << dir);
+}
+
+static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone,
+ enum ip_conntrack_dir dir)
+{
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ return nf_ct_zone_matches_dir(zone, dir) ?
+ zone->id : NF_CT_DEFAULT_ZONE_ID;
+#else
+ return NF_CT_DEFAULT_ZONE_ID;
+#endif
+}
+
+static inline bool nf_ct_zone_equal(const struct nf_conn *a,
+ const struct nf_conntrack_zone *b,
+ enum ip_conntrack_dir dir)
+{
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ return nf_ct_zone_id(nf_ct_zone(a), dir) ==
+ nf_ct_zone_id(b, dir);
+#else
+ return true;
+#endif
+}
+
+static inline bool nf_ct_zone_equal_any(const struct nf_conn *a,
+ const struct nf_conntrack_zone *b)
+{
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ return nf_ct_zone(a)->id == b->id;
+#else
+ return true;
+#endif
+}
+
+#endif /* _NF_CONNTRACK_ZONES_H */
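
Zone comparisons are always direction-qualified; a zone built with nf_ct_zone_init() can apply to one or both directions. A hedged sketch (zone id 42 is arbitrary; NF_CT_DEFAULT_ZONE_DIR is assumed to come from nf_conntrack_zones_common.h):

static bool demo_both_in_zone42(const struct nf_conn *a,
				const struct nf_conn *b)
{
	struct nf_conntrack_zone tmp;
	const struct nf_conntrack_zone *z;

	/* a temporary zone covering both directions */
	z = nf_ct_zone_init(&tmp, 42, NF_CT_DEFAULT_ZONE_DIR, 0);

	return nf_ct_zone_equal(a, z, IP_CT_DIR_ORIGINAL) &&
	       nf_ct_zone_equal(b, z, IP_CT_DIR_ORIGINAL);
}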
diff --git a/include/net/netfilter/nf_dup_netdev.h b/include/net/netfilter/nf_dup_netdev.h
new file mode 100644
index 000000000..b175d271a
--- /dev/null
+++ b/include/net/netfilter/nf_dup_netdev.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_DUP_NETDEV_H_
+#define _NF_DUP_NETDEV_H_
+
+#include <net/netfilter/nf_tables.h>
+
+void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif);
+void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif);
+
+struct nft_offload_ctx;
+struct nft_flow_rule;
+
+int nft_fwd_dup_netdev_offload(struct nft_offload_ctx *ctx,
+ struct nft_flow_rule *flow,
+ enum flow_action_id id, int oif);
+#endif
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
new file mode 100644
index 000000000..dde4dd9c4
--- /dev/null
+++ b/include/net/netfilter/nf_flow_table.h
@@ -0,0 +1,375 @@
+#ifndef _NF_FLOW_TABLE_H
+#define _NF_FLOW_TABLE_H
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/netdevice.h>
+#include <linux/rhashtable-types.h>
+#include <linux/rcupdate.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/flow_offload.h>
+#include <net/dst.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+
+struct nf_flowtable;
+struct nf_flow_rule;
+struct flow_offload;
+enum flow_offload_tuple_dir;
+
+struct nf_flow_key {
+ struct flow_dissector_key_meta meta;
+ struct flow_dissector_key_control control;
+ struct flow_dissector_key_control enc_control;
+ struct flow_dissector_key_basic basic;
+ struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_vlan cvlan;
+ union {
+ struct flow_dissector_key_ipv4_addrs ipv4;
+ struct flow_dissector_key_ipv6_addrs ipv6;
+ };
+ struct flow_dissector_key_keyid enc_key_id;
+ union {
+ struct flow_dissector_key_ipv4_addrs enc_ipv4;
+ struct flow_dissector_key_ipv6_addrs enc_ipv6;
+ };
+ struct flow_dissector_key_tcp tcp;
+ struct flow_dissector_key_ports tp;
+} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
+
+struct nf_flow_match {
+ struct flow_dissector dissector;
+ struct nf_flow_key key;
+ struct nf_flow_key mask;
+};
+
+struct nf_flow_rule {
+ struct nf_flow_match match;
+ struct flow_rule *rule;
+};
+
+struct nf_flowtable_type {
+ struct list_head list;
+ int family;
+ int (*init)(struct nf_flowtable *ft);
+ bool (*gc)(const struct flow_offload *flow);
+ int (*setup)(struct nf_flowtable *ft,
+ struct net_device *dev,
+ enum flow_block_command cmd);
+ int (*action)(struct net *net,
+ struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule);
+ void (*free)(struct nf_flowtable *ft);
+ void (*get)(struct nf_flowtable *ft);
+ void (*put)(struct nf_flowtable *ft);
+ nf_hookfn *hook;
+ struct module *owner;
+};
+
+enum nf_flowtable_flags {
+ NF_FLOWTABLE_HW_OFFLOAD = 0x1, /* NFT_FLOWTABLE_HW_OFFLOAD */
+ NF_FLOWTABLE_COUNTER = 0x2, /* NFT_FLOWTABLE_COUNTER */
+};
+
+struct nf_flowtable {
+ struct list_head list;
+ struct rhashtable rhashtable;
+ int priority;
+ const struct nf_flowtable_type *type;
+ struct delayed_work gc_work;
+ unsigned int flags;
+ struct flow_block flow_block;
+ struct rw_semaphore flow_block_lock; /* Guards flow_block */
+ possible_net_t net;
+};
+
+static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable)
+{
+ return flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD;
+}
+
+enum flow_offload_tuple_dir {
+ FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
+ FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
+};
+#define FLOW_OFFLOAD_DIR_MAX IP_CT_DIR_MAX
+
+enum flow_offload_xmit_type {
+ FLOW_OFFLOAD_XMIT_UNSPEC = 0,
+ FLOW_OFFLOAD_XMIT_NEIGH,
+ FLOW_OFFLOAD_XMIT_XFRM,
+ FLOW_OFFLOAD_XMIT_DIRECT,
+ FLOW_OFFLOAD_XMIT_TC,
+};
+
+#define NF_FLOW_TABLE_ENCAP_MAX 2
+
+struct flow_offload_tuple {
+ union {
+ struct in_addr src_v4;
+ struct in6_addr src_v6;
+ };
+ union {
+ struct in_addr dst_v4;
+ struct in6_addr dst_v6;
+ };
+ struct {
+ __be16 src_port;
+ __be16 dst_port;
+ };
+
+ int iifidx;
+
+ u8 l3proto;
+ u8 l4proto;
+ struct {
+ u16 id;
+ __be16 proto;
+ } encap[NF_FLOW_TABLE_ENCAP_MAX];
+
+ /* All members above are keys for lookups, see flow_offload_hash(). */
+ struct { } __hash;
+
+ u8 dir:2,
+ xmit_type:3,
+ encap_num:2,
+ in_vlan_ingress:2;
+ u16 mtu;
+ union {
+ struct {
+ struct dst_entry *dst_cache;
+ u32 dst_cookie;
+ };
+ struct {
+ u32 ifidx;
+ u32 hw_ifidx;
+ u8 h_source[ETH_ALEN];
+ u8 h_dest[ETH_ALEN];
+ } out;
+ struct {
+ u32 iifidx;
+ } tc;
+ };
+};
+
+struct flow_offload_tuple_rhash {
+ struct rhash_head node;
+ struct flow_offload_tuple tuple;
+};
+
+enum nf_flow_flags {
+ NF_FLOW_SNAT,
+ NF_FLOW_DNAT,
+ NF_FLOW_TEARDOWN,
+ NF_FLOW_HW,
+ NF_FLOW_HW_DYING,
+ NF_FLOW_HW_DEAD,
+ NF_FLOW_HW_PENDING,
+ NF_FLOW_HW_BIDIRECTIONAL,
+ NF_FLOW_HW_ESTABLISHED,
+};
+
+enum flow_offload_type {
+ NF_FLOW_OFFLOAD_UNSPEC = 0,
+ NF_FLOW_OFFLOAD_ROUTE,
+};
+
+struct flow_offload {
+ struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
+ struct nf_conn *ct;
+ unsigned long flags;
+ u16 type;
+ u32 timeout;
+ struct rcu_head rcu_head;
+};
+
+#define NF_FLOW_TIMEOUT (30 * HZ)
+#define nf_flowtable_time_stamp (u32)jiffies
+
+unsigned long flow_offload_get_timeout(struct flow_offload *flow);
+
+static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
+{
+ return (__s32)(timeout - nf_flowtable_time_stamp);
+}
+
+struct nf_flow_route {
+ struct {
+ struct dst_entry *dst;
+ struct {
+ u32 ifindex;
+ struct {
+ u16 id;
+ __be16 proto;
+ } encap[NF_FLOW_TABLE_ENCAP_MAX];
+ u8 num_encaps:2,
+ ingress_vlans:2;
+ } in;
+ struct {
+ u32 ifindex;
+ u32 hw_ifindex;
+ u8 h_source[ETH_ALEN];
+ u8 h_dest[ETH_ALEN];
+ } out;
+ enum flow_offload_xmit_type xmit_type;
+ } tuple[FLOW_OFFLOAD_DIR_MAX];
+};
+
+struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
+void flow_offload_free(struct flow_offload *flow);
+
+static inline int
+nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
+ flow_setup_cb_t *cb, void *cb_priv)
+{
+ struct flow_block *block = &flow_table->flow_block;
+ struct flow_block_cb *block_cb;
+ int err = 0;
+
+ down_write(&flow_table->flow_block_lock);
+ block_cb = flow_block_cb_lookup(block, cb, cb_priv);
+ if (block_cb) {
+ err = -EEXIST;
+ goto unlock;
+ }
+
+ block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
+ if (IS_ERR(block_cb)) {
+ err = PTR_ERR(block_cb);
+ goto unlock;
+ }
+
+ list_add_tail(&block_cb->list, &block->cb_list);
+ up_write(&flow_table->flow_block_lock);
+
+ if (flow_table->type->get)
+ flow_table->type->get(flow_table);
+ return 0;
+
+unlock:
+ up_write(&flow_table->flow_block_lock);
+ return err;
+}
+
+static inline void
+nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
+ flow_setup_cb_t *cb, void *cb_priv)
+{
+ struct flow_block *block = &flow_table->flow_block;
+ struct flow_block_cb *block_cb;
+
+ down_write(&flow_table->flow_block_lock);
+ block_cb = flow_block_cb_lookup(block, cb, cb_priv);
+ if (block_cb) {
+ list_del(&block_cb->list);
+ flow_block_cb_free(block_cb);
+ } else {
+ WARN_ON(true);
+ }
+ up_write(&flow_table->flow_block_lock);
+
+ if (flow_table->type->put)
+ flow_table->type->put(flow_table);
+}
+
+int flow_offload_route_init(struct flow_offload *flow,
+ const struct nf_flow_route *route);
+
+int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
+void flow_offload_refresh(struct nf_flowtable *flow_table,
+ struct flow_offload *flow, bool force);
+
+struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
+ struct flow_offload_tuple *tuple);
+void nf_flow_table_gc_run(struct nf_flowtable *flow_table);
+void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
+ struct net_device *dev);
+void nf_flow_table_cleanup(struct net_device *dev);
+
+int nf_flow_table_init(struct nf_flowtable *flow_table);
+void nf_flow_table_free(struct nf_flowtable *flow_table);
+
+void flow_offload_teardown(struct flow_offload *flow);
+
+void nf_flow_snat_port(const struct flow_offload *flow,
+ struct sk_buff *skb, unsigned int thoff,
+ u8 protocol, enum flow_offload_tuple_dir dir);
+void nf_flow_dnat_port(const struct flow_offload *flow,
+ struct sk_buff *skb, unsigned int thoff,
+ u8 protocol, enum flow_offload_tuple_dir dir);
+
+struct flow_ports {
+ __be16 source, dest;
+};
+
+unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
+unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
+
+#define MODULE_ALIAS_NF_FLOWTABLE(family) \
+ MODULE_ALIAS("nf-flowtable-" __stringify(family))
+
+void nf_flow_offload_add(struct nf_flowtable *flowtable,
+ struct flow_offload *flow);
+void nf_flow_offload_del(struct nf_flowtable *flowtable,
+ struct flow_offload *flow);
+void nf_flow_offload_stats(struct nf_flowtable *flowtable,
+ struct flow_offload *flow);
+
+void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
+void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable);
+
+int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ enum flow_block_command cmd);
+int nf_flow_rule_route_ipv4(struct net *net, struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule);
+int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule);
+
+int nf_flow_table_offload_init(void);
+void nf_flow_table_offload_exit(void);
+
+static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
+{
+ __be16 proto;
+
+ proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
+ sizeof(struct pppoe_hdr)));
+ switch (proto) {
+ case htons(PPP_IP):
+ return htons(ETH_P_IP);
+ case htons(PPP_IPV6):
+ return htons(ETH_P_IPV6);
+ }
+
+ return 0;
+}
+
+#define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
+#define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
+#define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count) \
+ this_cpu_inc((net)->ft.stat->count)
+#define NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count) \
+ this_cpu_dec((net)->ft.stat->count)
+
+#ifdef CONFIG_NF_FLOW_TABLE_PROCFS
+int nf_flow_table_init_proc(struct net *net);
+void nf_flow_table_fini_proc(struct net *net);
+#else
+static inline int nf_flow_table_init_proc(struct net *net)
+{
+ return 0;
+}
+
+static inline void nf_flow_table_fini_proc(struct net *net)
+{
+}
+#endif /* CONFIG_NF_FLOW_TABLE_PROCFS */
+
+#endif /* _NF_FLOW_TABLE_H */
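
The timeout arithmetic above drives the gc worker: flow->timeout stores an absolute jiffies-based stamp, and a non-positive delta means the flow has aged out. A hedged sketch of the expiry test and of the refresh the datapath performs when traffic is seen:

static bool demo_flow_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}

static void demo_flow_touch(struct flow_offload *flow)
{
	/* push expiry one NF_FLOW_TIMEOUT window into the future */
	WRITE_ONCE(flow->timeout, nf_flowtable_time_stamp + NF_FLOW_TIMEOUT);
}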
diff --git a/include/net/netfilter/nf_hooks_lwtunnel.h b/include/net/netfilter/nf_hooks_lwtunnel.h
new file mode 100644
index 000000000..52e27920f
--- /dev/null
+++ b/include/net/netfilter/nf_hooks_lwtunnel.h
@@ -0,0 +1,7 @@
+#include <linux/sysctl.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_SYSCTL
+int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+#endif
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
new file mode 100644
index 000000000..e55eedc84
--- /dev/null
+++ b/include/net/netfilter/nf_log.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_LOG_H
+#define _NF_LOG_H
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_log.h>
+
+/* Log tcp sequence, tcp options, ip options and uid owning local socket */
+#define NF_LOG_DEFAULT_MASK 0x0f
+
+/* This flag indicates that copy_len field in nf_loginfo is set */
+#define NF_LOG_F_COPY_LEN 0x1
+
+enum nf_log_type {
+ NF_LOG_TYPE_LOG = 0,
+ NF_LOG_TYPE_ULOG,
+ NF_LOG_TYPE_MAX
+};
+
+struct nf_loginfo {
+ u_int8_t type;
+ union {
+ struct {
+ /* copy_len will be used iff you set
+ * NF_LOG_F_COPY_LEN in flags
+ */
+ u_int32_t copy_len;
+ u_int16_t group;
+ u_int16_t qthreshold;
+ u_int16_t flags;
+ } ulog;
+ struct {
+ u_int8_t level;
+ u_int8_t logflags;
+ } log;
+ } u;
+};
+
+typedef void nf_logfn(struct net *net,
+ u_int8_t pf,
+ unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct nf_loginfo *li,
+ const char *prefix);
+
+struct nf_logger {
+ char *name;
+ enum nf_log_type type;
+ nf_logfn *logfn;
+ struct module *me;
+};
+
+/* sysctl_nf_log_all_netns - allow LOG target in all network namespaces */
+extern int sysctl_nf_log_all_netns;
+
+/* Function to register/unregister log function. */
+int nf_log_register(u_int8_t pf, struct nf_logger *logger);
+void nf_log_unregister(struct nf_logger *logger);
+
+int nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger);
+void nf_log_unset(struct net *net, const struct nf_logger *logger);
+
+int nf_log_bind_pf(struct net *net, u_int8_t pf,
+ const struct nf_logger *logger);
+void nf_log_unbind_pf(struct net *net, u_int8_t pf);
+
+int nf_logger_find_get(int pf, enum nf_log_type type);
+void nf_logger_put(int pf, enum nf_log_type type);
+
+#define MODULE_ALIAS_NF_LOGGER(family, type) \
+ MODULE_ALIAS("nf-logger-" __stringify(family) "-" __stringify(type))
+
+/* Calls the registered backend logging function */
+__printf(8, 9)
+void nf_log_packet(struct net *net,
+ u_int8_t pf,
+ unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct nf_loginfo *li,
+ const char *fmt, ...);
+
+__printf(8, 9)
+void nf_log_trace(struct net *net,
+ u_int8_t pf,
+ unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct nf_loginfo *li,
+ const char *fmt, ...);
+
+struct nf_log_buf;
+
+struct nf_log_buf *nf_log_buf_open(void);
+__printf(2, 3) int nf_log_buf_add(struct nf_log_buf *m, const char *f, ...);
+void nf_log_buf_close(struct nf_log_buf *m);
+#endif /* _NF_LOG_H */
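
Registering a backend means filling struct nf_logger and claiming a protocol family. A hedged sketch of a near-trivial logger (a real backend would format the skb via nf_log_buf_open()/nf_log_buf_add()/nf_log_buf_close()):

static void demo_logfn(struct net *net, u_int8_t pf, unsigned int hooknum,
		       const struct sk_buff *skb,
		       const struct net_device *in,
		       const struct net_device *out,
		       const struct nf_loginfo *li, const char *prefix)
{
	pr_info("%shook=%u proto=%u\n", prefix ? prefix : "", hooknum, pf);
}

static struct nf_logger demo_logger = {
	.name	= "demo-log",
	.type	= NF_LOG_TYPE_LOG,
	.logfn	= demo_logfn,
	.me	= THIS_MODULE,
};

/* On init: nf_log_register(NFPROTO_IPV4, &demo_logger); */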
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
new file mode 100644
index 000000000..e9eb01e99
--- /dev/null
+++ b/include/net/netfilter/nf_nat.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_NAT_H
+#define _NF_NAT_H
+
+#include <linux/list.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/nf_conntrack_pptp.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <uapi/linux/netfilter/nf_nat.h>
+
+enum nf_nat_manip_type {
+ NF_NAT_MANIP_SRC,
+ NF_NAT_MANIP_DST
+};
+
+/* SRC manip occurs POST_ROUTING or LOCAL_IN */
+#define HOOK2MANIP(hooknum) ((hooknum) != NF_INET_POST_ROUTING && \
+ (hooknum) != NF_INET_LOCAL_IN)
+
+/* per conntrack: nat application helper private data */
+union nf_conntrack_nat_help {
+ /* insert nat helper private data here */
+#if IS_ENABLED(CONFIG_NF_NAT_PPTP)
+ struct nf_nat_pptp nat_pptp_info;
+#endif
+};
+
+/* The structure embedded in the conntrack structure. */
+struct nf_conn_nat {
+ union nf_conntrack_nat_help help;
+#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE)
+ int masq_index;
+#endif
+};
+
+/* Set up the info structure to map into this range. */
+unsigned int nf_nat_setup_info(struct nf_conn *ct,
+ const struct nf_nat_range2 *range,
+ enum nf_nat_manip_type maniptype);
+
+extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct,
+ unsigned int hooknum);
+
+struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct);
+
+static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NF_NAT)
+ return nf_ct_ext_find(ct, NF_CT_EXT_NAT);
+#else
+ return NULL;
+#endif
+}
+
+static inline bool nf_nat_oif_changed(unsigned int hooknum,
+ enum ip_conntrack_info ctinfo,
+ struct nf_conn_nat *nat,
+ const struct net_device *out)
+{
+#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE)
+ return nat && nat->masq_index && hooknum == NF_INET_POST_ROUTING &&
+ CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL &&
+ nat->masq_index != out->ifindex;
+#else
+ return false;
+#endif
+}
+
+int nf_nat_register_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
+ const struct nf_hook_ops *nat_ops, unsigned int ops_count);
+void nf_nat_unregister_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
+ unsigned int ops_count);
+
+unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int hooknum, struct sk_buff *skb);
+
+unsigned int nf_nat_manip_pkt(struct sk_buff *skb, struct nf_conn *ct,
+ enum nf_nat_manip_type mtype,
+ enum ip_conntrack_dir dir);
+void nf_nat_csum_recalc(struct sk_buff *skb,
+ u8 nfproto, u8 proto, void *data, __sum16 *check,
+ int datalen, int oldlen);
+
+int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int hooknum);
+
+int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int hooknum, unsigned int hdrlen);
+
+int nf_nat_ipv4_register_fn(struct net *net, const struct nf_hook_ops *ops);
+void nf_nat_ipv4_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
+
+int nf_nat_ipv6_register_fn(struct net *net, const struct nf_hook_ops *ops);
+void nf_nat_ipv6_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
+
+int nf_nat_inet_register_fn(struct net *net, const struct nf_hook_ops *ops);
+void nf_nat_inet_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
+
+unsigned int
+nf_nat_inet_fn(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
+
+static inline int nf_nat_initialized(const struct nf_conn *ct,
+ enum nf_nat_manip_type manip)
+{
+ if (manip == NF_NAT_MANIP_SRC)
+ return ct->status & IPS_SRC_NAT_DONE;
+ else
+ return ct->status & IPS_DST_NAT_DONE;
+}
+#endif /* _NF_NAT_H */
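As a rough usage sketch (the helper name my_nat_hook_setup and the range
argument are hypothetical): a NAT hook derives the manip type from the hook
number and installs a binding once per direction:

	static unsigned int my_nat_hook_setup(struct sk_buff *skb,
					      const struct nf_hook_state *state,
					      struct nf_conn *ct,
					      const struct nf_nat_range2 *range)
	{
		enum nf_nat_manip_type mtype = HOOK2MANIP(state->hook);

		/* Only set up NAT once for this direction. */
		if (!nf_nat_initialized(ct, mtype))
			return nf_nat_setup_info(ct, range, mtype);

		return NF_ACCEPT;
	}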
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h
new file mode 100644
index 000000000..44c421b9b
--- /dev/null
+++ b/include/net/netfilter/nf_nat_helper.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_NAT_HELPER_H
+#define _NF_NAT_HELPER_H
+/* NAT protocol helper routines. */
+
+#include <linux/skbuff.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+/* These return true or false. */
+bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned int match_offset,
+ unsigned int match_len, const char *rep_buffer,
+ unsigned int rep_len, bool adjust);
+
+static inline bool nf_nat_mangle_tcp_packet(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned int match_offset,
+ unsigned int match_len,
+ const char *rep_buffer,
+ unsigned int rep_len)
+{
+ return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
+ match_offset, match_len,
+ rep_buffer, rep_len, true);
+}
+
+bool nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned int match_offset,
+ unsigned int match_len, const char *rep_buffer,
+ unsigned int rep_len);
+
+/* Setup NAT on this expected conntrack so it follows master, but goes
+ * to port ct->master->saved_proto. */
+void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this);
+
+u16 nf_nat_exp_find_port(struct nf_conntrack_expect *exp, u16 port);
+#endif /* _NF_NAT_HELPER_H */
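A sketch of the typical ALG call pattern (skb, ct, ctinfo, protoff and the
match/replacement variables are assumed to be in scope):

	/* Rewrite match_len bytes of TCP payload at match_offset with the
	 * new address string; seq/ack adjustment is handled internally.
	 */
	if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
				      match_offset, match_len,
				      new_addr, strlen(new_addr)))
		return NF_DROP;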
diff --git a/include/net/netfilter/nf_nat_masquerade.h b/include/net/netfilter/nf_nat_masquerade.h
new file mode 100644
index 000000000..be7abc9d5
--- /dev/null
+++ b/include/net/netfilter/nf_nat_masquerade.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_NAT_MASQUERADE_H_
+#define _NF_NAT_MASQUERADE_H_
+
+#include <linux/skbuff.h>
+#include <net/netfilter/nf_nat.h>
+
+unsigned int
+nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
+ const struct nf_nat_range2 *range,
+ const struct net_device *out);
+
+int nf_nat_masquerade_inet_register_notifiers(void);
+void nf_nat_masquerade_inet_unregister_notifiers(void);
+
+unsigned int
+nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ const struct net_device *out);
+
+#endif /*_NF_NAT_MASQUERADE_H_ */
diff --git a/include/net/netfilter/nf_nat_redirect.h b/include/net/netfilter/nf_nat_redirect.h
new file mode 100644
index 000000000..279380de9
--- /dev/null
+++ b/include/net/netfilter/nf_nat_redirect.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_NAT_REDIRECT_H_
+#define _NF_NAT_REDIRECT_H_
+
+#include <linux/skbuff.h>
+#include <uapi/linux/netfilter/nf_nat.h>
+
+unsigned int
+nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ unsigned int hooknum);
+unsigned int
+nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ unsigned int hooknum);
+
+#endif /* _NF_NAT_REDIRECT_H_ */
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
new file mode 100644
index 000000000..c81021ab0
--- /dev/null
+++ b/include/net/netfilter/nf_queue.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_QUEUE_H
+#define _NF_QUEUE_H
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/jhash.h>
+#include <linux/netfilter.h>
+#include <linux/skbuff.h>
+
+/* Each queued (to userspace) skbuff has one of these. */
+struct nf_queue_entry {
+ struct list_head list;
+ struct sk_buff *skb;
+ unsigned int id;
+ unsigned int hook_index; /* index in hook_entries->hook[] */
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ struct net_device *physin;
+ struct net_device *physout;
+#endif
+ struct nf_hook_state state;
+ u16 size; /* sizeof(entry) + saved route keys */
+
+ /* extra space to store route keys */
+};
+
+#define nf_queue_entry_reroute(x) ((void *)(x) + sizeof(struct nf_queue_entry))
+
+/* Packet queuing */
+struct nf_queue_handler {
+ int (*outfn)(struct nf_queue_entry *entry,
+ unsigned int queuenum);
+ void (*nf_hook_drop)(struct net *net);
+};
+
+void nf_register_queue_handler(const struct nf_queue_handler *qh);
+void nf_unregister_queue_handler(void);
+void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
+
+bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
+void nf_queue_entry_free(struct nf_queue_entry *entry);
+
+static inline void init_hashrandom(u32 *jhash_initval)
+{
+ while (*jhash_initval == 0)
+ *jhash_initval = get_random_u32();
+}
+
+static inline u32 hash_v4(const struct iphdr *iph, u32 initval)
+{
+ /* packets in either direction go into same queue */
+ if ((__force u32)iph->saddr < (__force u32)iph->daddr)
+ return jhash_3words((__force u32)iph->saddr,
+ (__force u32)iph->daddr, iph->protocol, initval);
+
+ return jhash_3words((__force u32)iph->daddr,
+ (__force u32)iph->saddr, iph->protocol, initval);
+}
+
+static inline u32 hash_v6(const struct ipv6hdr *ip6h, u32 initval)
+{
+ u32 a, b, c;
+
+ if ((__force u32)ip6h->saddr.s6_addr32[3] <
+ (__force u32)ip6h->daddr.s6_addr32[3]) {
+ a = (__force u32) ip6h->saddr.s6_addr32[3];
+ b = (__force u32) ip6h->daddr.s6_addr32[3];
+ } else {
+ b = (__force u32) ip6h->saddr.s6_addr32[3];
+ a = (__force u32) ip6h->daddr.s6_addr32[3];
+ }
+
+ if ((__force u32)ip6h->saddr.s6_addr32[1] <
+ (__force u32)ip6h->daddr.s6_addr32[1])
+ c = (__force u32) ip6h->saddr.s6_addr32[1];
+ else
+ c = (__force u32) ip6h->daddr.s6_addr32[1];
+
+ return jhash_3words(a, b, c, initval);
+}
+
+static inline u32 hash_bridge(const struct sk_buff *skb, u32 initval)
+{
+ struct ipv6hdr *ip6h, _ip6h;
+ struct iphdr *iph, _iph;
+
+ switch (eth_hdr(skb)->h_proto) {
+ case htons(ETH_P_IP):
+ iph = skb_header_pointer(skb, skb_network_offset(skb),
+ sizeof(*iph), &_iph);
+ if (iph)
+ return hash_v4(iph, initval);
+ break;
+ case htons(ETH_P_IPV6):
+ ip6h = skb_header_pointer(skb, skb_network_offset(skb),
+ sizeof(*ip6h), &_ip6h);
+ if (ip6h)
+ return hash_v6(ip6h, initval);
+ break;
+ }
+
+ return 0;
+}
+
+static inline u32
+nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family,
+ u32 initval)
+{
+ switch (family) {
+ case NFPROTO_IPV4:
+ queue += reciprocal_scale(hash_v4(ip_hdr(skb), initval),
+ queues_total);
+ break;
+ case NFPROTO_IPV6:
+ queue += reciprocal_scale(hash_v6(ipv6_hdr(skb), initval),
+ queues_total);
+ break;
+ case NFPROTO_BRIDGE:
+ queue += reciprocal_scale(hash_bridge(skb, initval),
+ queues_total);
+ break;
+ }
+
+ return queue;
+}
+
+int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
+ unsigned int index, unsigned int verdict);
+
+#endif /* _NF_QUEUE_H */
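A short sketch of how a queue backend might use these helpers to spread
flows over a queue range while keeping both directions of a flow on the
same queue (my_pick_queue and my_jhash_initval are hypothetical):

	static u32 my_jhash_initval;

	static u32 my_pick_queue(const struct sk_buff *skb, u8 family,
				 u16 base_queue, u16 queues_total)
	{
		init_hashrandom(&my_jhash_initval);
		return nfqueue_hash(skb, base_queue, queues_total, family,
				    my_jhash_initval);
	}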
diff --git a/include/net/netfilter/nf_reject.h b/include/net/netfilter/nf_reject.h
new file mode 100644
index 000000000..7c669792f
--- /dev/null
+++ b/include/net/netfilter/nf_reject.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_REJECT_H
+#define _NF_REJECT_H
+
+#include <linux/types.h>
+#include <uapi/linux/in.h>
+
+static inline bool nf_reject_verify_csum(struct sk_buff *skb, int dataoff,
+ __u8 proto)
+{
+ /* Skip protocols that don't use 16-bit one's complement checksum
+ * of the entire payload.
+ */
+ switch (proto) {
+ /* Protocols with optional checksums. */
+ case IPPROTO_UDP: {
+ const struct udphdr *udp_hdr;
+ struct udphdr _udp_hdr;
+
+ udp_hdr = skb_header_pointer(skb, dataoff,
+ sizeof(_udp_hdr),
+ &_udp_hdr);
+ if (!udp_hdr || udp_hdr->check)
+ return true;
+
+ return false;
+ }
+ case IPPROTO_GRE:
+
+ /* Protocols with other integrity checks. */
+ case IPPROTO_AH:
+ case IPPROTO_ESP:
+ case IPPROTO_SCTP:
+
+ /* Protocols with partial checksums. */
+ case IPPROTO_UDPLITE:
+ case IPPROTO_DCCP:
+ return false;
+ }
+ return true;
+}
+
+#endif /* _NF_REJECT_H */
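A sketch of the intended call pattern (state, dataoff and proto assumed in
scope): only insist on a valid checksum when the protocol mandates a
full-payload checksum in the first place:

	if (nf_reject_verify_csum(skb, dataoff, proto) &&
	    nf_ip_checksum(skb, state->hook, dataoff, proto))
		return;	/* corrupt packet: do not send a reject */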
diff --git a/include/net/netfilter/nf_socket.h b/include/net/netfilter/nf_socket.h
new file mode 100644
index 000000000..f9d7bee9b
--- /dev/null
+++ b/include/net/netfilter/nf_socket.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_SOCK_H_
+#define _NF_SOCK_H_
+
+#include <net/sock.h>
+
+struct sock *nf_sk_lookup_slow_v4(struct net *net, const struct sk_buff *skb,
+ const struct net_device *indev);
+
+struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
+ const struct net_device *indev);
+
+#endif /* _NF_SOCK_H_ */
diff --git a/include/net/netfilter/nf_synproxy.h b/include/net/netfilter/nf_synproxy.h
new file mode 100644
index 000000000..a336f9434
--- /dev/null
+++ b/include/net/netfilter/nf_synproxy.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_SYNPROXY_SHARED_H
+#define _NF_SYNPROXY_SHARED_H
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <net/ip6_checksum.h>
+#include <net/ip6_route.h>
+#include <net/tcp.h>
+
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
+
+struct synproxy_stats {
+ unsigned int syn_received;
+ unsigned int cookie_invalid;
+ unsigned int cookie_valid;
+ unsigned int cookie_retrans;
+ unsigned int conn_reopened;
+};
+
+struct synproxy_net {
+ struct nf_conn *tmpl;
+ struct synproxy_stats __percpu *stats;
+ unsigned int hook_ref4;
+ unsigned int hook_ref6;
+};
+
+extern unsigned int synproxy_net_id;
+static inline struct synproxy_net *synproxy_pernet(struct net *net)
+{
+ return net_generic(net, synproxy_net_id);
+}
+
+struct synproxy_options {
+ u8 options;
+ u8 wscale;
+ u16 mss_option;
+ u16 mss_encode;
+ u32 tsval;
+ u32 tsecr;
+};
+
+struct nf_synproxy_info;
+bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+ const struct tcphdr *th,
+ struct synproxy_options *opts);
+
+void synproxy_init_timestamp_cookie(const struct nf_synproxy_info *info,
+ struct synproxy_options *opts);
+
+void synproxy_send_client_synack(struct net *net, const struct sk_buff *skb,
+ const struct tcphdr *th,
+ const struct synproxy_options *opts);
+
+bool synproxy_recv_client_ack(struct net *net,
+ const struct sk_buff *skb,
+ const struct tcphdr *th,
+ struct synproxy_options *opts, u32 recv_seq);
+
+struct nf_hook_state;
+
+unsigned int ipv4_synproxy_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *nhs);
+int nf_synproxy_ipv4_init(struct synproxy_net *snet, struct net *net);
+void nf_synproxy_ipv4_fini(struct synproxy_net *snet, struct net *net);
+
+#if IS_ENABLED(CONFIG_IPV6)
+void synproxy_send_client_synack_ipv6(struct net *net,
+ const struct sk_buff *skb,
+ const struct tcphdr *th,
+ const struct synproxy_options *opts);
+
+bool synproxy_recv_client_ack_ipv6(struct net *net, const struct sk_buff *skb,
+ const struct tcphdr *th,
+ struct synproxy_options *opts, u32 recv_seq);
+
+unsigned int ipv6_synproxy_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *nhs);
+int nf_synproxy_ipv6_init(struct synproxy_net *snet, struct net *net);
+void nf_synproxy_ipv6_fini(struct synproxy_net *snet, struct net *net);
+#else
+static inline int
+nf_synproxy_ipv6_init(struct synproxy_net *snet, struct net *net) { return 0; }
+static inline void
+nf_synproxy_ipv6_fini(struct synproxy_net *snet, struct net *net) {}
+#endif /* CONFIG_IPV6 */
+
+#endif /* _NF_SYNPROXY_SHARED_H */
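A sketch of the per-netns bring-up sequence a user of these hooks would
follow, including the error unwind (snet and net assumed in scope):

	err = nf_synproxy_ipv4_init(snet, net);
	if (err)
		return err;

	err = nf_synproxy_ipv6_init(snet, net);
	if (err) {
		nf_synproxy_ipv4_fini(snet, net);
		return err;
	}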
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
new file mode 100644
index 000000000..c726da3b7
--- /dev/null
+++ b/include/net/netfilter/nf_tables.h
@@ -0,0 +1,1745 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_NF_TABLES_H
+#define _NET_NF_TABLES_H
+
+#include <asm/unaligned.h>
+#include <linux/list.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/nf_tables.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/rhashtable.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netlink.h>
+#include <net/flow_offload.h>
+#include <net/netns/generic.h>
+
+#define NFT_MAX_HOOKS (NF_INET_INGRESS + 1)
+
+struct module;
+
+#define NFT_JUMP_STACK_SIZE 16
+
+enum {
+ NFT_PKTINFO_L4PROTO = (1 << 0),
+ NFT_PKTINFO_INNER = (1 << 1),
+};
+
+struct nft_pktinfo {
+ struct sk_buff *skb;
+ const struct nf_hook_state *state;
+ u8 flags;
+ u8 tprot;
+ u16 fragoff;
+ unsigned int thoff;
+ unsigned int inneroff;
+};
+
+static inline struct sock *nft_sk(const struct nft_pktinfo *pkt)
+{
+ return pkt->state->sk;
+}
+
+static inline unsigned int nft_thoff(const struct nft_pktinfo *pkt)
+{
+ return pkt->thoff;
+}
+
+static inline struct net *nft_net(const struct nft_pktinfo *pkt)
+{
+ return pkt->state->net;
+}
+
+static inline unsigned int nft_hook(const struct nft_pktinfo *pkt)
+{
+ return pkt->state->hook;
+}
+
+static inline u8 nft_pf(const struct nft_pktinfo *pkt)
+{
+ return pkt->state->pf;
+}
+
+static inline const struct net_device *nft_in(const struct nft_pktinfo *pkt)
+{
+ return pkt->state->in;
+}
+
+static inline const struct net_device *nft_out(const struct nft_pktinfo *pkt)
+{
+ return pkt->state->out;
+}
+
+static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ pkt->skb = skb;
+ pkt->state = state;
+}
+
+static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt)
+{
+ pkt->flags = 0;
+ pkt->tprot = 0;
+ pkt->thoff = 0;
+ pkt->fragoff = 0;
+}
+
+/**
+ * struct nft_verdict - nf_tables verdict
+ *
+ * @code: nf_tables/netfilter verdict code
+ * @chain: destination chain for NFT_JUMP/NFT_GOTO
+ */
+struct nft_verdict {
+ u32 code;
+ struct nft_chain *chain;
+};
+
+struct nft_data {
+ union {
+ u32 data[4];
+ struct nft_verdict verdict;
+ };
+} __attribute__((aligned(__alignof__(u64))));
+
+#define NFT_REG32_NUM 20
+
+/**
+ * struct nft_regs - nf_tables register set
+ *
+ * @data: data registers
+ * @verdict: verdict register
+ *
+ * The first four data registers alias to the verdict register.
+ */
+struct nft_regs {
+ union {
+ u32 data[NFT_REG32_NUM];
+ struct nft_verdict verdict;
+ };
+};
+
+struct nft_regs_track {
+ struct {
+ const struct nft_expr *selector;
+ const struct nft_expr *bitwise;
+ u8 num_reg;
+ } regs[NFT_REG32_NUM];
+
+ const struct nft_expr *cur;
+ const struct nft_expr *last;
+};
+
+/* Store/load a u8, u16 or u64 integer to/from the u32 data register.
+ *
+ * Note: when using concatenations, register allocation happens at the
+ * 32-bit level, so the store helpers must pad the remainder of the
+ * register with zeroes to avoid garbage values.
+ */
+
+static inline void nft_reg_store8(u32 *dreg, u8 val)
+{
+ *dreg = 0;
+ *(u8 *)dreg = val;
+}
+
+static inline u8 nft_reg_load8(const u32 *sreg)
+{
+ return *(u8 *)sreg;
+}
+
+static inline void nft_reg_store16(u32 *dreg, u16 val)
+{
+ *dreg = 0;
+ *(u16 *)dreg = val;
+}
+
+static inline void nft_reg_store_be16(u32 *dreg, __be16 val)
+{
+ nft_reg_store16(dreg, (__force __u16)val);
+}
+
+static inline u16 nft_reg_load16(const u32 *sreg)
+{
+ return *(u16 *)sreg;
+}
+
+static inline __be16 nft_reg_load_be16(const u32 *sreg)
+{
+ return (__force __be16)nft_reg_load16(sreg);
+}
+
+static inline __be32 nft_reg_load_be32(const u32 *sreg)
+{
+ return *(__force __be32 *)sreg;
+}
+
+static inline void nft_reg_store64(u64 *dreg, u64 val)
+{
+ put_unaligned(val, dreg);
+}
+
+static inline u64 nft_reg_load64(const u32 *sreg)
+{
+ return get_unaligned((u64 *)sreg);
+}
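A worked illustration of the padding rule (sketch; reg stands for one
32-bit data register):

	u32 reg;

	nft_reg_store8(&reg, 0x2a);
	/* All four bytes of the register are now deterministic: one byte
	 * holds 0x2a, the rest are zero.  Without the "*dreg = 0" in
	 * nft_reg_store8(), a 32-bit comparison on a concatenated key
	 * could see stale bits and mismatch.
	 */
	WARN_ON(nft_reg_load8(&reg) != 0x2a);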
+
+static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
+ unsigned int len)
+{
+ if (len % NFT_REG32_SIZE)
+ dst[len / NFT_REG32_SIZE] = 0;
+ memcpy(dst, src, len);
+}
+
+/**
+ * struct nft_ctx - nf_tables rule/set context
+ *
+ * @net: net namespace
+ * @table: the table the chain is contained in
+ * @chain: the chain the rule is contained in
+ * @nla: netlink attributes
+ * @portid: netlink portID of the original message
+ * @seq: netlink sequence number
+ * @family: protocol family
+ * @level: depth of the chains
+ * @report: notify via unicast netlink message
+ */
+struct nft_ctx {
+ struct net *net;
+ struct nft_table *table;
+ struct nft_chain *chain;
+ const struct nlattr * const *nla;
+ u32 portid;
+ u32 seq;
+ u16 flags;
+ u8 family;
+ u8 level;
+ bool report;
+};
+
+enum nft_data_desc_flags {
+ NFT_DATA_DESC_SETELEM = (1 << 0),
+};
+
+struct nft_data_desc {
+ enum nft_data_types type;
+ unsigned int size;
+ unsigned int len;
+ unsigned int flags;
+};
+
+int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
+ struct nft_data_desc *desc, const struct nlattr *nla);
+void nft_data_hold(const struct nft_data *data, enum nft_data_types type);
+void nft_data_release(const struct nft_data *data, enum nft_data_types type);
+int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
+ enum nft_data_types type, unsigned int len);
+
+static inline enum nft_data_types nft_dreg_to_type(enum nft_registers reg)
+{
+ return reg == NFT_REG_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE;
+}
+
+static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
+{
+ return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
+}
+
+int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
+int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
+
+int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len);
+int nft_parse_register_store(const struct nft_ctx *ctx,
+ const struct nlattr *attr, u8 *dreg,
+ const struct nft_data *data,
+ enum nft_data_types type, unsigned int len);
+
+/**
+ * struct nft_userdata - user defined data associated with an object
+ *
+ * @len: length of the data
+ * @data: content
+ *
+ * The presence of user data is indicated in an object-specific fashion,
+ * so a length of zero can't occur and a stored "len" value of n denotes
+ * data of length n + 1.
+ */
+struct nft_userdata {
+ u8 len;
+ unsigned char data[];
+};
+
+/**
+ * struct nft_set_elem - generic representation of set elements
+ *
+ * @key: element key
+ * @key_end: closing element key
+ * @priv: element private data and extensions
+ */
+struct nft_set_elem {
+ union {
+ u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
+ struct nft_data val;
+ } key;
+ union {
+ u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
+ struct nft_data val;
+ } key_end;
+ union {
+ u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
+ struct nft_data val;
+ } data;
+ void *priv;
+};
+
+struct nft_set;
+struct nft_set_iter {
+ u8 genmask;
+ unsigned int count;
+ unsigned int skip;
+ int err;
+ int (*fn)(const struct nft_ctx *ctx,
+ struct nft_set *set,
+ const struct nft_set_iter *iter,
+ struct nft_set_elem *elem);
+};
+
+/**
+ * struct nft_set_desc - description of set elements
+ *
+ * @ktype: key type
+ * @klen: key length
+ * @dtype: data type
+ * @dlen: data length
+ * @objtype: object type
+ * @flags: flags
+ * @size: number of set elements
+ * @policy: set policy
+ * @gc_int: garbage collector interval
+ * @field_len: length of each field in concatenation, bytes
+ * @field_count: number of concatenated fields in element
+ * @expr: set must support expressions
+ */
+struct nft_set_desc {
+ u32 ktype;
+ unsigned int klen;
+ u32 dtype;
+ unsigned int dlen;
+ u32 objtype;
+ unsigned int size;
+ u32 policy;
+ u32 gc_int;
+ u64 timeout;
+ u8 field_len[NFT_REG32_COUNT];
+ u8 field_count;
+ bool expr;
+};
+
+/**
+ * enum nft_set_class - performance class
+ *
+ * @NFT_LOOKUP_O_1: constant, O(1)
+ * @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N)
+ * @NFT_LOOKUP_O_N: linear, O(N)
+ */
+enum nft_set_class {
+ NFT_SET_CLASS_O_1,
+ NFT_SET_CLASS_O_LOG_N,
+ NFT_SET_CLASS_O_N,
+};
+
+/**
+ * struct nft_set_estimate - estimation of memory and performance
+ * characteristics
+ *
+ * @size: required memory
+ * @lookup: lookup performance class
+ * @space: memory class
+ */
+struct nft_set_estimate {
+ u64 size;
+ enum nft_set_class lookup;
+ enum nft_set_class space;
+};
+
+#define NFT_EXPR_MAXATTR 16
+#define NFT_EXPR_SIZE(size) (sizeof(struct nft_expr) + \
+ ALIGN(size, __alignof__(struct nft_expr)))
+
+/**
+ * struct nft_expr - nf_tables expression
+ *
+ * @ops: expression ops
+ * @data: expression private data
+ */
+struct nft_expr {
+ const struct nft_expr_ops *ops;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(u64))));
+};
+
+static inline void *nft_expr_priv(const struct nft_expr *expr)
+{
+ return (void *)expr->data;
+}
+
+int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src);
+void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
+int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
+ const struct nft_expr *expr);
+bool nft_expr_reduce_bitwise(struct nft_regs_track *track,
+ const struct nft_expr *expr);
+
+struct nft_set_ext;
+
+/**
+ * struct nft_set_ops - nf_tables set operations
+ *
+ * @lookup: look up an element within the set
+ * @update: update an element if it exists, add it if it doesn't
+ * @delete: delete an element
+ * @insert: insert new element into set
+ * @activate: activate new element in the next generation
+ * @deactivate: lookup for element and deactivate it in the next generation
+ * @flush: deactivate element in the next generation
+ * @remove: remove element from set
+ * @walk: iterate over all set elements
+ * @get: get set elements
+ * @privsize: function to return size of set private data
+ * @init: initialize private data of new set instance
+ * @destroy: destroy private data of set instance
+ * @elemsize: element private size
+ *
+ * The lookup, update and delete operations have simpler interfaces, are
+ * faster and are currently the only ones used in the packet path. All the
+ * rest are slower control plane functions.
+ */
+ */
+struct nft_set_ops {
+ bool (*lookup)(const struct net *net,
+ const struct nft_set *set,
+ const u32 *key,
+ const struct nft_set_ext **ext);
+ bool (*update)(struct nft_set *set,
+ const u32 *key,
+ void *(*new)(struct nft_set *,
+ const struct nft_expr *,
+ struct nft_regs *),
+ const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_set_ext **ext);
+ bool (*delete)(const struct nft_set *set,
+ const u32 *key);
+
+ int (*insert)(const struct net *net,
+ const struct nft_set *set,
+ const struct nft_set_elem *elem,
+ struct nft_set_ext **ext);
+ void (*activate)(const struct net *net,
+ const struct nft_set *set,
+ const struct nft_set_elem *elem);
+ void * (*deactivate)(const struct net *net,
+ const struct nft_set *set,
+ const struct nft_set_elem *elem);
+ bool (*flush)(const struct net *net,
+ const struct nft_set *set,
+ void *priv);
+ void (*remove)(const struct net *net,
+ const struct nft_set *set,
+ const struct nft_set_elem *elem);
+ void (*walk)(const struct nft_ctx *ctx,
+ struct nft_set *set,
+ struct nft_set_iter *iter);
+ void * (*get)(const struct net *net,
+ const struct nft_set *set,
+ const struct nft_set_elem *elem,
+ unsigned int flags);
+ void (*commit)(const struct nft_set *set);
+ void (*abort)(const struct nft_set *set);
+ u64 (*privsize)(const struct nlattr * const nla[],
+ const struct nft_set_desc *desc);
+ bool (*estimate)(const struct nft_set_desc *desc,
+ u32 features,
+ struct nft_set_estimate *est);
+ int (*init)(const struct nft_set *set,
+ const struct nft_set_desc *desc,
+ const struct nlattr * const nla[]);
+ void (*destroy)(const struct nft_ctx *ctx,
+ const struct nft_set *set);
+ void (*gc_init)(const struct nft_set *set);
+
+ unsigned int elemsize;
+};
+
+/**
+ * struct nft_set_type - nf_tables set type
+ *
+ * @ops: set ops for this type
+ * @features: features supported by the implementation
+ */
+struct nft_set_type {
+ const struct nft_set_ops ops;
+ u32 features;
+};
+#define to_set_type(o) container_of(o, struct nft_set_type, ops)
+
+struct nft_set_elem_expr {
+ u8 size;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(struct nft_expr))));
+};
+
+#define nft_setelem_expr_at(__elem_expr, __offset) \
+ ((struct nft_expr *)&__elem_expr->data[__offset])
+
+#define nft_setelem_expr_foreach(__expr, __elem_expr, __size) \
+ for (__expr = nft_setelem_expr_at(__elem_expr, 0), __size = 0; \
+ __size < (__elem_expr)->size; \
+ __size += (__expr)->ops->size, __expr = ((void *)(__expr)) + (__expr)->ops->size)
+
+#define NFT_SET_EXPR_MAX 2
+
+/**
+ * struct nft_set - nf_tables set instance
+ *
+ * @list: table set list node
+ * @bindings: list of set bindings
+ * @refs: internal refcounting for async set destruction
+ * @table: table this set belongs to
+ * @net: netnamespace this set belongs to
+ * @name: name of the set
+ * @handle: unique handle of the set
+ * @ktype: key type (numeric type defined by userspace, not used in the kernel)
+ * @dtype: data type (verdict or numeric type defined by userspace)
+ * @objtype: object type (see NFT_OBJECT_* definitions)
+ * @size: maximum set size
+ * @field_len: length of each field in concatenation, bytes
+ * @field_count: number of concatenated fields in element
+ * @use: number of rules references to this set
+ * @nelems: number of elements
+ * @ndeact: number of deactivated elements queued for removal
+ * @timeout: default timeout value in jiffies
+ * @gc_int: garbage collection interval in msecs
+ * @policy: set parameterization (see enum nft_set_policies)
+ * @udlen: user data length
+ * @udata: user data
+ * @expr: stateful expression
+ * @ops: set ops
+ * @flags: set flags
+ * @dead: set will be freed, never cleared
+ * @genmask: generation mask
+ * @klen: key length
+ * @dlen: data length
+ * @data: private set data
+ */
+struct nft_set {
+ struct list_head list;
+ struct list_head bindings;
+ refcount_t refs;
+ struct nft_table *table;
+ possible_net_t net;
+ char *name;
+ u64 handle;
+ u32 ktype;
+ u32 dtype;
+ u32 objtype;
+ u32 size;
+ u8 field_len[NFT_REG32_COUNT];
+ u8 field_count;
+ u32 use;
+ atomic_t nelems;
+ u32 ndeact;
+ u64 timeout;
+ u32 gc_int;
+ u16 policy;
+ u16 udlen;
+ unsigned char *udata;
+ struct list_head pending_update;
+ /* runtime data below here */
+ const struct nft_set_ops *ops ____cacheline_aligned;
+ u16 flags:13,
+ dead:1,
+ genmask:2;
+ u8 klen;
+ u8 dlen;
+ u8 num_exprs;
+ struct nft_expr *exprs[NFT_SET_EXPR_MAX];
+ struct list_head catchall_list;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(u64))));
+};
+
+static inline bool nft_set_is_anonymous(const struct nft_set *set)
+{
+ return set->flags & NFT_SET_ANONYMOUS;
+}
+
+static inline void *nft_set_priv(const struct nft_set *set)
+{
+ return (void *)set->data;
+}
+
+static inline bool nft_set_gc_is_pending(const struct nft_set *s)
+{
+ return refcount_read(&s->refs) != 1;
+}
+
+static inline struct nft_set *nft_set_container_of(const void *priv)
+{
+ return (void *)priv - offsetof(struct nft_set, data);
+}
+
+struct nft_set *nft_set_lookup_global(const struct net *net,
+ const struct nft_table *table,
+ const struct nlattr *nla_set_name,
+ const struct nlattr *nla_set_id,
+ u8 genmask);
+
+struct nft_set_ext *nft_set_catchall_lookup(const struct net *net,
+ const struct nft_set *set);
+
+static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
+{
+ u32 gc_int = READ_ONCE(set->gc_int);
+
+ return gc_int ? msecs_to_jiffies(gc_int) : HZ;
+}
+
+/**
+ * struct nft_set_binding - nf_tables set binding
+ *
+ * @list: set bindings list node
+ * @chain: chain containing the rule bound to the set
+ * @flags: set action flags
+ *
+ * A set binding contains all information necessary for validation
+ * of new elements added to a bound set.
+ */
+struct nft_set_binding {
+ struct list_head list;
+ const struct nft_chain *chain;
+ u32 flags;
+};
+
+enum nft_trans_phase;
+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set);
+void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *binding,
+ enum nft_trans_phase phase);
+int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *binding);
+void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
+
+/**
+ * enum nft_set_extensions - set extension type IDs
+ *
+ * @NFT_SET_EXT_KEY: element key
+ * @NFT_SET_EXT_KEY_END: upper bound element key, for ranges
+ * @NFT_SET_EXT_DATA: mapping data
+ * @NFT_SET_EXT_FLAGS: element flags
+ * @NFT_SET_EXT_TIMEOUT: element timeout
+ * @NFT_SET_EXT_EXPIRATION: element expiration time
+ * @NFT_SET_EXT_USERDATA: user data associated with the element
+ * @NFT_SET_EXT_EXPRESSIONS: expressions associated with the element
+ * @NFT_SET_EXT_OBJREF: stateful object reference associated with element
+ * @NFT_SET_EXT_NUM: number of extension types
+ */
+enum nft_set_extensions {
+ NFT_SET_EXT_KEY,
+ NFT_SET_EXT_KEY_END,
+ NFT_SET_EXT_DATA,
+ NFT_SET_EXT_FLAGS,
+ NFT_SET_EXT_TIMEOUT,
+ NFT_SET_EXT_EXPIRATION,
+ NFT_SET_EXT_USERDATA,
+ NFT_SET_EXT_EXPRESSIONS,
+ NFT_SET_EXT_OBJREF,
+ NFT_SET_EXT_NUM
+};
+
+/**
+ * struct nft_set_ext_type - set extension type
+ *
+ * @len: fixed part length of the extension
+ * @align: alignment requirements of the extension
+ */
+struct nft_set_ext_type {
+ u8 len;
+ u8 align;
+};
+
+extern const struct nft_set_ext_type nft_set_ext_types[];
+
+/**
+ * struct nft_set_ext_tmpl - set extension template
+ *
+ * @len: length of extension area
+ * @offset: offsets of individual extension types
+ */
+struct nft_set_ext_tmpl {
+ u16 len;
+ u8 offset[NFT_SET_EXT_NUM];
+ u8 ext_len[NFT_SET_EXT_NUM];
+};
+
+/**
+ * struct nft_set_ext - set extensions
+ *
+ * @genmask: generation mask
+ * @offset: offsets of individual extension types
+ * @data: beginning of extension data
+ */
+struct nft_set_ext {
+ u8 genmask;
+ u8 offset[NFT_SET_EXT_NUM];
+ char data[];
+};
+
+static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
+{
+ memset(tmpl, 0, sizeof(*tmpl));
+ tmpl->len = sizeof(struct nft_set_ext);
+}
+
+static inline int nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id,
+ unsigned int len)
+{
+ tmpl->len = ALIGN(tmpl->len, nft_set_ext_types[id].align);
+ if (tmpl->len > U8_MAX)
+ return -EINVAL;
+
+ tmpl->offset[id] = tmpl->len;
+ tmpl->ext_len[id] = nft_set_ext_types[id].len + len;
+ tmpl->len += tmpl->ext_len[id];
+
+ return 0;
+}
+
+static inline int nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id)
+{
+ return nft_set_ext_add_length(tmpl, id, 0);
+}
+
+static inline void nft_set_ext_init(struct nft_set_ext *ext,
+ const struct nft_set_ext_tmpl *tmpl)
+{
+ memcpy(ext->offset, tmpl->offset, sizeof(ext->offset));
+}
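A sketch of how element extensions are laid out with these helpers (set,
key and the element's ext pointer are assumed to be in scope):

	struct nft_set_ext_tmpl tmpl;

	nft_set_ext_prepare(&tmpl);
	if (nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen) < 0)
		return -EINVAL;
	if (nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT) < 0)
		return -EINVAL;

	/* tmpl.len is now the total per-element extension size; once the
	 * element is allocated, copy the offsets and fill in the key:
	 */
	nft_set_ext_init(ext, &tmpl);
	memcpy(nft_set_ext_key(ext), key, set->klen);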
+
+static inline bool __nft_set_ext_exists(const struct nft_set_ext *ext, u8 id)
+{
+ return !!ext->offset[id];
+}
+
+static inline bool nft_set_ext_exists(const struct nft_set_ext *ext, u8 id)
+{
+ return ext && __nft_set_ext_exists(ext, id);
+}
+
+static inline void *nft_set_ext(const struct nft_set_ext *ext, u8 id)
+{
+ return (void *)ext + ext->offset[id];
+}
+
+static inline struct nft_data *nft_set_ext_key(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_KEY);
+}
+
+static inline struct nft_data *nft_set_ext_key_end(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_KEY_END);
+}
+
+static inline struct nft_data *nft_set_ext_data(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_DATA);
+}
+
+static inline u8 *nft_set_ext_flags(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_FLAGS);
+}
+
+static inline u64 *nft_set_ext_timeout(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_TIMEOUT);
+}
+
+static inline u64 *nft_set_ext_expiration(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_EXPIRATION);
+}
+
+static inline struct nft_userdata *nft_set_ext_userdata(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_USERDATA);
+}
+
+static inline struct nft_set_elem_expr *nft_set_ext_expr(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_EXPRESSIONS);
+}
+
+static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
+{
+ return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
+ time_is_before_eq_jiffies64(*nft_set_ext_expiration(ext));
+}
+
+static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
+ void *elem)
+{
+ return elem + set->ops->elemsize;
+}
+
+static inline struct nft_object **nft_set_ext_obj(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_OBJREF);
+}
+
+struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx,
+ const struct nft_set *set,
+ const struct nlattr *attr);
+
+void *nft_set_elem_init(const struct nft_set *set,
+ const struct nft_set_ext_tmpl *tmpl,
+ const u32 *key, const u32 *key_end, const u32 *data,
+ u64 timeout, u64 expiration, gfp_t gfp);
+int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_expr *expr_array[]);
+void nft_set_elem_destroy(const struct nft_set *set, void *elem,
+ bool destroy_expr);
+void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
+ const struct nft_set *set, void *elem);
+
+struct nft_expr_ops;
+/**
+ * struct nft_expr_type - nf_tables expression type
+ *
+ * @select_ops: function to select nft_expr_ops
+ * @release_ops: release nft_expr_ops
+ * @ops: default ops, used when no select_ops function is present
+ * @list: used internally
+ * @name: Identifier
+ * @owner: module reference
+ * @policy: netlink attribute policy
+ * @maxattr: highest netlink attribute number
+ * @family: address family for AF-specific types
+ * @flags: expression type flags
+ */
+struct nft_expr_type {
+ const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *,
+ const struct nlattr * const tb[]);
+ void (*release_ops)(const struct nft_expr_ops *ops);
+ const struct nft_expr_ops *ops;
+ struct list_head list;
+ const char *name;
+ struct module *owner;
+ const struct nla_policy *policy;
+ unsigned int maxattr;
+ u8 family;
+ u8 flags;
+};
+
+#define NFT_EXPR_STATEFUL 0x1
+#define NFT_EXPR_GC 0x2
+
+enum nft_trans_phase {
+ NFT_TRANS_PREPARE,
+ NFT_TRANS_PREPARE_ERROR,
+ NFT_TRANS_ABORT,
+ NFT_TRANS_COMMIT,
+ NFT_TRANS_RELEASE
+};
+
+struct nft_flow_rule;
+struct nft_offload_ctx;
+
+/**
+ * struct nft_expr_ops - nf_tables expression operations
+ *
+ * @eval: Expression evaluation function
+ * @size: full expression size, including private data size
+ * @init: initialization function
+ * @activate: activate expression in the next generation
+ * @deactivate: deactivate expression in next generation
+ * @destroy: destruction function, called after synchronize_rcu
+ * @dump: function to dump parameters
+ * @type: expression type
+ * @validate: validate expression, called during loop detection
+ * @data: extra data to attach to this expression operation
+ */
+struct nft_expr_ops {
+ void (*eval)(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+ int (*clone)(struct nft_expr *dst,
+ const struct nft_expr *src);
+ unsigned int size;
+
+ int (*init)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+ void (*activate)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr);
+ void (*deactivate)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ enum nft_trans_phase phase);
+ void (*destroy)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr);
+ void (*destroy_clone)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr);
+ int (*dump)(struct sk_buff *skb,
+ const struct nft_expr *expr);
+ int (*validate)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data);
+ bool (*reduce)(struct nft_regs_track *track,
+ const struct nft_expr *expr);
+ bool (*gc)(struct net *net,
+ const struct nft_expr *expr);
+ int (*offload)(struct nft_offload_ctx *ctx,
+ struct nft_flow_rule *flow,
+ const struct nft_expr *expr);
+ bool (*offload_action)(const struct nft_expr *expr);
+ void (*offload_stats)(struct nft_expr *expr,
+ const struct flow_stats *stats);
+ const struct nft_expr_type *type;
+ void *data;
+};
+
+/**
+ * struct nft_rule - nf_tables rule
+ *
+ * @list: used internally
+ * @handle: rule handle
+ * @genmask: generation mask
+ * @dlen: length of expression data
+ * @udata: user data is appended to the rule
+ * @data: expression data
+ */
+struct nft_rule {
+ struct list_head list;
+ u64 handle:42,
+ genmask:2,
+ dlen:12,
+ udata:1;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(struct nft_expr))));
+};
+
+static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule)
+{
+ return (struct nft_expr *)&rule->data[0];
+}
+
+static inline struct nft_expr *nft_expr_next(const struct nft_expr *expr)
+{
+ return ((void *)expr) + expr->ops->size;
+}
+
+static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule)
+{
+ return (struct nft_expr *)&rule->data[rule->dlen];
+}
+
+static inline bool nft_expr_more(const struct nft_rule *rule,
+ const struct nft_expr *expr)
+{
+ return expr != nft_expr_last(rule) && expr->ops;
+}
+
+static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
+{
+ return (void *)&rule->data[rule->dlen];
+}
+
+void nft_rule_expr_activate(const struct nft_ctx *ctx, struct nft_rule *rule);
+void nft_rule_expr_deactivate(const struct nft_ctx *ctx, struct nft_rule *rule,
+ enum nft_trans_phase phase);
+void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule *rule);
+
+static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_set_elem_expr *elem_expr;
+ struct nft_expr *expr;
+ u32 size;
+
+ if (__nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS)) {
+ elem_expr = nft_set_ext_expr(ext);
+ nft_setelem_expr_foreach(expr, elem_expr, size) {
+ expr->ops->eval(expr, regs, pkt);
+ if (regs->verdict.code == NFT_BREAK)
+ return;
+ }
+ }
+}
+
+/*
+ * The last pointer isn't strictly necessary, but the compiler can't
+ * determine that the result of nft_expr_last() stays constant across the
+ * loop, since it can't assume that dlen is unchanged by calls made from
+ * the loop body.
+ */
+#define nft_rule_for_each_expr(expr, last, rule) \
+ for ((expr) = nft_expr_first(rule), (last) = nft_expr_last(rule); \
+ (expr) != (last); \
+ (expr) = nft_expr_next(expr))
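A sketch of the evaluation loop this macro enables (regs and pkt assumed
in scope, loosely following the packet path):

	struct nft_expr *expr, *last;

	nft_rule_for_each_expr(expr, last, rule) {
		expr->ops->eval(expr, &regs, &pkt);
		if (regs.verdict.code != NFT_CONTINUE)
			break;
	}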
+
+#define NFT_CHAIN_POLICY_UNSET U8_MAX
+
+struct nft_rule_dp {
+ u64 is_last:1,
+ dlen:12,
+ handle:42; /* for tracing */
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(struct nft_expr))));
+};
+
+struct nft_rule_blob {
+ unsigned long size;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(struct nft_rule_dp))));
+};
+
+/**
+ * struct nft_chain - nf_tables chain
+ *
+ * @rules: list of rules in the chain
+ * @list: used internally
+ * @rhlhead: used internally
+ * @table: table that this chain belongs to
+ * @handle: chain handle
+ * @use: number of jump references to this chain
+ * @flags: bitmask of enum nft_chain_flags
+ * @name: name of the chain
+ */
+struct nft_chain {
+ struct nft_rule_blob __rcu *blob_gen_0;
+ struct nft_rule_blob __rcu *blob_gen_1;
+ struct list_head rules;
+ struct list_head list;
+ struct rhlist_head rhlhead;
+ struct nft_table *table;
+ u64 handle;
+ u32 use;
+ u8 flags:5,
+ bound:1,
+ genmask:2;
+ char *name;
+ u16 udlen;
+ u8 *udata;
+
+ /* Only used during control plane commit phase: */
+ struct nft_rule_blob *blob_next;
+};
+
+int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain);
+int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
+ const struct nft_set_iter *iter,
+ struct nft_set_elem *elem);
+int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set);
+int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain);
+void nf_tables_unbind_chain(const struct nft_ctx *ctx, struct nft_chain *chain);
+
+enum nft_chain_types {
+ NFT_CHAIN_T_DEFAULT = 0,
+ NFT_CHAIN_T_ROUTE,
+ NFT_CHAIN_T_NAT,
+ NFT_CHAIN_T_MAX
+};
+
+/**
+ * struct nft_chain_type - nf_tables chain type info
+ *
+ * @name: name of the type
+ * @type: numeric identifier
+ * @family: address family
+ * @owner: module owner
+ * @hook_mask: mask of valid hooks
+ * @hooks: array of hook functions
+ * @ops_register: base chain register function
+ * @ops_unregister: base chain unregister function
+ */
+struct nft_chain_type {
+ const char *name;
+ enum nft_chain_types type;
+ int family;
+ struct module *owner;
+ unsigned int hook_mask;
+ nf_hookfn *hooks[NFT_MAX_HOOKS];
+ int (*ops_register)(struct net *net, const struct nf_hook_ops *ops);
+ void (*ops_unregister)(struct net *net, const struct nf_hook_ops *ops);
+};
+
+int nft_chain_validate_dependency(const struct nft_chain *chain,
+ enum nft_chain_types type);
+int nft_chain_validate_hooks(const struct nft_chain *chain,
+ unsigned int hook_flags);
+
+static inline bool nft_chain_binding(const struct nft_chain *chain)
+{
+ return chain->flags & NFT_CHAIN_BINDING;
+}
+
+static inline bool nft_chain_is_bound(struct nft_chain *chain)
+{
+ return (chain->flags & NFT_CHAIN_BINDING) && chain->bound;
+}
+
+int nft_chain_add(struct nft_table *table, struct nft_chain *chain);
+void nft_chain_del(struct nft_chain *chain);
+void nf_tables_chain_destroy(struct nft_ctx *ctx);
+
+struct nft_stats {
+ u64 bytes;
+ u64 pkts;
+ struct u64_stats_sync syncp;
+};
+
+struct nft_hook {
+ struct list_head list;
+ struct nf_hook_ops ops;
+ struct rcu_head rcu;
+};
+
+/**
+ * struct nft_base_chain - nf_tables base chain
+ *
+ * @ops: netfilter hook ops
+ * @hook_list: list of netfilter hooks (for NFPROTO_NETDEV family)
+ * @type: chain type
+ * @policy: default policy
+ * @stats: per-cpu chain stats
+ * @chain: the chain
+ * @flow_block: flow block (for hardware offload)
+ */
+struct nft_base_chain {
+ struct nf_hook_ops ops;
+ struct list_head hook_list;
+ const struct nft_chain_type *type;
+ u8 policy;
+ u8 flags;
+ struct nft_stats __percpu *stats;
+ struct nft_chain chain;
+ struct flow_block flow_block;
+};
+
+static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
+{
+ return container_of(chain, struct nft_base_chain, chain);
+}
+
+static inline bool nft_is_base_chain(const struct nft_chain *chain)
+{
+ return chain->flags & NFT_CHAIN_BASE;
+}
+
+int __nft_release_basechain(struct nft_ctx *ctx);
+
+unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
+
+static inline bool nft_use_inc(u32 *use)
+{
+ if (*use == UINT_MAX)
+ return false;
+
+ (*use)++;
+
+ return true;
+}
+
+static inline void nft_use_dec(u32 *use)
+{
+ WARN_ON_ONCE((*use)-- == 0);
+}
+
+/* For error and abort path: restore use counter to previous state. */
+static inline void nft_use_inc_restore(u32 *use)
+{
+ WARN_ON_ONCE(!nft_use_inc(use));
+}
+
+#define nft_use_dec_restore nft_use_dec
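A sketch of the reference-counting pattern these helpers support (chain
assumed in scope):

	/* Take a reference when a rule starts jumping to the chain. */
	if (!nft_use_inc(&chain->use))
		return -EMFILE;	/* counter would overflow */

	/* And drop it again on the error or teardown path. */
	nft_use_dec(&chain->use);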
+
+/**
+ * struct nft_table - nf_tables table
+ *
+ * @list: used internally
+ * @chains_ht: chains in the table
+ * @chains: same, for stable walks
+ * @sets: sets in the table
+ * @objects: stateful objects in the table
+ * @flowtables: flow tables in the table
+ * @hgenerator: handle generator state
+ * @handle: table handle
+ * @use: number of chain references to this table
+ * @flags: table flags (see enum nft_table_flags)
+ * @genmask: generation mask
+ * @family: address family
+ * @name: name of the table
+ */
+struct nft_table {
+ struct list_head list;
+ struct rhltable chains_ht;
+ struct list_head chains;
+ struct list_head sets;
+ struct list_head objects;
+ struct list_head flowtables;
+ u64 hgenerator;
+ u64 handle;
+ u32 use;
+ u16 family:6,
+ flags:8,
+ genmask:2;
+ u32 nlpid;
+ char *name;
+ u16 udlen;
+ u8 *udata;
+};
+
+static inline bool nft_table_has_owner(const struct nft_table *table)
+{
+ return table->flags & NFT_TABLE_F_OWNER;
+}
+
+static inline bool nft_base_chain_netdev(int family, u32 hooknum)
+{
+ return family == NFPROTO_NETDEV ||
+ (family == NFPROTO_INET && hooknum == NF_INET_INGRESS);
+}
+
+void nft_register_chain_type(const struct nft_chain_type *);
+void nft_unregister_chain_type(const struct nft_chain_type *);
+
+int nft_register_expr(struct nft_expr_type *);
+void nft_unregister_expr(struct nft_expr_type *);
+
+int nft_verdict_dump(struct sk_buff *skb, int type,
+ const struct nft_verdict *v);
+
+/**
+ * struct nft_object_hash_key - key to lookup nft_object
+ *
+ * @name: name of the stateful object to look up
+ * @table: table the object belongs to
+ */
+struct nft_object_hash_key {
+ const char *name;
+ const struct nft_table *table;
+};
+
+/**
+ * struct nft_object - nf_tables stateful object
+ *
+ * @list: table stateful object list node
+ * @key: keys that identify this object
+ * @rhlhead: nft_objname_ht node
+ * @genmask: generation mask
+ * @use: number of references to this stateful object
+ * @handle: unique object handle
+ * @ops: object operations
+ * @data: object data, layout depends on type
+ */
+struct nft_object {
+ struct list_head list;
+ struct rhlist_head rhlhead;
+ struct nft_object_hash_key key;
+ u32 genmask:2;
+ u32 use;
+ u64 handle;
+ u16 udlen;
+ u8 *udata;
+ /* runtime data below here */
+ const struct nft_object_ops *ops ____cacheline_aligned;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(u64))));
+};
+
+static inline void *nft_obj_data(const struct nft_object *obj)
+{
+ return (void *)obj->data;
+}
+
+#define nft_expr_obj(expr) *((struct nft_object **)nft_expr_priv(expr))
+
+struct nft_object *nft_obj_lookup(const struct net *net,
+ const struct nft_table *table,
+ const struct nlattr *nla, u32 objtype,
+ u8 genmask);
+
+void nft_obj_notify(struct net *net, const struct nft_table *table,
+ struct nft_object *obj, u32 portid, u32 seq,
+ int event, u16 flags, int family, int report, gfp_t gfp);
+
+/**
+ * struct nft_object_type - stateful object type
+ *
+ * @select_ops: function to select nft_object_ops
+ * @ops: default ops, used when no select_ops function is present
+ * @list: list node in list of object types
+ * @type: stateful object numeric type
+ * @owner: module owner
+ * @maxattr: maximum netlink attribute
+ * @policy: netlink attribute policy
+ */
+struct nft_object_type {
+ const struct nft_object_ops *(*select_ops)(const struct nft_ctx *,
+ const struct nlattr * const tb[]);
+ const struct nft_object_ops *ops;
+ struct list_head list;
+ u32 type;
+ unsigned int maxattr;
+ struct module *owner;
+ const struct nla_policy *policy;
+};
+
+/**
+ * struct nft_object_ops - stateful object operations
+ *
+ * @eval: stateful object evaluation function
+ * @size: stateful object size
+ * @init: initialize object from netlink attributes
+ * @destroy: release existing stateful object
+ * @dump: netlink dump stateful object
+ * @update: update stateful object
+ */
+struct nft_object_ops {
+ void (*eval)(struct nft_object *obj,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+ unsigned int size;
+ int (*init)(const struct nft_ctx *ctx,
+ const struct nlattr *const tb[],
+ struct nft_object *obj);
+ void (*destroy)(const struct nft_ctx *ctx,
+ struct nft_object *obj);
+ int (*dump)(struct sk_buff *skb,
+ struct nft_object *obj,
+ bool reset);
+ void (*update)(struct nft_object *obj,
+ struct nft_object *newobj);
+ const struct nft_object_type *type;
+};
+
+int nft_register_obj(struct nft_object_type *obj_type);
+void nft_unregister_obj(struct nft_object_type *obj_type);
+
+#define NFT_NETDEVICE_MAX 256
+
+/**
+ * struct nft_flowtable - nf_tables flow table
+ *
+ * @list: flow table list node in table list
+ * @table: the table the flow table is contained in
+ * @name: name of this flow table
+ * @hooknum: hook number
+ * @ops_len: number of hooks in array
+ * @genmask: generation mask
+ * @use: number of references to this flow table
+ * @handle: unique object handle
+ * @dev_name: array of device names
+ * @data: rhashtable and garbage collector
+ * @ops: array of hooks
+ */
+struct nft_flowtable {
+ struct list_head list;
+ struct nft_table *table;
+ char *name;
+ int hooknum;
+ int ops_len;
+ u32 genmask:2;
+ u32 use;
+ u64 handle;
+ /* runtime data below here */
+ struct list_head hook_list ____cacheline_aligned;
+ struct nf_flowtable data;
+};
+
+struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table,
+ const struct nlattr *nla,
+ u8 genmask);
+
+void nf_tables_deactivate_flowtable(const struct nft_ctx *ctx,
+ struct nft_flowtable *flowtable,
+ enum nft_trans_phase phase);
+
+void nft_register_flowtable_type(struct nf_flowtable_type *type);
+void nft_unregister_flowtable_type(struct nf_flowtable_type *type);
+
+/**
+ * struct nft_traceinfo - nft tracing information and state
+ *
+ * @trace: other struct members are initialised
+ * @nf_trace: copy of skb->nf_trace before rule evaluation
+ * @type: event type (enum nft_trace_types)
+ * @skbid: hash of skb to be used as trace id
+ * @packet_dumped: packet headers sent in a previous traceinfo message
+ * @pkt: pktinfo currently processed
+ * @basechain: base chain currently processed
+ * @chain: chain currently processed
+ * @rule: rule that was evaluated
+ * @verdict: verdict given by rule
+ */
+struct nft_traceinfo {
+ bool trace;
+ bool nf_trace;
+ bool packet_dumped;
+ enum nft_trace_types type:8;
+ u32 skbid;
+ const struct nft_pktinfo *pkt;
+ const struct nft_base_chain *basechain;
+ const struct nft_chain *chain;
+ const struct nft_rule_dp *rule;
+ const struct nft_verdict *verdict;
+};
+
+void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt,
+ const struct nft_verdict *verdict,
+ const struct nft_chain *basechain);
+
+void nft_trace_notify(struct nft_traceinfo *info);
+
+#define MODULE_ALIAS_NFT_CHAIN(family, name) \
+ MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
+
+#define MODULE_ALIAS_NFT_AF_EXPR(family, name) \
+ MODULE_ALIAS("nft-expr-" __stringify(family) "-" name)
+
+#define MODULE_ALIAS_NFT_EXPR(name) \
+ MODULE_ALIAS("nft-expr-" name)
+
+#define MODULE_ALIAS_NFT_OBJ(type) \
+ MODULE_ALIAS("nft-obj-" __stringify(type))
+
+#if IS_ENABLED(CONFIG_NF_TABLES)
+
+/*
+ * The gencursor defines two generations, the currently active and the
+ * next one. Objects contain a bitmask of 2 bits specifying the generations
+ * they're active in. A set bit means they're inactive in the generation
+ * represented by that bit.
+ *
+ * New objects start out as inactive in the current and active in the
+ * next generation. When committing the ruleset the bitmask is cleared,
+ * meaning they're active in all generations. When removing an object,
+ * it is set inactive in the next generation. After committing the ruleset,
+ * the objects are removed.
+ */
+static inline unsigned int nft_gencursor_next(const struct net *net)
+{
+ return net->nft.gencursor + 1 == 1 ? 1 : 0;
+}
+
+static inline u8 nft_genmask_next(const struct net *net)
+{
+ return 1 << nft_gencursor_next(net);
+}
+
+static inline u8 nft_genmask_cur(const struct net *net)
+{
+ /* Use READ_ONCE() to prevent refetching the value for atomicity */
+ return 1 << READ_ONCE(net->nft.gencursor);
+}
+
+#define NFT_GENMASK_ANY ((1 << 0) | (1 << 1))
+
+/*
+ * Generic transaction helpers
+ */
+
+/* Check if this object is currently active. */
+#define nft_is_active(__net, __obj) \
+ (((__obj)->genmask & nft_genmask_cur(__net)) == 0)
+
+/* Check if this object is active in the next generation. */
+#define nft_is_active_next(__net, __obj) \
+ (((__obj)->genmask & nft_genmask_next(__net)) == 0)
+
+/* This object becomes active in the next generation. */
+#define nft_activate_next(__net, __obj) \
+ (__obj)->genmask = nft_genmask_cur(__net)
+
+/* This object becomes inactive in the next generation. */
+#define nft_deactivate_next(__net, __obj) \
+ (__obj)->genmask = nft_genmask_next(__net)
+
+/* After committing the ruleset, clear the stale generation bit. */
+#define nft_clear(__net, __obj) \
+ (__obj)->genmask &= ~nft_genmask_next(__net)
+#define nft_active_genmask(__obj, __genmask) \
+ !((__obj)->genmask & __genmask)
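A worked example of the two-bit lifecycle (sketch; assume the gencursor is
currently 0, so the current mask is 1 << 0 and the next mask is 1 << 1):

	/* Newly added object: inactive now, active in the next gen. */
	nft_activate_next(net, obj);	/* genmask = 0x1 */
	/* nft_is_active(net, obj)      -> false (bit 0 is set)   */
	/* nft_is_active_next(net, obj) -> true  (bit 1 is clear) */

	/* Commit flips the gencursor to 1; the stale bit is cleared: */
	nft_clear(net, obj);		/* genmask &= ~0x1 -> 0x0 */
	/* The object is now active in both generations. */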
+
+/*
+ * Set element transaction helpers
+ */
+
+static inline bool nft_set_elem_active(const struct nft_set_ext *ext,
+ u8 genmask)
+{
+ return !(ext->genmask & genmask);
+}
+
+static inline void nft_set_elem_change_active(const struct net *net,
+ const struct nft_set *set,
+ struct nft_set_ext *ext)
+{
+ ext->genmask ^= nft_genmask_next(net);
+}
+
+#endif /* IS_ENABLED(CONFIG_NF_TABLES) */
+
+#define NFT_SET_ELEM_DEAD_MASK (1 << 2)
+
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+#define NFT_SET_ELEM_DEAD_BIT 2
+#elif defined(__BIG_ENDIAN_BITFIELD)
+#define NFT_SET_ELEM_DEAD_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2)
+#else
+#error "Unknown bitfield endianness"
+#endif
+
+static inline void nft_set_elem_dead(struct nft_set_ext *ext)
+{
+ unsigned long *word = (unsigned long *)ext;
+
+ BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
+ set_bit(NFT_SET_ELEM_DEAD_BIT, word);
+}
+
+static inline int nft_set_elem_is_dead(const struct nft_set_ext *ext)
+{
+ unsigned long *word = (unsigned long *)ext;
+
+ BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
+ return test_bit(NFT_SET_ELEM_DEAD_BIT, word);
+}
+
+/**
+ * struct nft_trans - nf_tables object update in transaction
+ *
+ * @list: used internally
+ * @binding_list: list of objects with possible bindings
+ * @msg_type: message type
+ * @put_net: ctx->net needs to be put
+ * @ctx: transaction context
+ * @data: internal information related to the transaction
+ */
+struct nft_trans {
+ struct list_head list;
+ struct list_head binding_list;
+ int msg_type;
+ bool put_net;
+ struct nft_ctx ctx;
+ char data[];
+};
+
+struct nft_trans_rule {
+ struct nft_rule *rule;
+ struct nft_flow_rule *flow;
+ u32 rule_id;
+ bool bound;
+};
+
+#define nft_trans_rule(trans) \
+ (((struct nft_trans_rule *)trans->data)->rule)
+#define nft_trans_flow_rule(trans) \
+ (((struct nft_trans_rule *)trans->data)->flow)
+#define nft_trans_rule_id(trans) \
+ (((struct nft_trans_rule *)trans->data)->rule_id)
+#define nft_trans_rule_bound(trans) \
+ (((struct nft_trans_rule *)trans->data)->bound)
+
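All of these accessors follow one pattern: struct nft_trans is a generic header with a flexible array member, and each message type overlays its own payload struct on ->data. A minimal userspace sketch of the same pattern (alloc_trans() and the types here are hypothetical stand-ins, not kernel APIs):

#include <stdlib.h>

struct trans {
	int msg_type;
	char data[];                       /* payload, sized per message type */
};

struct trans_rule {
	void *rule;
	unsigned int rule_id;
};

#define trans_rule_id(t) (((struct trans_rule *)(t)->data)->rule_id)

static struct trans *alloc_trans(int type, size_t size)
{
	struct trans *t = calloc(1, sizeof(*t) + size);

	if (t)
		t->msg_type = type;
	return t;
}

int main(void)
{
	struct trans *t = alloc_trans(1, sizeof(struct trans_rule));

	if (!t)
		return 1;
	trans_rule_id(t) = 42;             /* same shape as nft_trans_rule_id() */
	free(t);
	return 0;
}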
+struct nft_trans_set {
+ struct nft_set *set;
+ u32 set_id;
+ u32 gc_int;
+ u64 timeout;
+ bool update;
+ bool bound;
+};
+
+#define nft_trans_set(trans) \
+ (((struct nft_trans_set *)trans->data)->set)
+#define nft_trans_set_id(trans) \
+ (((struct nft_trans_set *)trans->data)->set_id)
+#define nft_trans_set_bound(trans) \
+ (((struct nft_trans_set *)trans->data)->bound)
+#define nft_trans_set_update(trans) \
+ (((struct nft_trans_set *)trans->data)->update)
+#define nft_trans_set_timeout(trans) \
+ (((struct nft_trans_set *)trans->data)->timeout)
+#define nft_trans_set_gc_int(trans) \
+ (((struct nft_trans_set *)trans->data)->gc_int)
+
+struct nft_trans_chain {
+ struct nft_chain *chain;
+ bool update;
+ char *name;
+ struct nft_stats __percpu *stats;
+ u8 policy;
+ bool bound;
+ u32 chain_id;
+};
+
+#define nft_trans_chain(trans) \
+ (((struct nft_trans_chain *)trans->data)->chain)
+#define nft_trans_chain_update(trans) \
+ (((struct nft_trans_chain *)trans->data)->update)
+#define nft_trans_chain_name(trans) \
+ (((struct nft_trans_chain *)trans->data)->name)
+#define nft_trans_chain_stats(trans) \
+ (((struct nft_trans_chain *)trans->data)->stats)
+#define nft_trans_chain_policy(trans) \
+ (((struct nft_trans_chain *)trans->data)->policy)
+#define nft_trans_chain_bound(trans) \
+ (((struct nft_trans_chain *)trans->data)->bound)
+#define nft_trans_chain_id(trans) \
+ (((struct nft_trans_chain *)trans->data)->chain_id)
+
+struct nft_trans_table {
+ bool update;
+};
+
+#define nft_trans_table_update(trans) \
+ (((struct nft_trans_table *)trans->data)->update)
+
+struct nft_trans_elem {
+ struct nft_set *set;
+ struct nft_set_elem elem;
+ bool bound;
+};
+
+#define nft_trans_elem_set(trans) \
+ (((struct nft_trans_elem *)trans->data)->set)
+#define nft_trans_elem(trans) \
+ (((struct nft_trans_elem *)trans->data)->elem)
+#define nft_trans_elem_set_bound(trans) \
+ (((struct nft_trans_elem *)trans->data)->bound)
+
+struct nft_trans_obj {
+ struct nft_object *obj;
+ struct nft_object *newobj;
+ bool update;
+};
+
+#define nft_trans_obj(trans) \
+ (((struct nft_trans_obj *)trans->data)->obj)
+#define nft_trans_obj_newobj(trans) \
+ (((struct nft_trans_obj *)trans->data)->newobj)
+#define nft_trans_obj_update(trans) \
+ (((struct nft_trans_obj *)trans->data)->update)
+
+struct nft_trans_flowtable {
+ struct nft_flowtable *flowtable;
+ bool update;
+ struct list_head hook_list;
+ u32 flags;
+};
+
+#define nft_trans_flowtable(trans) \
+ (((struct nft_trans_flowtable *)trans->data)->flowtable)
+#define nft_trans_flowtable_update(trans) \
+ (((struct nft_trans_flowtable *)trans->data)->update)
+#define nft_trans_flowtable_hooks(trans) \
+ (((struct nft_trans_flowtable *)trans->data)->hook_list)
+#define nft_trans_flowtable_flags(trans) \
+ (((struct nft_trans_flowtable *)trans->data)->flags)
+
+#define NFT_TRANS_GC_BATCHCOUNT 256
+
+struct nft_trans_gc {
+ struct list_head list;
+ struct net *net;
+ struct nft_set *set;
+ u32 seq;
+ u16 count;
+ void *priv[NFT_TRANS_GC_BATCHCOUNT];
+ struct rcu_head rcu;
+};
+
+struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
+ unsigned int gc_seq, gfp_t gfp);
+void nft_trans_gc_destroy(struct nft_trans_gc *trans);
+
+struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc,
+ unsigned int gc_seq, gfp_t gfp);
+void nft_trans_gc_queue_async_done(struct nft_trans_gc *gc);
+
+struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp);
+void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans);
+
+void nft_trans_gc_elem_add(struct nft_trans_gc *gc, void *priv);
+
+struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
+ unsigned int gc_seq);
+struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc);
+
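The GC API above is batch-oriented: a caller collects up to NFT_TRANS_GC_BATCHCOUNT expired elements per batch, queues a full batch, and starts a new one. A schematic userspace analogue of that flow (the batching logic only; RCU, sequence counters, and the real nft_trans_gc_*() calls are elided):

#include <stdio.h>
#include <stdlib.h>

#define BATCHCOUNT 4          /* stands in for NFT_TRANS_GC_BATCHCOUNT (256) */

struct gc_batch {
	unsigned int count;
	void *priv[BATCHCOUNT];
};

static struct gc_batch *gc_alloc(void)
{
	return calloc(1, sizeof(struct gc_batch));
}

static void gc_queue(struct gc_batch *gc)
{
	printf("queueing batch of %u elements\n", gc->count);
	free(gc);             /* the kernel defers the batch to a workqueue */
}

/* Mirrors the shape of nft_trans_gc_queue_async(): flush when full. */
static struct gc_batch *gc_maybe_flush(struct gc_batch *gc)
{
	if (gc->count < BATCHCOUNT)
		return gc;
	gc_queue(gc);
	return gc_alloc();    /* NULL aborts the walk, like the kernel API */
}

int main(void)
{
	struct gc_batch *gc = gc_alloc();

	for (long i = 0; gc && i < 10; i++) {
		gc = gc_maybe_flush(gc);
		if (!gc)
			break;
		gc->priv[gc->count++] = (void *)i;   /* expired element */
	}
	if (gc)
		gc_queue(gc);                        /* flush the tail batch */
	return 0;
}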
+void nft_setelem_data_deactivate(const struct net *net,
+ const struct nft_set *set,
+ struct nft_set_elem *elem);
+
+int __init nft_chain_filter_init(void);
+void nft_chain_filter_fini(void);
+
+void __init nft_chain_route_init(void);
+void nft_chain_route_fini(void);
+
+void nf_tables_trans_destroy_flush_work(void);
+
+int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result);
+__be64 nf_jiffies64_to_msecs(u64 input);
+
+#ifdef CONFIG_MODULES
+__printf(2, 3) int nft_request_module(struct net *net, const char *fmt, ...);
+#else
+static inline int nft_request_module(struct net *net, const char *fmt, ...) { return -ENOENT; }
+#endif
+
+struct nftables_pernet {
+ struct list_head tables;
+ struct list_head commit_list;
+ struct list_head binding_list;
+ struct list_head module_list;
+ struct list_head notify_list;
+ struct mutex commit_mutex;
+ u64 table_handle;
+ unsigned int base_seq;
+ u8 validate_state;
+ unsigned int gc_seq;
+};
+
+extern unsigned int nf_tables_net_id;
+
+static inline struct nftables_pernet *nft_pernet(const struct net *net)
+{
+ return net_generic(net, nf_tables_net_id);
+}
+
+#define __NFT_REDUCE_READONLY 1UL
+#define NFT_REDUCE_READONLY (void *)__NFT_REDUCE_READONLY
+
+static inline bool nft_reduce_is_readonly(const struct nft_expr *expr)
+{
+ return expr->ops->reduce == NFT_REDUCE_READONLY;
+}
+
+void nft_reg_track_update(struct nft_regs_track *track,
+ const struct nft_expr *expr, u8 dreg, u8 len);
+void nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg, u8 len);
+void __nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg);
+
+static inline bool nft_reg_track_cmp(struct nft_regs_track *track,
+ const struct nft_expr *expr, u8 dreg)
+{
+ return track->regs[dreg].selector &&
+ track->regs[dreg].selector->ops == expr->ops &&
+ track->regs[dreg].num_reg == 0;
+}
+
+#endif /* _NET_NF_TABLES_H */
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
new file mode 100644
index 000000000..1223af68c
--- /dev/null
+++ b/include/net/netfilter/nf_tables_core.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_NF_TABLES_CORE_H
+#define _NET_NF_TABLES_CORE_H
+
+#include <net/netfilter/nf_tables.h>
+#include <linux/indirect_call_wrapper.h>
+
+extern struct nft_expr_type nft_imm_type;
+extern struct nft_expr_type nft_cmp_type;
+extern struct nft_expr_type nft_counter_type;
+extern struct nft_expr_type nft_lookup_type;
+extern struct nft_expr_type nft_bitwise_type;
+extern struct nft_expr_type nft_byteorder_type;
+extern struct nft_expr_type nft_payload_type;
+extern struct nft_expr_type nft_dynset_type;
+extern struct nft_expr_type nft_range_type;
+extern struct nft_expr_type nft_meta_type;
+extern struct nft_expr_type nft_rt_type;
+extern struct nft_expr_type nft_exthdr_type;
+extern struct nft_expr_type nft_last_type;
+
+#ifdef CONFIG_NETWORK_SECMARK
+extern struct nft_object_type nft_secmark_obj_type;
+#endif
+extern struct nft_object_type nft_counter_obj_type;
+
+int nf_tables_core_module_init(void);
+void nf_tables_core_module_exit(void);
+
+struct nft_bitwise_fast_expr {
+ u32 mask;
+ u32 xor;
+ u8 sreg;
+ u8 dreg;
+};
+
+struct nft_cmp_fast_expr {
+ u32 data;
+ u32 mask;
+ u8 sreg;
+ u8 len;
+ bool inv;
+};
+
+struct nft_cmp16_fast_expr {
+ struct nft_data data;
+ struct nft_data mask;
+ u8 sreg;
+ u8 len;
+ bool inv;
+};
+
+struct nft_immediate_expr {
+ struct nft_data data;
+ u8 dreg;
+ u8 dlen;
+};
+
+extern const struct nft_expr_ops nft_cmp_fast_ops;
+extern const struct nft_expr_ops nft_cmp16_fast_ops;
+
+struct nft_payload {
+ enum nft_payload_bases base:8;
+ u8 offset;
+ u8 len;
+ u8 dreg;
+};
+
+struct nft_payload_set {
+ enum nft_payload_bases base:8;
+ u8 offset;
+ u8 len;
+ u8 sreg;
+ u8 csum_type;
+ u8 csum_offset;
+ u8 csum_flags;
+};
+
+extern const struct nft_expr_ops nft_payload_fast_ops;
+
+extern const struct nft_expr_ops nft_bitwise_fast_ops;
+
+extern struct static_key_false nft_counters_enabled;
+extern struct static_key_false nft_trace_enabled;
+
+extern const struct nft_set_type nft_set_rhash_type;
+extern const struct nft_set_type nft_set_hash_type;
+extern const struct nft_set_type nft_set_hash_fast_type;
+extern const struct nft_set_type nft_set_rbtree_type;
+extern const struct nft_set_type nft_set_bitmap_type;
+extern const struct nft_set_type nft_set_pipapo_type;
+extern const struct nft_set_type nft_set_pipapo_avx2_type;
+
+#ifdef CONFIG_RETPOLINE
+bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_hash_lookup_fast(const struct net *net,
+ const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+#else
+static inline bool
+nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
+{
+ return set->ops->lookup(net, set, key, ext);
+}
+#endif
+
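The CONFIG_RETPOLINE branch exists so nft_set_do_lookup() can compare the set's ops against the known backends and call them directly, avoiding a retpoline-penalized indirect branch on the packet path. A simplified userspace sketch of that devirtualization idea (an analogue of the INDIRECT_CALL_*() helpers, with made-up types):

#include <stdbool.h>
#include <stdio.h>

struct set;
typedef bool (*lookup_fn)(const struct set *s, const unsigned int *key);

static bool hash_lookup(const struct set *s, const unsigned int *key)
{
	(void)s; (void)key;
	return true;
}

static bool rbtree_lookup(const struct set *s, const unsigned int *key)
{
	(void)s; (void)key;
	return false;
}

struct set { lookup_fn lookup; };

/* Devirtualize the common targets; fall back to the indirect call. */
static bool do_lookup(const struct set *s, const unsigned int *key)
{
	if (s->lookup == hash_lookup)
		return hash_lookup(s, key);
	if (s->lookup == rbtree_lookup)
		return rbtree_lookup(s, key);
	return s->lookup(s, key);
}

int main(void)
{
	struct set s = { .lookup = hash_lookup };
	unsigned int key = 0;

	printf("%d\n", do_lookup(&s, &key));
	return 0;
}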
+/* called from nft_pipapo_avx2.c */
+bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+/* called from nft_set_pipapo.c */
+bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+
+void nft_counter_init_seqcount(void);
+
+struct nft_expr;
+struct nft_regs;
+struct nft_pktinfo;
+void nft_meta_get_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_cmp_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_lookup_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_payload_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_immediate_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_bitwise_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_range_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_byteorder_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_dynset_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_rt_get_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_counter_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
new file mode 100644
index 000000000..5225d2bd1
--- /dev/null
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_TABLES_IPV4_H_
+#define _NF_TABLES_IPV4_H_
+
+#include <net/netfilter/nf_tables.h>
+#include <net/ip.h>
+
+static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt)
+{
+ struct iphdr *ip;
+
+ ip = ip_hdr(pkt->skb);
+ pkt->flags = NFT_PKTINFO_L4PROTO;
+ pkt->tprot = ip->protocol;
+ pkt->thoff = ip_hdrlen(pkt->skb);
+ pkt->fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+}
+
+static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
+{
+ struct iphdr *iph, _iph;
+ u32 len, thoff;
+
+ iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ sizeof(*iph), &_iph);
+ if (!iph)
+ return -1;
+
+ if (iph->ihl < 5 || iph->version != 4)
+ return -1;
+
+ len = iph_totlen(pkt->skb, iph);
+ thoff = skb_network_offset(pkt->skb) + (iph->ihl * 4);
+ if (pkt->skb->len < len)
+ return -1;
+ else if (len < thoff)
+ return -1;
+
+ pkt->flags = NFT_PKTINFO_L4PROTO;
+ pkt->tprot = iph->protocol;
+ pkt->thoff = thoff;
+ pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+
+ return 0;
+}
+
+static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
+{
+ if (__nft_set_pktinfo_ipv4_validate(pkt) < 0)
+ nft_set_pktinfo_unspec(pkt);
+}
+
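The checks in __nft_set_pktinfo_ipv4_validate() guard against malformed headers in three steps: version/IHL sanity, total length versus what the skb actually holds, and total length versus the transport offset. The same logic in a standalone userspace form (illustration only; the minimal header type here is hand-rolled, not the kernel's struct iphdr):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct iphdr_min {        /* first bytes of an IPv4 header (no options) */
	uint8_t  ver_ihl;  /* version (high nibble), IHL (low nibble) */
	uint8_t  tos;
	uint16_t tot_len;  /* big endian */
};

static int validate(const uint8_t *buf, size_t buflen)
{
	const struct iphdr_min *ip = (const struct iphdr_min *)buf;
	unsigned int ihl, len, thoff;

	if (buflen < sizeof(*ip))
		return -1;
	ihl = ip->ver_ihl & 0x0f;
	if (ihl < 5 || (ip->ver_ihl >> 4) != 4)
		return -1;                    /* bad IHL or not IPv4 */
	len = ntohs(ip->tot_len);
	thoff = ihl * 4;                      /* transport header offset */
	if (buflen < len || len < thoff)
		return -1;                    /* truncated or inconsistent */
	return 0;
}

int main(void)
{
	uint8_t pkt[20] = { 0x45, 0, 0, 20 };   /* v4, ihl=5, tot_len=20 */

	printf("%s\n", validate(pkt, sizeof(pkt)) ? "bad" : "ok");
	return 0;
}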
+static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt)
+{
+ struct iphdr *iph;
+ u32 len, thoff;
+
+ if (!pskb_may_pull(pkt->skb, sizeof(*iph)))
+ return -1;
+
+ iph = ip_hdr(pkt->skb);
+ if (iph->ihl < 5 || iph->version != 4)
+ goto inhdr_error;
+
+ len = iph_totlen(pkt->skb, iph);
+ thoff = iph->ihl * 4;
+ if (pkt->skb->len < len) {
+ __IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INTRUNCATEDPKTS);
+ return -1;
+ } else if (len < thoff) {
+ goto inhdr_error;
+ }
+
+ pkt->flags = NFT_PKTINFO_L4PROTO;
+ pkt->tprot = iph->protocol;
+ pkt->thoff = thoff;
+ pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+
+ return 0;
+
+inhdr_error:
+ __IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INHDRERRORS);
+ return -1;
+}
+
+#endif
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
new file mode 100644
index 000000000..ec7eaeaf4
--- /dev/null
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_TABLES_IPV6_H_
+#define _NF_TABLES_IPV6_H_
+
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/ipv6.h>
+#include <net/netfilter/nf_tables.h>
+
+static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt)
+{
+ unsigned int flags = IP6_FH_F_AUTH;
+ int protohdr, thoff = 0;
+ unsigned short frag_off;
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
+ if (protohdr < 0) {
+ nft_set_pktinfo_unspec(pkt);
+ return;
+ }
+
+ pkt->flags = NFT_PKTINFO_L4PROTO;
+ pkt->tprot = protohdr;
+ pkt->thoff = thoff;
+ pkt->fragoff = frag_off;
+}
+
+static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ unsigned int flags = IP6_FH_F_AUTH;
+ struct ipv6hdr *ip6h, _ip6h;
+ unsigned int thoff = 0;
+ unsigned short frag_off;
+ int protohdr;
+ u32 pkt_len;
+
+ ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ sizeof(*ip6h), &_ip6h);
+ if (!ip6h)
+ return -1;
+
+ if (ip6h->version != 6)
+ return -1;
+
+ pkt_len = ntohs(ip6h->payload_len);
+ if (pkt_len + sizeof(*ip6h) > pkt->skb->len)
+ return -1;
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
+ if (protohdr < 0)
+ return -1;
+
+ pkt->flags = NFT_PKTINFO_L4PROTO;
+ pkt->tprot = protohdr;
+ pkt->thoff = thoff;
+ pkt->fragoff = frag_off;
+
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
+{
+ if (__nft_set_pktinfo_ipv6_validate(pkt) < 0)
+ nft_set_pktinfo_unspec(pkt);
+}
+
+static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ unsigned int flags = IP6_FH_F_AUTH;
+ unsigned short frag_off;
+ unsigned int thoff = 0;
+ struct inet6_dev *idev;
+ struct ipv6hdr *ip6h;
+ int protohdr;
+ u32 pkt_len;
+
+ if (!pskb_may_pull(pkt->skb, sizeof(*ip6h)))
+ return -1;
+
+ ip6h = ipv6_hdr(pkt->skb);
+ if (ip6h->version != 6)
+ goto inhdr_error;
+
+ pkt_len = ntohs(ip6h->payload_len);
+ if (pkt_len + sizeof(*ip6h) > pkt->skb->len) {
+ idev = __in6_dev_get(nft_in(pkt));
+ __IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INTRUNCATEDPKTS);
+ return -1;
+ }
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
+ if (protohdr < 0)
+ goto inhdr_error;
+
+ pkt->flags = NFT_PKTINFO_L4PROTO;
+ pkt->tprot = protohdr;
+ pkt->thoff = thoff;
+ pkt->fragoff = frag_off;
+
+ return 0;
+
+inhdr_error:
+ idev = __in6_dev_get(nft_in(pkt));
+ __IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INHDRERRORS);
+ return -1;
+#else
+ return -1;
+#endif
+}
+
+#endif
diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
new file mode 100644
index 000000000..3568b6a2f
--- /dev/null
+++ b/include/net/netfilter/nf_tables_offload.h
@@ -0,0 +1,100 @@
+#ifndef _NET_NF_TABLES_OFFLOAD_H
+#define _NET_NF_TABLES_OFFLOAD_H
+
+#include <net/flow_offload.h>
+#include <net/netfilter/nf_tables.h>
+
+enum nft_offload_reg_flags {
+ NFT_OFFLOAD_F_NETWORK2HOST = (1 << 0),
+};
+
+struct nft_offload_reg {
+ u32 key;
+ u32 len;
+ u32 base_offset;
+ u32 offset;
+ u32 flags;
+ struct nft_data data;
+ struct nft_data mask;
+};
+
+enum nft_offload_dep_type {
+ NFT_OFFLOAD_DEP_UNSPEC = 0,
+ NFT_OFFLOAD_DEP_NETWORK,
+ NFT_OFFLOAD_DEP_TRANSPORT,
+};
+
+struct nft_offload_ctx {
+ struct {
+ enum nft_offload_dep_type type;
+ __be16 l3num;
+ u8 protonum;
+ } dep;
+ unsigned int num_actions;
+ struct net *net;
+ struct nft_offload_reg regs[NFT_REG32_15 + 1];
+};
+
+void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
+ enum nft_offload_dep_type type);
+void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
+ const void *data, u32 len);
+
+struct nft_flow_key {
+ struct flow_dissector_key_basic basic;
+ struct flow_dissector_key_control control;
+ union {
+ struct flow_dissector_key_ipv4_addrs ipv4;
+ struct flow_dissector_key_ipv6_addrs ipv6;
+ };
+ struct flow_dissector_key_ports tp;
+ struct flow_dissector_key_ip ip;
+ struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_vlan cvlan;
+ struct flow_dissector_key_eth_addrs eth_addrs;
+ struct flow_dissector_key_meta meta;
+} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
+
+struct nft_flow_match {
+ struct flow_dissector dissector;
+ struct nft_flow_key key;
+ struct nft_flow_key mask;
+};
+
+struct nft_flow_rule {
+ __be16 proto;
+ struct nft_flow_match match;
+ struct flow_rule *rule;
+};
+
+void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
+ enum flow_dissector_key_id addr_type);
+
+struct nft_rule;
+struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rule *rule);
+int nft_flow_rule_stats(const struct nft_chain *chain, const struct nft_rule *rule);
+void nft_flow_rule_destroy(struct nft_flow_rule *flow);
+int nft_flow_rule_offload_commit(struct net *net);
+
+#define NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, __flags) \
+ (__reg)->base_offset = \
+ offsetof(struct nft_flow_key, __base); \
+ (__reg)->offset = \
+ offsetof(struct nft_flow_key, __base.__field); \
+ (__reg)->len = __len; \
+ (__reg)->key = __key; \
+ (__reg)->flags = __flags;
+
+#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
+ NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, 0)
+
+#define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg) \
+ NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
+ memset(&(__reg)->mask, 0xff, (__reg)->len);
+
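Each expression's offload callback uses these macros to describe which slice of the flattened struct nft_flow_key a register matches on: a base offset for the dissector key plus a field offset and a length. A simplified, self-contained version showing the offsetof() arithmetic (the miniature types and the MATCH() macro are stand-ins, not the kernel definitions):

#include <assert.h>
#include <stddef.h>

struct key_ipv4 { unsigned int src, dst; };
struct flow_key { struct key_ipv4 ipv4; };
struct reg { size_t base_offset, offset, len; };

#define MATCH(__base, __field, __len, __reg) do {			\
	(__reg)->base_offset = offsetof(struct flow_key, __base);	\
	(__reg)->offset = offsetof(struct flow_key, __base.__field);	\
	(__reg)->len = (__len);						\
} while (0)

int main(void)
{
	struct reg r;

	MATCH(ipv4, dst, sizeof(unsigned int), &r);
	assert(r.offset - r.base_offset == offsetof(struct key_ipv4, dst));
	assert(r.len == sizeof(unsigned int));
	return 0;
}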
+bool nft_chain_offload_support(const struct nft_base_chain *basechain);
+
+int nft_offload_init(void);
+void nft_offload_exit(void);
+
+#endif
diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
new file mode 100644
index 000000000..faa108b1b
--- /dev/null
+++ b/include/net/netfilter/nf_tproxy.h
@@ -0,0 +1,128 @@
+#ifndef _NF_TPROXY_H_
+#define _NF_TPROXY_H_
+
+#include <net/tcp.h>
+
+enum nf_tproxy_lookup_t {
+ NF_TPROXY_LOOKUP_LISTENER,
+ NF_TPROXY_LOOKUP_ESTABLISHED,
+};
+
+static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
+{
+ if (inet_sk_transparent(sk))
+ return true;
+
+ sock_gen_put(sk);
+ return false;
+}
+
+static inline void nf_tproxy_twsk_deschedule_put(struct inet_timewait_sock *tw)
+{
+ local_bh_disable();
+ inet_twsk_deschedule_put(tw);
+ local_bh_enable();
+}
+
+/* assign a socket to the skb -- consumes sk */
+static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
+{
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+}
+
+__be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr);
+
+/**
+ * nf_tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections
+ * @net: The network namespace.
+ * @skb: The skb being processed.
+ * @laddr: IPv4 address to redirect to or zero.
+ * @lport: TCP port to redirect to or zero.
+ * @sk: The TIME_WAIT TCP socket found by the lookup.
+ *
+ * We have to handle SYN packets arriving to TIME_WAIT sockets
+ * differently: instead of reopening the connection we should rather
+ * redirect the new connection to the proxy if there's a listener
+ * socket present.
+ *
+ * nf_tproxy_handle_time_wait4() consumes the socket reference passed in.
+ *
+ * Returns the listener socket if there's one, the TIME_WAIT socket if
+ * no such listener is found, or NULL if the TCP header is incomplete.
+ */
+struct sock *
+nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
+ __be32 laddr, __be16 lport, struct sock *sk);
+
+/*
+ * This is used when the user wants to intercept a connection matching
+ * an explicit iptables rule. In this case the sockets are matched
+ * in the following preference order:
+ *
+ * - match: if there's a fully established connection matching the
+ * _packet_ tuple, it is returned, assuming the redirection
+ * already took place and we process a packet belonging to an
+ * established connection
+ *
+ * - match: if there's a listening socket matching the redirection
+ * (e.g. on-port & on-ip of the connection), it is returned,
+ * regardless of whether it was bound to 0.0.0.0 or an explicit
+ * address. The reasoning is that if there's an explicit rule, it
+ * does not really matter if the listener is bound to an interface
+ * or to 0. The user already stated that they want redirection
+ * (since they added the rule).
+ *
+ * Please note that there's an overlap between what a TPROXY target
+ * and a socket match will match. Normally if you have both rules the
+ * "socket" match will be the first one, effectively all packets
+ * belonging to established connections going through that one.
+ */
+struct sock *
+nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
+ const u8 protocol,
+ const __be32 saddr, const __be32 daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in,
+ const enum nf_tproxy_lookup_t lookup_type);
+
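In code, the preference order described above boils down to two lookups: the established table is consulted with the packet's own tuple first, and only on a miss is a listener looked up on the redirect address and port. A toy userspace rendition of that fallback (stubbed lookups and hypothetical names, not the kernel implementation):

#include <stdio.h>

struct sock { const char *kind; };

static struct sock listener = { "listener" };

static struct sock *lookup_established(unsigned int saddr, unsigned int daddr)
{
	(void)saddr; (void)daddr;
	return NULL;                       /* pretend there is no hit */
}

static struct sock *lookup_listener(unsigned int laddr, unsigned short lport)
{
	(void)laddr; (void)lport;
	return &listener;
}

int main(void)
{
	/* step 1: established connection for the packet tuple */
	struct sock *sk = lookup_established(0x0a000001, 0x0a000002);

	/* step 2: fall back to a listener on the redirect tuple */
	if (!sk)
		sk = lookup_listener(0, 8080); /* 0 models a 0.0.0.0 bind */
	printf("matched %s socket\n", sk ? sk->kind : "no");
	return 0;
}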
+const struct in6_addr *
+nf_tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
+ const struct in6_addr *daddr);
+
+/**
+ * nf_tproxy_handle_time_wait6 - handle IPv6 TCP TIME_WAIT reopen redirections
+ * @skb: The skb being processed.
+ * @tproto: Transport protocol.
+ * @thoff: Transport protocol header offset.
+ * @net: Network namespace.
+ * @laddr: IPv6 address to redirect to.
+ * @lport: TCP port to redirect to or zero.
+ * @sk: The TIME_WAIT TCP socket found by the lookup.
+ *
+ * We have to handle SYN packets arriving to TIME_WAIT sockets
+ * differently: instead of reopening the connection we should rather
+ * redirect the new connection to the proxy if there's a listener
+ * socket present.
+ *
+ * nf_tproxy_handle_time_wait6() consumes the socket reference passed in.
+ *
+ * Returns the listener socket if there's one, the TIME_WAIT socket if
+ * no such listener is found, or NULL if the TCP header is incomplete.
+ */
+struct sock *
+nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
+ struct net *net,
+ const struct in6_addr *laddr,
+ const __be16 lport,
+ struct sock *sk);
+
+struct sock *
+nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
+ const u8 protocol,
+ const struct in6_addr *saddr, const struct in6_addr *daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in,
+ const enum nf_tproxy_lookup_t lookup_type);
+
+#endif /* _NF_TPROXY_H_ */
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
new file mode 100644
index 000000000..eed099eae
--- /dev/null
+++ b/include/net/netfilter/nft_fib.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NFT_FIB_H_
+#define _NFT_FIB_H_
+
+#include <net/netfilter/nf_tables.h>
+
+struct nft_fib {
+ u8 dreg;
+ u8 result;
+ u32 flags;
+};
+
+extern const struct nla_policy nft_fib_policy[];
+
+static inline bool
+nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
+{
+ return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
+}
+
+int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr);
+int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nft_data **data);
+
+
+void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+
+void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+
+void nft_fib_store_result(void *reg, const struct nft_fib *priv,
+ const struct net_device *dev);
+
+bool nft_fib_reduce(struct nft_regs_track *track,
+ const struct nft_expr *expr);
+#endif
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
new file mode 100644
index 000000000..9b51cc67d
--- /dev/null
+++ b/include/net/netfilter/nft_meta.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NFT_META_H_
+#define _NFT_META_H_
+
+#include <net/netfilter/nf_tables.h>
+
+struct nft_meta {
+ enum nft_meta_keys key:8;
+ u8 len;
+ union {
+ u8 dreg;
+ u8 sreg;
+ };
+};
+
+extern const struct nla_policy nft_meta_policy[];
+
+int nft_meta_get_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+
+int nft_meta_set_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+
+int nft_meta_get_dump(struct sk_buff *skb,
+ const struct nft_expr *expr);
+
+int nft_meta_set_dump(struct sk_buff *skb,
+ const struct nft_expr *expr);
+
+void nft_meta_get_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+
+void nft_meta_set_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+
+void nft_meta_set_destroy(const struct nft_ctx *ctx,
+ const struct nft_expr *expr);
+
+int nft_meta_set_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data);
+
+bool nft_meta_get_reduce(struct nft_regs_track *track,
+ const struct nft_expr *expr);
+#endif
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
new file mode 100644
index 000000000..56b123a42
--- /dev/null
+++ b/include/net/netfilter/nft_reject.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NFT_REJECT_H_
+#define _NFT_REJECT_H_
+
+#include <linux/types.h>
+#include <net/netlink.h>
+#include <net/netfilter/nf_tables.h>
+#include <uapi/linux/netfilter/nf_tables.h>
+
+struct nft_reject {
+ enum nft_reject_types type:8;
+ u8 icmp_code;
+};
+
+extern const struct nla_policy nft_reject_policy[];
+
+int nft_reject_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data);
+
+int nft_reject_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+
+int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr);
+
+int nft_reject_icmp_code(u8 code);
+int nft_reject_icmpv6_code(u8 code);
+
+#endif
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
new file mode 100644
index 000000000..4c3809e14
--- /dev/null
+++ b/include/net/netfilter/xt_rateest.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _XT_RATEEST_H
+#define _XT_RATEEST_H
+
+#include <net/gen_stats.h>
+
+struct xt_rateest {
+ /* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
+ struct gnet_stats_basic_sync bstats;
+ spinlock_t lock;
+
+
+ /* following fields not accessed in hot path */
+ unsigned int refcnt;
+ struct hlist_node list;
+ char name[IFNAMSIZ];
+ struct gnet_estimator params;
+ struct rcu_head rcu;
+
+ /* keep this field far away to speedup xt_rateest_mt() */
+ struct net_rate_estimator __rcu *rate_est;
+};
+
+struct xt_rateest *xt_rateest_lookup(struct net *net, const char *name);
+void xt_rateest_put(struct net *net, struct xt_rateest *est);
+
+#endif /* _XT_RATEEST_H */
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
new file mode 100644
index 000000000..43ae50337
--- /dev/null
+++ b/include/net/netlabel.h
@@ -0,0 +1,682 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * NetLabel System
+ *
+ * The NetLabel system manages static and dynamic label mappings for network
+ * protocols such as CIPSO and RIPSO.
+ *
+ * Author: Paul Moore <paul@paul-moore.com>
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008
+ */
+
+#ifndef _NETLABEL_H
+#define _NETLABEL_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <net/netlink.h>
+#include <net/request_sock.h>
+#include <linux/refcount.h>
+
+struct cipso_v4_doi;
+struct calipso_doi;
+
+/*
+ * NetLabel - A management interface for maintaining network packet label
+ * mapping tables for explicit packet labeling protocols.
+ *
+ * Network protocols such as CIPSO and RIPSO require a label translation layer
+ * to convert the label on the packet into something meaningful on the host
+ * machine. In the current Linux implementation these mapping tables live
+ * inside the kernel; NetLabel provides a mechanism for user space applications
+ * to manage these mapping tables.
+ *
+ * NetLabel makes use of the Generic NETLINK mechanism as a transport layer to
+ * send messages between kernel and user space. The general format of a
+ * NetLabel message is shown below:
+ *
+ * +-----------------+-------------------+--------- --- -- -
+ * | struct nlmsghdr | struct genlmsghdr | payload
+ * +-----------------+-------------------+--------- --- -- -
+ *
+ * The 'nlmsghdr' and 'genlmsghdr' structs should be dealt with like normal.
+ * The payload is dependent on the subsystem specified in the
+ * 'nlmsghdr->nlmsg_type' and should be defined below, supporting functions
+ * should be defined in the corresponding net/netlabel/netlabel_<subsys>.h|c
+ * file. All of the fields in the NetLabel payload are NETLINK attributes, see
+ * the include/net/netlink.h file for more information on NETLINK attributes.
+ *
+ */
+
+/*
+ * NetLabel NETLINK protocol
+ */
+
+/* NetLabel NETLINK protocol version
+ * 1: initial version
+ * 2: added static labels for unlabeled connections
+ * 3: network selectors added to the NetLabel/LSM domain mapping and the
+ * CIPSO_V4_MAP_LOCAL CIPSO mapping was added
+ */
+#define NETLBL_PROTO_VERSION 3
+
+/* NetLabel NETLINK types/families */
+#define NETLBL_NLTYPE_NONE 0
+#define NETLBL_NLTYPE_MGMT 1
+#define NETLBL_NLTYPE_MGMT_NAME "NLBL_MGMT"
+#define NETLBL_NLTYPE_RIPSO 2
+#define NETLBL_NLTYPE_RIPSO_NAME "NLBL_RIPSO"
+#define NETLBL_NLTYPE_CIPSOV4 3
+#define NETLBL_NLTYPE_CIPSOV4_NAME "NLBL_CIPSOv4"
+#define NETLBL_NLTYPE_CIPSOV6 4
+#define NETLBL_NLTYPE_CIPSOV6_NAME "NLBL_CIPSOv6"
+#define NETLBL_NLTYPE_UNLABELED 5
+#define NETLBL_NLTYPE_UNLABELED_NAME "NLBL_UNLBL"
+#define NETLBL_NLTYPE_ADDRSELECT 6
+#define NETLBL_NLTYPE_ADDRSELECT_NAME "NLBL_ADRSEL"
+#define NETLBL_NLTYPE_CALIPSO 7
+#define NETLBL_NLTYPE_CALIPSO_NAME "NLBL_CALIPSO"
+
+/*
+ * NetLabel - Kernel API for accessing the network packet label mappings.
+ *
+ * The following functions are provided for use by other kernel modules,
+ * specifically kernel LSM modules, to provide a consistent, transparent API
+ * for dealing with explicit packet labeling protocols such as CIPSO and
+ * RIPSO. The functions defined here are implemented in the
+ * net/netlabel/netlabel_kapi.c file.
+ *
+ */
+
+/* NetLabel audit information */
+struct netlbl_audit {
+ u32 secid;
+ kuid_t loginuid;
+ unsigned int sessionid;
+};
+
+/*
+ * LSM security attributes
+ */
+
+/**
+ * struct netlbl_lsm_cache - NetLabel LSM security attribute cache
+ * @refcount: atomic reference counter
+ * @free: LSM supplied function to free the cache data
+ * @data: LSM supplied cache data
+ *
+ * Description:
+ * This structure is provided for LSMs which wish to make use of the NetLabel
+ * caching mechanism to store LSM specific data/attributes in the NetLabel
+ * cache. If the LSM has to perform a lot of translation from the NetLabel
+ * security attributes into its own internal representation, then the cache
+ * mechanism can provide a way to eliminate some or all of that translation
+ * overhead on a cache hit.
+ *
+ */
+struct netlbl_lsm_cache {
+ refcount_t refcount;
+ void (*free) (const void *data);
+ void *data;
+};
+
+/**
+ * struct netlbl_lsm_catmap - NetLabel LSM secattr category bitmap
+ * @startbit: the value of the lowest order bit in the bitmap
+ * @bitmap: the category bitmap
+ * @next: pointer to the next bitmap "node" or NULL
+ *
+ * Description:
+ * This structure is used to represent category bitmaps. Due to the large
+ * number of categories supported by most labeling protocols it is not
+ * practical to transfer a full bitmap internally so NetLabel adopts a sparse
+ * bitmap structure modeled after SELinux's ebitmap structure.
+ * The catmap bitmap field MUST be a power of two in length and large
+ * enough to hold at least 240 bits. Special care (i.e. check the code!)
+ * should be used when changing these values as the LSM implementation
+ * probably has functions which rely on the sizes of these types to speed
+ * processing.
+ *
+ */
+#define NETLBL_CATMAP_MAPTYPE u64
+#define NETLBL_CATMAP_MAPCNT 4
+#define NETLBL_CATMAP_MAPSIZE (sizeof(NETLBL_CATMAP_MAPTYPE) * 8)
+#define NETLBL_CATMAP_SIZE (NETLBL_CATMAP_MAPSIZE * \
+ NETLBL_CATMAP_MAPCNT)
+#define NETLBL_CATMAP_BIT (NETLBL_CATMAP_MAPTYPE)0x01
+struct netlbl_lsm_catmap {
+ u32 startbit;
+ NETLBL_CATMAP_MAPTYPE bitmap[NETLBL_CATMAP_MAPCNT];
+ struct netlbl_lsm_catmap *next;
+};
+
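A category number is located in this sparse structure with simple arithmetic: round down to a multiple of NETLBL_CATMAP_SIZE for the node's startbit, then split the remainder into a bitmap[] index and a bit position. A standalone sketch of that addressing (the constants mirror the defines above; everything else is illustrative only):

#include <stdio.h>
#include <stdint.h>

#define MAPSIZE  (sizeof(uint64_t) * 8)        /* NETLBL_CATMAP_MAPSIZE: 64 */
#define MAPCNT   4                             /* NETLBL_CATMAP_MAPCNT */
#define NODESIZE (MAPSIZE * MAPCNT)            /* NETLBL_CATMAP_SIZE: 256 */

int main(void)
{
	uint32_t cat = 300;                        /* category bit to locate */
	uint32_t startbit = cat & ~(NODESIZE - 1); /* node base: 256 */
	uint32_t off = cat - startbit;             /* offset in node: 44 */

	printf("node@%u bitmap[%u] bit %u\n",
	       startbit, off / (uint32_t)MAPSIZE, off % (uint32_t)MAPSIZE);
	return 0;
}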
+/**
+ * struct netlbl_lsm_secattr - NetLabel LSM security attributes
+ * @flags: indicate structure attributes, see NETLBL_SECATTR_*
+ * @type: indicate the NLTYPE of the attributes
+ * @domain: the NetLabel LSM domain
+ * @cache: NetLabel LSM specific cache
+ * @attr.mls: MLS sensitivity label
+ * @attr.mls.cat: MLS category bitmap
+ * @attr.mls.lvl: MLS sensitivity level
+ * @attr.secid: LSM specific secid token
+ *
+ * Description:
+ * This structure is used to pass security attributes between NetLabel and the
+ * LSM modules. The flags field is used to specify which fields within the
+ * struct are valid and valid values can be created by bitwise OR'ing the
+ * NETLBL_SECATTR_* defines. The domain field is typically set by the LSM to
+ * specify domain specific configuration settings and is not usually used by
+ * NetLabel itself when returning security attributes to the LSM.
+ *
+ */
+struct netlbl_lsm_secattr {
+ u32 flags;
+ /* bitmap values for 'flags' */
+#define NETLBL_SECATTR_NONE 0x00000000
+#define NETLBL_SECATTR_DOMAIN 0x00000001
+#define NETLBL_SECATTR_DOMAIN_CPY (NETLBL_SECATTR_DOMAIN | \
+ NETLBL_SECATTR_FREE_DOMAIN)
+#define NETLBL_SECATTR_CACHE 0x00000002
+#define NETLBL_SECATTR_MLS_LVL 0x00000004
+#define NETLBL_SECATTR_MLS_CAT 0x00000008
+#define NETLBL_SECATTR_SECID 0x00000010
+ /* bitmap meta-values for 'flags' */
+#define NETLBL_SECATTR_FREE_DOMAIN 0x01000000
+#define NETLBL_SECATTR_CACHEABLE (NETLBL_SECATTR_MLS_LVL | \
+ NETLBL_SECATTR_MLS_CAT | \
+ NETLBL_SECATTR_SECID)
+ u32 type;
+ char *domain;
+ struct netlbl_lsm_cache *cache;
+ struct {
+ struct {
+ struct netlbl_lsm_catmap *cat;
+ u32 lvl;
+ } mls;
+ u32 secid;
+ } attr;
+};
+
+/**
+ * struct netlbl_calipso_ops - NetLabel CALIPSO operations
+ * @doi_add: add a CALIPSO DOI
+ * @doi_free: free a CALIPSO DOI
+ * @doi_remove: remove a CALIPSO DOI
+ * @doi_getdef: returns a reference to a DOI
+ * @doi_putdef: releases a reference to a DOI
+ * @doi_walk: enumerate the DOI list
+ * @sock_getattr: retrieve the socket's attr
+ * @sock_setattr: set the socket's attr
+ * @sock_delattr: remove the socket's attr
+ * @req_setattr: set the req socket's attr
+ * @req_delattr: remove the req socket's attr
+ * @opt_getattr: retrieve attr from memory block
+ * @skbuff_optptr: find option in packet
+ * @skbuff_setattr: set the skbuff's attr
+ * @skbuff_delattr: remove the skbuff's attr
+ * @cache_invalidate: invalidate cache
+ * @cache_add: add cache entry
+ *
+ * Description:
+ * This structure is filled out by the CALIPSO engine and passed
+ * to the NetLabel core via a call to netlbl_calipso_ops_register().
+ * It enables the CALIPSO engine (and hence IPv6) to be compiled
+ * as a module.
+ */
+struct netlbl_calipso_ops {
+ int (*doi_add)(struct calipso_doi *doi_def,
+ struct netlbl_audit *audit_info);
+ void (*doi_free)(struct calipso_doi *doi_def);
+ int (*doi_remove)(u32 doi, struct netlbl_audit *audit_info);
+ struct calipso_doi *(*doi_getdef)(u32 doi);
+ void (*doi_putdef)(struct calipso_doi *doi_def);
+ int (*doi_walk)(u32 *skip_cnt,
+ int (*callback)(struct calipso_doi *doi_def, void *arg),
+ void *cb_arg);
+ int (*sock_getattr)(struct sock *sk,
+ struct netlbl_lsm_secattr *secattr);
+ int (*sock_setattr)(struct sock *sk,
+ const struct calipso_doi *doi_def,
+ const struct netlbl_lsm_secattr *secattr);
+ void (*sock_delattr)(struct sock *sk);
+ int (*req_setattr)(struct request_sock *req,
+ const struct calipso_doi *doi_def,
+ const struct netlbl_lsm_secattr *secattr);
+ void (*req_delattr)(struct request_sock *req);
+ int (*opt_getattr)(const unsigned char *calipso,
+ struct netlbl_lsm_secattr *secattr);
+ unsigned char *(*skbuff_optptr)(const struct sk_buff *skb);
+ int (*skbuff_setattr)(struct sk_buff *skb,
+ const struct calipso_doi *doi_def,
+ const struct netlbl_lsm_secattr *secattr);
+ int (*skbuff_delattr)(struct sk_buff *skb);
+ void (*cache_invalidate)(void);
+ int (*cache_add)(const unsigned char *calipso_ptr,
+ const struct netlbl_lsm_secattr *secattr);
+};
+
+/*
+ * LSM security attribute operations (inline)
+ */
+
+/**
+ * netlbl_secattr_cache_alloc - Allocate and initialize a secattr cache
+ * @flags: the memory allocation flags
+ *
+ * Description:
+ * Allocate and initialize a netlbl_lsm_cache structure. Returns a pointer
+ * on success, NULL on failure.
+ *
+ */
+static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(gfp_t flags)
+{
+ struct netlbl_lsm_cache *cache;
+
+ cache = kzalloc(sizeof(*cache), flags);
+ if (cache)
+ refcount_set(&cache->refcount, 1);
+ return cache;
+}
+
+/**
+ * netlbl_secattr_cache_free - Frees a netlbl_lsm_cache struct
+ * @cache: the struct to free
+ *
+ * Description:
+ * Frees @cache including all of the internal buffers.
+ *
+ */
+static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache)
+{
+ if (!refcount_dec_and_test(&cache->refcount))
+ return;
+
+ if (cache->free)
+ cache->free(cache->data);
+ kfree(cache);
+}
+
+/**
+ * netlbl_catmap_alloc - Allocate a LSM secattr catmap
+ * @flags: memory allocation flags
+ *
+ * Description:
+ * Allocate memory for a LSM secattr catmap, returns a pointer on success, NULL
+ * on failure.
+ *
+ */
+static inline struct netlbl_lsm_catmap *netlbl_catmap_alloc(gfp_t flags)
+{
+ return kzalloc(sizeof(struct netlbl_lsm_catmap), flags);
+}
+
+/**
+ * netlbl_catmap_free - Free a LSM secattr catmap
+ * @catmap: the category bitmap
+ *
+ * Description:
+ * Free a LSM secattr catmap.
+ *
+ */
+static inline void netlbl_catmap_free(struct netlbl_lsm_catmap *catmap)
+{
+ struct netlbl_lsm_catmap *iter;
+
+ while (catmap) {
+ iter = catmap;
+ catmap = catmap->next;
+ kfree(iter);
+ }
+}
+
+/**
+ * netlbl_secattr_init - Initialize a netlbl_lsm_secattr struct
+ * @secattr: the struct to initialize
+ *
+ * Description:
+ * Initialize an already allocated netlbl_lsm_secattr struct.
+ *
+ */
+static inline void netlbl_secattr_init(struct netlbl_lsm_secattr *secattr)
+{
+ memset(secattr, 0, sizeof(*secattr));
+}
+
+/**
+ * netlbl_secattr_destroy - Clears a netlbl_lsm_secattr struct
+ * @secattr: the struct to clear
+ *
+ * Description:
+ * Destroys the @secattr struct, including freeing all of the internal buffers.
+ * The struct must be reset with a call to netlbl_secattr_init() before reuse.
+ *
+ */
+static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr)
+{
+ if (secattr->flags & NETLBL_SECATTR_FREE_DOMAIN)
+ kfree(secattr->domain);
+ if (secattr->flags & NETLBL_SECATTR_CACHE)
+ netlbl_secattr_cache_free(secattr->cache);
+ if (secattr->flags & NETLBL_SECATTR_MLS_CAT)
+ netlbl_catmap_free(secattr->attr.mls.cat);
+}
+
+/**
+ * netlbl_secattr_alloc - Allocate and initialize a netlbl_lsm_secattr struct
+ * @flags: the memory allocation flags
+ *
+ * Description:
+ * Allocate and initialize a netlbl_lsm_secattr struct. Returns a valid
+ * pointer on success, or NULL on failure.
+ *
+ */
+static inline struct netlbl_lsm_secattr *netlbl_secattr_alloc(gfp_t flags)
+{
+ return kzalloc(sizeof(struct netlbl_lsm_secattr), flags);
+}
+
+/**
+ * netlbl_secattr_free - Frees a netlbl_lsm_secattr struct
+ * @secattr: the struct to free
+ *
+ * Description:
+ * Frees @secattr including all of the internal buffers.
+ *
+ */
+static inline void netlbl_secattr_free(struct netlbl_lsm_secattr *secattr)
+{
+ netlbl_secattr_destroy(secattr);
+ kfree(secattr);
+}
+
+#ifdef CONFIG_NETLABEL
+/*
+ * LSM configuration operations
+ */
+int netlbl_cfg_map_del(const char *domain,
+ u16 family,
+ const void *addr,
+ const void *mask,
+ struct netlbl_audit *audit_info);
+int netlbl_cfg_unlbl_map_add(const char *domain,
+ u16 family,
+ const void *addr,
+ const void *mask,
+ struct netlbl_audit *audit_info);
+int netlbl_cfg_unlbl_static_add(struct net *net,
+ const char *dev_name,
+ const void *addr,
+ const void *mask,
+ u16 family,
+ u32 secid,
+ struct netlbl_audit *audit_info);
+int netlbl_cfg_unlbl_static_del(struct net *net,
+ const char *dev_name,
+ const void *addr,
+ const void *mask,
+ u16 family,
+ struct netlbl_audit *audit_info);
+int netlbl_cfg_cipsov4_add(struct cipso_v4_doi *doi_def,
+ struct netlbl_audit *audit_info);
+void netlbl_cfg_cipsov4_del(u32 doi, struct netlbl_audit *audit_info);
+int netlbl_cfg_cipsov4_map_add(u32 doi,
+ const char *domain,
+ const struct in_addr *addr,
+ const struct in_addr *mask,
+ struct netlbl_audit *audit_info);
+int netlbl_cfg_calipso_add(struct calipso_doi *doi_def,
+ struct netlbl_audit *audit_info);
+void netlbl_cfg_calipso_del(u32 doi, struct netlbl_audit *audit_info);
+int netlbl_cfg_calipso_map_add(u32 doi,
+ const char *domain,
+ const struct in6_addr *addr,
+ const struct in6_addr *mask,
+ struct netlbl_audit *audit_info);
+/*
+ * LSM security attribute operations
+ */
+int netlbl_catmap_walk(struct netlbl_lsm_catmap *catmap, u32 offset);
+int netlbl_catmap_walkrng(struct netlbl_lsm_catmap *catmap, u32 offset);
+int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap,
+ u32 *offset,
+ unsigned long *bitmap);
+int netlbl_catmap_setbit(struct netlbl_lsm_catmap **catmap,
+ u32 bit,
+ gfp_t flags);
+int netlbl_catmap_setrng(struct netlbl_lsm_catmap **catmap,
+ u32 start,
+ u32 end,
+ gfp_t flags);
+int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap,
+ u32 offset,
+ unsigned long bitmap,
+ gfp_t flags);
+
+/* Bitmap functions
+ */
+int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
+ u32 offset, u8 state);
+void netlbl_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state);
+
+/*
+ * LSM protocol operations (NetLabel LSM/kernel API)
+ */
+int netlbl_enabled(void);
+int netlbl_sock_setattr(struct sock *sk,
+ u16 family,
+ const struct netlbl_lsm_secattr *secattr);
+void netlbl_sock_delattr(struct sock *sk);
+int netlbl_sock_getattr(struct sock *sk,
+ struct netlbl_lsm_secattr *secattr);
+int netlbl_conn_setattr(struct sock *sk,
+ struct sockaddr *addr,
+ const struct netlbl_lsm_secattr *secattr);
+int netlbl_req_setattr(struct request_sock *req,
+ const struct netlbl_lsm_secattr *secattr);
+void netlbl_req_delattr(struct request_sock *req);
+int netlbl_skbuff_setattr(struct sk_buff *skb,
+ u16 family,
+ const struct netlbl_lsm_secattr *secattr);
+int netlbl_skbuff_getattr(const struct sk_buff *skb,
+ u16 family,
+ struct netlbl_lsm_secattr *secattr);
+void netlbl_skbuff_err(struct sk_buff *skb, u16 family, int error, int gateway);
+
+/*
+ * LSM label mapping cache operations
+ */
+void netlbl_cache_invalidate(void);
+int netlbl_cache_add(const struct sk_buff *skb, u16 family,
+ const struct netlbl_lsm_secattr *secattr);
+
+/*
+ * Protocol engine operations
+ */
+struct audit_buffer *netlbl_audit_start(int type,
+ struct netlbl_audit *audit_info);
+#else
+static inline int netlbl_cfg_map_del(const char *domain,
+ u16 family,
+ const void *addr,
+ const void *mask,
+ struct netlbl_audit *audit_info)
+{
+ return -ENOSYS;
+}
+static inline int netlbl_cfg_unlbl_map_add(const char *domain,
+ u16 family,
+ const void *addr,
+ const void *mask,
+ struct netlbl_audit *audit_info)
+{
+ return -ENOSYS;
+}
+static inline int netlbl_cfg_unlbl_static_add(struct net *net,
+ const char *dev_name,
+ const void *addr,
+ const void *mask,
+ u16 family,
+ u32 secid,
+ struct netlbl_audit *audit_info)
+{
+ return -ENOSYS;
+}
+static inline int netlbl_cfg_unlbl_static_del(struct net *net,
+ const char *dev_name,
+ const void *addr,
+ const void *mask,
+ u16 family,
+ struct netlbl_audit *audit_info)
+{
+ return -ENOSYS;
+}
+static inline int netlbl_cfg_cipsov4_add(struct cipso_v4_doi *doi_def,
+ struct netlbl_audit *audit_info)
+{
+ return -ENOSYS;
+}
+static inline void netlbl_cfg_cipsov4_del(u32 doi,
+ struct netlbl_audit *audit_info)
+{
+ return;
+}
+static inline int netlbl_cfg_cipsov4_map_add(u32 doi,
+ const char *domain,
+ const struct in_addr *addr,
+ const struct in_addr *mask,
+ struct netlbl_audit *audit_info)
+{
+ return -ENOSYS;
+}
+static inline int netlbl_cfg_calipso_add(struct calipso_doi *doi_def,
+ struct netlbl_audit *audit_info)
+{
+ return -ENOSYS;
+}
+static inline void netlbl_cfg_calipso_del(u32 doi,
+ struct netlbl_audit *audit_info)
+{
+ return;
+}
+static inline int netlbl_cfg_calipso_map_add(u32 doi,
+ const char *domain,
+ const struct in6_addr *addr,
+ const struct in6_addr *mask,
+ struct netlbl_audit *audit_info)
+{
+ return -ENOSYS;
+}
+static inline int netlbl_catmap_walk(struct netlbl_lsm_catmap *catmap,
+ u32 offset)
+{
+ return -ENOENT;
+}
+static inline int netlbl_catmap_walkrng(struct netlbl_lsm_catmap *catmap,
+ u32 offset)
+{
+ return -ENOENT;
+}
+static inline int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap,
+ u32 *offset,
+ unsigned long *bitmap)
+{
+ return 0;
+}
+static inline int netlbl_catmap_setbit(struct netlbl_lsm_catmap **catmap,
+ u32 bit,
+ gfp_t flags)
+{
+ return 0;
+}
+static inline int netlbl_catmap_setrng(struct netlbl_lsm_catmap **catmap,
+ u32 start,
+ u32 end,
+ gfp_t flags)
+{
+ return 0;
+}
+static inline int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap,
+ u32 offset,
+ unsigned long bitmap,
+ gfp_t flags)
+{
+ return 0;
+}
+static inline int netlbl_enabled(void)
+{
+ return 0;
+}
+static inline int netlbl_sock_setattr(struct sock *sk,
+ u16 family,
+ const struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOSYS;
+}
+static inline void netlbl_sock_delattr(struct sock *sk)
+{
+}
+static inline int netlbl_sock_getattr(struct sock *sk,
+ struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOSYS;
+}
+static inline int netlbl_conn_setattr(struct sock *sk,
+ struct sockaddr *addr,
+ const struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOSYS;
+}
+static inline int netlbl_req_setattr(struct request_sock *req,
+ const struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOSYS;
+}
+static inline void netlbl_req_delattr(struct request_sock *req)
+{
+ return;
+}
+static inline int netlbl_skbuff_setattr(struct sk_buff *skb,
+ u16 family,
+ const struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOSYS;
+}
+static inline int netlbl_skbuff_getattr(const struct sk_buff *skb,
+ u16 family,
+ struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOSYS;
+}
+static inline void netlbl_skbuff_err(struct sk_buff *skb,
+ u16 family,
+ int error,
+ int gateway)
+{
+ return;
+}
+static inline void netlbl_cache_invalidate(void)
+{
+ return;
+}
+static inline int netlbl_cache_add(const struct sk_buff *skb, u16 family,
+ const struct netlbl_lsm_secattr *secattr)
+{
+ return 0;
+}
+static inline struct audit_buffer *netlbl_audit_start(int type,
+ struct netlbl_audit *audit_info)
+{
+ return NULL;
+}
+#endif /* CONFIG_NETLABEL */
+
+const struct netlbl_calipso_ops *
+netlbl_calipso_ops_register(const struct netlbl_calipso_ops *ops);
+
+#endif /* _NETLABEL_H */
diff --git a/include/net/netlink.h b/include/net/netlink.h
new file mode 100644
index 000000000..a686c9041
--- /dev/null
+++ b/include/net/netlink.h
@@ -0,0 +1,2004 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_NETLINK_H
+#define __NET_NETLINK_H
+
+#include <linux/types.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/in6.h>
+
+/* ========================================================================
+ * Netlink Messages and Attributes Interface (As Seen On TV)
+ * ------------------------------------------------------------------------
+ * Messages Interface
+ * ------------------------------------------------------------------------
+ *
+ * Message Format:
+ * <--- nlmsg_total_size(payload) --->
+ * <-- nlmsg_msg_size(payload) ->
+ * +----------+- - -+-------------+- - -+-------- - -
+ * | nlmsghdr | Pad | Payload | Pad | nlmsghdr
+ * +----------+- - -+-------------+- - -+-------- - -
+ * nlmsg_data(nlh)---^ ^
+ * nlmsg_next(nlh)-----------------------+
+ *
+ * Payload Format:
+ * <---------------------- nlmsg_len(nlh) --------------------->
+ * <------ hdrlen ------> <- nlmsg_attrlen(nlh, hdrlen) ->
+ * +----------------------+- - -+--------------------------------+
+ * | Family Header | Pad | Attributes |
+ * +----------------------+- - -+--------------------------------+
+ * nlmsg_attrdata(nlh, hdrlen)---^
+ *
+ * Data Structures:
+ * struct nlmsghdr netlink message header
+ *
+ * Message Construction:
+ * nlmsg_new() create a new netlink message
+ * nlmsg_put() add a netlink message to an skb
+ * nlmsg_put_answer() callback based nlmsg_put()
+ * nlmsg_end() finalize netlink message
+ * nlmsg_get_pos() return current position in message
+ * nlmsg_trim() trim part of message
+ * nlmsg_cancel() cancel message construction
+ * nlmsg_free() free a netlink message
+ *
+ * Message Sending:
+ * nlmsg_multicast() multicast message to several groups
+ * nlmsg_unicast() unicast a message to a single socket
+ * nlmsg_notify() send notification message
+ *
+ * Message Length Calculations:
+ * nlmsg_msg_size(payload) length of message w/o padding
+ * nlmsg_total_size(payload) length of message w/ padding
+ * nlmsg_padlen(payload) length of padding at tail
+ *
+ * Message Payload Access:
+ * nlmsg_data(nlh) head of message payload
+ * nlmsg_len(nlh) length of message payload
+ * nlmsg_attrdata(nlh, hdrlen) head of attributes data
+ * nlmsg_attrlen(nlh, hdrlen) length of attributes data
+ *
+ * Message Parsing:
+ * nlmsg_ok(nlh, remaining) does nlh fit into remaining bytes?
+ * nlmsg_next(nlh, remaining) get next netlink message
+ * nlmsg_parse() parse attributes of a message
+ * nlmsg_find_attr() find an attribute in a message
+ * nlmsg_for_each_msg() loop over all messages
+ * nlmsg_validate() validate netlink message incl. attrs
+ * nlmsg_for_each_attr() loop over all attributes
+ *
+ * Misc:
+ * nlmsg_report() report back to application?
+ *
+ * ------------------------------------------------------------------------
+ * Attributes Interface
+ * ------------------------------------------------------------------------
+ *
+ * Attribute Format:
+ * <------- nla_total_size(payload) ------->
+ * <---- nla_attr_size(payload) ----->
+ * +----------+- - -+- - - - - - - - - +- - -+-------- - -
+ * | Header | Pad | Payload | Pad | Header
+ * +----------+- - -+- - - - - - - - - +- - -+-------- - -
+ * <- nla_len(nla) -> ^
+ * nla_data(nla)----^ |
+ * nla_next(nla)-----------------------------'
+ *
+ * Data Structures:
+ * struct nlattr netlink attribute header
+ *
+ * Attribute Construction:
+ * nla_reserve(skb, type, len) reserve room for an attribute
+ * nla_reserve_nohdr(skb, len) reserve room for an attribute w/o hdr
+ * nla_put(skb, type, len, data) add attribute to skb
+ * nla_put_nohdr(skb, len, data) add attribute w/o hdr
+ * nla_append(skb, len, data) append data to skb
+ *
+ * Attribute Construction for Basic Types:
+ * nla_put_u8(skb, type, value) add u8 attribute to skb
+ * nla_put_u16(skb, type, value) add u16 attribute to skb
+ * nla_put_u32(skb, type, value) add u32 attribute to skb
+ * nla_put_u64_64bit(skb, type,
+ * value, padattr) add u64 attribute to skb
+ * nla_put_s8(skb, type, value) add s8 attribute to skb
+ * nla_put_s16(skb, type, value) add s16 attribute to skb
+ * nla_put_s32(skb, type, value) add s32 attribute to skb
+ * nla_put_s64(skb, type, value,
+ * padattr) add s64 attribute to skb
+ * nla_put_string(skb, type, str) add string attribute to skb
+ * nla_put_flag(skb, type) add flag attribute to skb
+ * nla_put_msecs(skb, type, jiffies,
+ * padattr) add msecs attribute to skb
+ * nla_put_in_addr(skb, type, addr) add IPv4 address attribute to skb
+ * nla_put_in6_addr(skb, type, addr) add IPv6 address attribute to skb
+ *
+ * Nested Attributes Construction:
+ * nla_nest_start(skb, type) start a nested attribute
+ * nla_nest_end(skb, nla) finalize a nested attribute
+ * nla_nest_cancel(skb, nla) cancel nested attribute construction
+ *
+ * Attribute Length Calculations:
+ * nla_attr_size(payload) length of attribute w/o padding
+ * nla_total_size(payload) length of attribute w/ padding
+ * nla_padlen(payload) length of padding
+ *
+ * Attribute Payload Access:
+ * nla_data(nla) head of attribute payload
+ * nla_len(nla) length of attribute payload
+ *
+ * Attribute Payload Access for Basic Types:
+ * nla_get_u8(nla) get payload for a u8 attribute
+ * nla_get_u16(nla) get payload for a u16 attribute
+ * nla_get_u32(nla) get payload for a u32 attribute
+ * nla_get_u64(nla) get payload for a u64 attribute
+ * nla_get_s8(nla) get payload for a s8 attribute
+ * nla_get_s16(nla) get payload for a s16 attribute
+ * nla_get_s32(nla) get payload for a s32 attribute
+ * nla_get_s64(nla) get payload for a s64 attribute
+ * nla_get_flag(nla) return 1 if flag is true
+ * nla_get_msecs(nla) get payload for a msecs attribute
+ *
+ * Attribute Misc:
+ * nla_memcpy(dest, nla, count) copy attribute into memory
+ * nla_memcmp(nla, data, size) compare attribute with memory area
+ * nla_strscpy(dst, nla, size) copy attribute to a sized string
+ * nla_strcmp(nla, str) compare attribute with string
+ *
+ * Attribute Parsing:
+ * nla_ok(nla, remaining) does nla fit into remaining bytes?
+ * nla_next(nla, remaining) get next netlink attribute
+ * nla_validate() validate a stream of attributes
+ * nla_validate_nested() validate a stream of nested attributes
+ * nla_find() find attribute in stream of attributes
+ * nla_find_nested() find attribute in nested attributes
+ * nla_parse() parse and validate stream of attrs
+ * nla_parse_nested() parse nested attributes
+ * nla_for_each_attr() loop over all attributes
+ * nla_for_each_nested() loop over the nested attributes
+ *=========================================================================
+ */
+
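The diagrams above translate into a few alignment macros from the uapi header. As a userspace sketch of the same layout math that nlmsg_put(), nla_put_u32() and nlmsg_end() perform on an skb (illustrative only; real users should go through the in-kernel helpers or a library such as libnl):

#include <linux/netlink.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* keep the buffer aligned as a netlink message would be */
	unsigned char buf[256] __attribute__((aligned(4)));
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct nlattr *nla;
	unsigned int value = 42;

	memset(buf, 0, sizeof(buf));
	nlh->nlmsg_len = NLMSG_HDRLEN;              /* like nlmsg_put() */

	/* like nla_put_u32(skb, 1, 42): header, payload, then padding */
	nla = (struct nlattr *)(buf + NLMSG_ALIGN(nlh->nlmsg_len));
	nla->nla_type = 1;
	nla->nla_len = NLA_HDRLEN + sizeof(value);
	memcpy((char *)nla + NLA_HDRLEN, &value, sizeof(value));

	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) +
			 NLA_ALIGN(nla->nla_len);   /* like nlmsg_end() */
	printf("nlmsg_len=%u nla_len=%u\n", nlh->nlmsg_len, nla->nla_len);
	return 0;
}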
+ /**
+ * Standard attribute types to specify validation policy
+ */
+enum {
+ NLA_UNSPEC,
+ NLA_U8,
+ NLA_U16,
+ NLA_U32,
+ NLA_U64,
+ NLA_STRING,
+ NLA_FLAG,
+ NLA_MSECS,
+ NLA_NESTED,
+ NLA_NESTED_ARRAY,
+ NLA_NUL_STRING,
+ NLA_BINARY,
+ NLA_S8,
+ NLA_S16,
+ NLA_S32,
+ NLA_S64,
+ NLA_BITFIELD32,
+ NLA_REJECT,
+ NLA_BE16,
+ NLA_BE32,
+ __NLA_TYPE_MAX,
+};
+
+#define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1)
+
+struct netlink_range_validation {
+ u64 min, max;
+};
+
+struct netlink_range_validation_signed {
+ s64 min, max;
+};
+
+enum nla_policy_validation {
+ NLA_VALIDATE_NONE,
+ NLA_VALIDATE_RANGE,
+ NLA_VALIDATE_RANGE_WARN_TOO_LONG,
+ NLA_VALIDATE_MIN,
+ NLA_VALIDATE_MAX,
+ NLA_VALIDATE_MASK,
+ NLA_VALIDATE_RANGE_PTR,
+ NLA_VALIDATE_FUNCTION,
+};
+
+/**
+ * struct nla_policy - attribute validation policy
+ * @type: Type of attribute or NLA_UNSPEC
+ * @validation_type: type of attribute validation done in addition to
+ * type-specific validation (e.g. range, function call), see
+ * &enum nla_policy_validation
+ * @len: Type specific length of payload
+ *
+ * Policies are defined as arrays of this struct, the array must be
+ * accessible by attribute type up to the highest identifier to be expected.
+ *
+ * Meaning of `len' field:
+ * NLA_STRING Maximum length of string
+ * NLA_NUL_STRING Maximum length of string (excluding NUL)
+ * NLA_FLAG Unused
+ * NLA_BINARY Maximum length of attribute payload
+ * (but see also below with the validation type)
+ * NLA_NESTED,
+ * NLA_NESTED_ARRAY Length verification is done by checking len of
+ * nested header (or empty); len field is used if
+ * nested_policy is also used, for the max attr
+ * number in the nested policy.
+ * NLA_U8, NLA_U16,
+ * NLA_U32, NLA_U64,
+ * NLA_S8, NLA_S16,
+ * NLA_S32, NLA_S64,
+ * NLA_BE16, NLA_BE32,
+ * NLA_MSECS Leaving the length field zero will verify that the
+ * payload fits the given type; setting it verifies a minimum
+ * payload length, just like "All other"
+ * NLA_BITFIELD32 Unused
+ * NLA_REJECT Unused
+ * All other Minimum length of attribute payload
+ *
+ * Meaning of validation union:
+ * NLA_BITFIELD32 This is a 32-bit bitmap/bitselector attribute and
+ * `bitfield32_valid' is the u32 value of valid flags
+ * NLA_REJECT This attribute is always rejected and `reject_message'
+ * may point to a string to report as the error instead
+ * of the generic one in extended ACK.
+ * NLA_NESTED `nested_policy' points to a nested policy to validate,
+ * must also set `len' to the max attribute number. Use the
+ * provided NLA_POLICY_NESTED() macro.
+ * Note that nla_parse() will validate, but of course not
+ * parse, the nested sub-policies.
+ * NLA_NESTED_ARRAY `nested_policy' points to a nested policy to validate,
+ * must also set `len' to the max attribute number. Use
+ * the provided NLA_POLICY_NESTED_ARRAY() macro.
+ * The difference to NLA_NESTED is the structure:
+ * NLA_NESTED has the nested attributes directly inside
+ * while an array has the nested attributes at another
+ * level down and the attribute types directly in the
+ * nesting don't matter.
+ * NLA_U8,
+ * NLA_U16,
+ * NLA_U32,
+ * NLA_U64,
+ * NLA_BE16,
+ * NLA_BE32,
+ * NLA_S8,
+ * NLA_S16,
+ * NLA_S32,
+ * NLA_S64 The `min' and `max' fields are used depending on the
+ * validation_type field, if that is min/max/range then
+ * the min, max or both are used (respectively) to check
+ * the value of the integer attribute.
+ * Note that in the interest of code simplicity and
+ * struct size both limits are s16, so you cannot
+ * enforce a range that doesn't fall within the range
+ * of s16 - do that as usual in the code instead.
+ * Use the NLA_POLICY_MIN(), NLA_POLICY_MAX() and
+ * NLA_POLICY_RANGE() macros.
+ * NLA_U8,
+ * NLA_U16,
+ * NLA_U32,
+ * NLA_U64 If the validation_type field instead is set to
+ * NLA_VALIDATE_RANGE_PTR, `range' must be a pointer
+ * to a struct netlink_range_validation that indicates
+ * the min/max values.
+ * Use NLA_POLICY_FULL_RANGE().
+ * NLA_S8,
+ * NLA_S16,
+ * NLA_S32,
+ * NLA_S64 If the validation_type field instead is set to
+ * NLA_VALIDATE_RANGE_PTR, `range_signed' must be a
+ * pointer to a struct netlink_range_validation_signed
+ * that indicates the min/max values.
+ * Use NLA_POLICY_FULL_RANGE_SIGNED().
+ *
+ * NLA_BINARY If the validation type is like the ones for integers
+ * above, then the min/max length (not value like for
+ * integers) of the attribute is enforced.
+ *
+ * All other Unused - but note that it's a union
+ *
+ * Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN:
+ * NLA_BINARY Validation function called for the attribute.
+ * All other Unused - but note that it's a union
+ *
+ * Example:
+ *
+ * static const u32 myvalidflags = 0xff231023;
+ *
+ * static const struct nla_policy my_policy[ATTR_MAX+1] = {
+ * [ATTR_FOO] = { .type = NLA_U16 },
+ * [ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ },
+ * [ATTR_BAZ] = NLA_POLICY_EXACT_LEN(sizeof(struct mystruct)),
+ * [ATTR_GOO] = NLA_POLICY_BITFIELD32(myvalidflags),
+ * };
+ */
+struct nla_policy {
+ u8 type;
+ u8 validation_type;
+ u16 len;
+ union {
+ /**
+ * @strict_start_type: first attribute to validate strictly
+ *
+ * This entry is special, and used for the attribute at index 0
+ * only, and specifies special data about the policy, namely it
+ * specifies the "boundary type" where strict length validation
+ * starts for any attribute types >= this value, also, strict
+ * nesting validation starts here.
+ *
+ * Additionally, it means that NLA_UNSPEC is actually NLA_REJECT
+ * for any types >= this, so you need to use NLA_POLICY_MIN_LEN() to
+ * get the previous pure { .len = xyz } behaviour. The advantage
+ * of this is that types not specified in the policy will be
+ * rejected.
+ *
+ * For completely new families it should be set to 1 so that the
+ * validation is enforced for all attributes. For existing ones
+ * it should be set at least when new attributes are added to
+ * the enum used by the policy, and be set to the new value that
+ * was added, to enforce strict validation from then on.
+ */
+ u16 strict_start_type;
+
+ /* private: use NLA_POLICY_*() to set */
+ const u32 bitfield32_valid;
+ const u32 mask;
+ const char *reject_message;
+ const struct nla_policy *nested_policy;
+ struct netlink_range_validation *range;
+ struct netlink_range_validation_signed *range_signed;
+ struct {
+ s16 min, max;
+ };
+ int (*validate)(const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
+ };
+};
+
+#define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN)
+#define NLA_POLICY_ETH_ADDR_COMPAT NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN)
+
+#define _NLA_POLICY_NESTED(maxattr, policy) \
+ { .type = NLA_NESTED, .nested_policy = policy, .len = maxattr }
+#define _NLA_POLICY_NESTED_ARRAY(maxattr, policy) \
+ { .type = NLA_NESTED_ARRAY, .nested_policy = policy, .len = maxattr }
+#define NLA_POLICY_NESTED(policy) \
+ _NLA_POLICY_NESTED(ARRAY_SIZE(policy) - 1, policy)
+#define NLA_POLICY_NESTED_ARRAY(policy) \
+ _NLA_POLICY_NESTED_ARRAY(ARRAY_SIZE(policy) - 1, policy)
+#define NLA_POLICY_BITFIELD32(valid) \
+ { .type = NLA_BITFIELD32, .bitfield32_valid = valid }
+
+#define __NLA_IS_UINT_TYPE(tp) \
+ (tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || tp == NLA_U64)
+#define __NLA_IS_SINT_TYPE(tp) \
+ (tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64)
+#define __NLA_IS_BEINT_TYPE(tp) \
+ (tp == NLA_BE16 || tp == NLA_BE32)
+
+#define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition))
+#define NLA_ENSURE_UINT_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp)) + tp)
+#define NLA_ENSURE_UINT_OR_BINARY_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \
+ tp == NLA_MSECS || \
+ tp == NLA_BINARY) + tp)
+#define NLA_ENSURE_SINT_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_SINT_TYPE(tp)) + tp)
+#define NLA_ENSURE_INT_OR_BINARY_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \
+ __NLA_IS_SINT_TYPE(tp) || \
+ __NLA_IS_BEINT_TYPE(tp) || \
+ tp == NLA_MSECS || \
+ tp == NLA_BINARY) + tp)
+#define NLA_ENSURE_NO_VALIDATION_PTR(tp) \
+ (__NLA_ENSURE(tp != NLA_BITFIELD32 && \
+ tp != NLA_REJECT && \
+ tp != NLA_NESTED && \
+ tp != NLA_NESTED_ARRAY) + tp)
+#define NLA_ENSURE_BEINT_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_BEINT_TYPE(tp)) + tp)
+
+#define NLA_POLICY_RANGE(tp, _min, _max) { \
+ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_RANGE, \
+ .min = _min, \
+ .max = _max \
+}
+
+#define NLA_POLICY_FULL_RANGE(tp, _range) { \
+ .type = NLA_ENSURE_UINT_OR_BINARY_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_RANGE_PTR, \
+ .range = _range, \
+}
+
+#define NLA_POLICY_FULL_RANGE_SIGNED(tp, _range) { \
+ .type = NLA_ENSURE_SINT_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_RANGE_PTR, \
+ .range_signed = _range, \
+}
+
+#define NLA_POLICY_MIN(tp, _min) { \
+ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_MIN, \
+ .min = _min, \
+}
+
+#define NLA_POLICY_MAX(tp, _max) { \
+ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_MAX, \
+ .max = _max, \
+}
+
+#define NLA_POLICY_MASK(tp, _mask) { \
+ .type = NLA_ENSURE_UINT_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_MASK, \
+ .mask = _mask, \
+}
+
+#define NLA_POLICY_VALIDATE_FN(tp, fn, ...) { \
+ .type = NLA_ENSURE_NO_VALIDATION_PTR(tp), \
+ .validation_type = NLA_VALIDATE_FUNCTION, \
+ .validate = fn, \
+ .len = __VA_ARGS__ + 0, \
+}
+
+#define NLA_POLICY_EXACT_LEN(_len) NLA_POLICY_RANGE(NLA_BINARY, _len, _len)
+#define NLA_POLICY_EXACT_LEN_WARN(_len) { \
+ .type = NLA_BINARY, \
+ .validation_type = NLA_VALIDATE_RANGE_WARN_TOO_LONG, \
+ .min = _len, \
+ .max = _len \
+}
+#define NLA_POLICY_MIN_LEN(_len) NLA_POLICY_MIN(NLA_BINARY, _len)
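+
+/*
+ * Example (illustrative sketch, not taken from an in-tree family): a policy
+ * built from the validation macros above. The attribute names and limits
+ * are hypothetical.
+ *
+ * enum { XATTR_UNSPEC, XATTR_PORT, XATTR_IFINDEX, XATTR_NAME, __XATTR_MAX };
+ * #define XATTR_MAX (__XATTR_MAX - 1)
+ *
+ * static const struct nla_policy xattr_policy[XATTR_MAX + 1] = {
+ *	[XATTR_PORT]	= NLA_POLICY_RANGE(NLA_U16, 1, 1024),
+ *	[XATTR_IFINDEX]	= NLA_POLICY_MIN(NLA_S32, 1),
+ *	[XATTR_NAME]	= { .type = NLA_NUL_STRING, .len = 15 },
+ * };
+ */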
+
+/**
+ * struct nl_info - netlink source information
+ * @nlh: Netlink message header of original request
+ * @nl_net: Network namespace
+ * @portid: Netlink PORTID of requesting application
+ * @skip_notify: Skip netlink notifications to user space
+ * @skip_notify_kernel: Skip selected in-kernel notifications
+ */
+struct nl_info {
+ struct nlmsghdr *nlh;
+ struct net *nl_net;
+ u32 portid;
+ u8 skip_notify:1,
+ skip_notify_kernel:1;
+};
+
+/**
+ * enum netlink_validation - netlink message/attribute validation levels
+ * @NL_VALIDATE_LIBERAL: Old-style "be liberal" validation, not caring about
+ * extra data at the end of the message, attributes being longer than
+ * they should be, or unknown attributes being present.
+ * @NL_VALIDATE_TRAILING: Reject junk data encountered after attribute parsing.
+ * @NL_VALIDATE_MAXTYPE: Reject attributes > max type; Together with _TRAILING
+ * this is equivalent to the old nla_parse_strict()/nlmsg_parse_strict().
+ * @NL_VALIDATE_UNSPEC: Reject attributes with NLA_UNSPEC in the policy.
+ * This can safely be set by the kernel when the given policy has no
+ * NLA_UNSPEC anymore, and can thus be used to ensure policy entries
+ * are enforced going forward.
+ * @NL_VALIDATE_STRICT_ATTRS: strict attribute policy parsing (e.g.
+ * U8, U16, U32 must have exact size, etc.)
+ * @NL_VALIDATE_NESTED: Check that NLA_F_NESTED is set for NLA_NESTED(_ARRAY)
+ * and unset for other policies.
+ */
+enum netlink_validation {
+ NL_VALIDATE_LIBERAL = 0,
+ NL_VALIDATE_TRAILING = BIT(0),
+ NL_VALIDATE_MAXTYPE = BIT(1),
+ NL_VALIDATE_UNSPEC = BIT(2),
+ NL_VALIDATE_STRICT_ATTRS = BIT(3),
+ NL_VALIDATE_NESTED = BIT(4),
+};
+
+#define NL_VALIDATE_DEPRECATED_STRICT (NL_VALIDATE_TRAILING |\
+ NL_VALIDATE_MAXTYPE)
+#define NL_VALIDATE_STRICT (NL_VALIDATE_TRAILING |\
+ NL_VALIDATE_MAXTYPE |\
+ NL_VALIDATE_UNSPEC |\
+ NL_VALIDATE_STRICT_ATTRS |\
+ NL_VALIDATE_NESTED)
+
+int netlink_rcv_skb(struct sk_buff *skb,
+ int (*cb)(struct sk_buff *, struct nlmsghdr *,
+ struct netlink_ext_ack *));
+int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
+ unsigned int group, int report, gfp_t flags);
+
+int __nla_validate(const struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy, unsigned int validate,
+ struct netlink_ext_ack *extack);
+int __nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
+ int len, const struct nla_policy *policy, unsigned int validate,
+ struct netlink_ext_ack *extack);
+int nla_policy_len(const struct nla_policy *, int);
+struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
+ssize_t nla_strscpy(char *dst, const struct nlattr *nla, size_t dstsize);
+char *nla_strdup(const struct nlattr *nla, gfp_t flags);
+int nla_memcpy(void *dest, const struct nlattr *src, int count);
+int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
+int nla_strcmp(const struct nlattr *nla, const char *str);
+struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
+ int attrlen, int padattr);
+void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
+struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype,
+ int attrlen, int padattr);
+void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
+void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data);
+void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data, int padattr);
+void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
+int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
+int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data, int padattr);
+int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
+int nla_append(struct sk_buff *skb, int attrlen, const void *data);
+
+/**************************************************************************
+ * Netlink Messages
+ **************************************************************************/
+
+/**
+ * nlmsg_msg_size - length of netlink message not including padding
+ * @payload: length of message payload
+ */
+static inline int nlmsg_msg_size(int payload)
+{
+ return NLMSG_HDRLEN + payload;
+}
+
+/**
+ * nlmsg_total_size - length of netlink message including padding
+ * @payload: length of message payload
+ */
+static inline int nlmsg_total_size(int payload)
+{
+ return NLMSG_ALIGN(nlmsg_msg_size(payload));
+}
+
+/**
+ * nlmsg_padlen - length of padding at the message's tail
+ * @payload: length of message payload
+ */
+static inline int nlmsg_padlen(int payload)
+{
+ return nlmsg_total_size(payload) - nlmsg_msg_size(payload);
+}
+
+/**
+ * nlmsg_data - head of message payload
+ * @nlh: netlink message header
+ */
+static inline void *nlmsg_data(const struct nlmsghdr *nlh)
+{
+ return (unsigned char *) nlh + NLMSG_HDRLEN;
+}
+
+/**
+ * nlmsg_len - length of message payload
+ * @nlh: netlink message header
+ */
+static inline int nlmsg_len(const struct nlmsghdr *nlh)
+{
+ return nlh->nlmsg_len - NLMSG_HDRLEN;
+}
+
+/**
+ * nlmsg_attrdata - head of attributes data
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ */
+static inline struct nlattr *nlmsg_attrdata(const struct nlmsghdr *nlh,
+ int hdrlen)
+{
+ unsigned char *data = nlmsg_data(nlh);
+ return (struct nlattr *) (data + NLMSG_ALIGN(hdrlen));
+}
+
+/**
+ * nlmsg_attrlen - length of attributes data
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ */
+static inline int nlmsg_attrlen(const struct nlmsghdr *nlh, int hdrlen)
+{
+ return nlmsg_len(nlh) - NLMSG_ALIGN(hdrlen);
+}
+
+/**
+ * nlmsg_ok - check if the netlink message fits into the remaining bytes
+ * @nlh: netlink message header
+ * @remaining: number of bytes remaining in message stream
+ */
+static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
+{
+ return (remaining >= (int) sizeof(struct nlmsghdr) &&
+ nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
+ nlh->nlmsg_len <= remaining);
+}
+
+/**
+ * nlmsg_next - next netlink message in message stream
+ * @nlh: netlink message header
+ * @remaining: number of bytes remaining in message stream
+ *
+ * Returns the next netlink message in the message stream and
+ * decrements remaining by the size of the current message.
+ */
+static inline struct nlmsghdr *
+nlmsg_next(const struct nlmsghdr *nlh, int *remaining)
+{
+ int totlen = NLMSG_ALIGN(nlh->nlmsg_len);
+
+ *remaining -= totlen;
+
+ return (struct nlmsghdr *) ((unsigned char *) nlh + totlen);
+}
+
+/**
+ * nla_parse - Parse a stream of attributes into a tb buffer
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @policy: validation policy
+ * @extack: extended ACK pointer
+ *
+ * Parses a stream of attributes and stores a pointer to each attribute in
+ * the tb array accessible via the attribute type. Attributes with a type
+ * exceeding maxtype will be rejected, policy must be specified, attributes
+ * will be validated in the strictest way possible.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static inline int nla_parse(struct nlattr **tb, int maxtype,
+ const struct nlattr *head, int len,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_parse(tb, maxtype, head, len, policy,
+ NL_VALIDATE_STRICT, extack);
+}
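+
+/*
+ * Example (illustrative sketch): strict parsing of a raw attribute stream
+ * into a tb[] array. xattr_policy, XATTR_MAX and XATTR_PORT are the
+ * hypothetical names from the policy example above.
+ *
+ * struct nlattr *tb[XATTR_MAX + 1];
+ * int err;
+ *
+ * err = nla_parse(tb, XATTR_MAX, head, len, xattr_policy, extack);
+ * if (err < 0)
+ *	return err;
+ * if (tb[XATTR_PORT])
+ *	port = nla_get_u16(tb[XATTR_PORT]);
+ */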
+
+/**
+ * nla_parse_deprecated - Parse a stream of attributes into a tb buffer
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @policy: validation policy
+ * @extack: extended ACK pointer
+ *
+ * Parses a stream of attributes and stores a pointer to each attribute in
+ * the tb array accessible via the attribute type. Attributes with a type
+ * exceeding maxtype will be ignored and attributes from the policy are not
+ * always strictly validated (only for new attributes).
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static inline int nla_parse_deprecated(struct nlattr **tb, int maxtype,
+ const struct nlattr *head, int len,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_parse(tb, maxtype, head, len, policy,
+ NL_VALIDATE_LIBERAL, extack);
+}
+
+/**
+ * nla_parse_deprecated_strict - Parse a stream of attributes into a tb buffer
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @policy: validation policy
+ * @extack: extended ACK pointer
+ *
+ * Parses a stream of attributes and stores a pointer to each attribute in
+ * the tb array accessible via the attribute type. Attributes with a type
+ * exceeding maxtype will be rejected, as will trailing data, but the
+ * policy is not completely strictly validated (only for new attributes).
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static inline int nla_parse_deprecated_strict(struct nlattr **tb, int maxtype,
+ const struct nlattr *head,
+ int len,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_parse(tb, maxtype, head, len, policy,
+ NL_VALIDATE_DEPRECATED_STRICT, extack);
+}
+
+/**
+ * __nlmsg_parse - parse attributes of a netlink message
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @validate: validation strictness
+ * @extack: extended ACK report struct
+ *
+ * See nla_parse()
+ */
+static inline int __nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
+ struct nlattr *tb[], int maxtype,
+ const struct nla_policy *policy,
+ unsigned int validate,
+ struct netlink_ext_ack *extack)
+{
+ if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) {
+ NL_SET_ERR_MSG(extack, "Invalid header length");
+ return -EINVAL;
+ }
+
+ return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
+ nlmsg_attrlen(nlh, hdrlen), policy, validate,
+ extack);
+}
+
+/**
+ * nlmsg_parse - parse attributes of a netlink message
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ *
+ * See nla_parse()
+ */
+static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
+ struct nlattr *tb[], int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
+ NL_VALIDATE_STRICT, extack);
+}
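+
+/*
+ * Example (illustrative sketch): a message handler usually parses the
+ * attributes following its family specific header in one call. struct
+ * xfam_hdr and the other names are hypothetical.
+ *
+ * static int xfam_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ *			struct netlink_ext_ack *extack)
+ * {
+ *	struct nlattr *tb[XATTR_MAX + 1];
+ *	int err;
+ *
+ *	err = nlmsg_parse(nlh, sizeof(struct xfam_hdr), tb, XATTR_MAX,
+ *			  xattr_policy, extack);
+ *	if (err < 0)
+ *		return err;
+ *	...
+ *	return 0;
+ * }
+ */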
+
+/**
+ * nlmsg_parse_deprecated - parse attributes of a netlink message
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ *
+ * See nla_parse_deprecated()
+ */
+static inline int nlmsg_parse_deprecated(const struct nlmsghdr *nlh, int hdrlen,
+ struct nlattr *tb[], int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
+ NL_VALIDATE_LIBERAL, extack);
+}
+
+/**
+ * nlmsg_parse_deprecated_strict - parse attributes of a netlink message
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ *
+ * See nla_parse_deprecated_strict()
+ */
+static inline int
+nlmsg_parse_deprecated_strict(const struct nlmsghdr *nlh, int hdrlen,
+ struct nlattr *tb[], int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
+ NL_VALIDATE_DEPRECATED_STRICT, extack);
+}
+
+/**
+ * nlmsg_find_attr - find a specific attribute in a netlink message
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @attrtype: type of attribute to look for
+ *
+ * Returns the first attribute which matches the specified type.
+ */
+static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
+ int hdrlen, int attrtype)
+{
+ return nla_find(nlmsg_attrdata(nlh, hdrlen),
+ nlmsg_attrlen(nlh, hdrlen), attrtype);
+}
+
+/**
+ * nla_validate_deprecated - Validate a stream of attributes
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ *
+ * Validates all attributes in the specified attribute stream against the
+ * specified policy. Validation is done in liberal mode.
+ * See documentation of struct nla_policy for more details.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static inline int nla_validate_deprecated(const struct nlattr *head, int len,
+ int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_validate(head, len, maxtype, policy, NL_VALIDATE_LIBERAL,
+ extack);
+}
+
+/**
+ * nla_validate - Validate a stream of attributes
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ *
+ * Validates all attributes in the specified attribute stream against the
+ * specified policy. Validation is done in strict mode.
+ * See documentation of struct nla_policy for more details.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static inline int nla_validate(const struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_validate(head, len, maxtype, policy, NL_VALIDATE_STRICT,
+ extack);
+}
+
+/**
+ * nlmsg_validate_deprecated - validate a netlink message including attributes
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ */
+static inline int nlmsg_validate_deprecated(const struct nlmsghdr *nlh,
+ int hdrlen, int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
+ return -EINVAL;
+
+ return __nla_validate(nlmsg_attrdata(nlh, hdrlen),
+ nlmsg_attrlen(nlh, hdrlen), maxtype,
+ policy, NL_VALIDATE_LIBERAL, extack);
+}
+
+/**
+ * nlmsg_report - need to report back to application?
+ * @nlh: netlink message header
+ *
+ * Returns 1 if a report back to the application is requested.
+ */
+static inline int nlmsg_report(const struct nlmsghdr *nlh)
+{
+ return nlh ? !!(nlh->nlmsg_flags & NLM_F_ECHO) : 0;
+}
+
+/**
+ * nlmsg_for_each_attr - iterate over a stream of attributes
+ * @pos: loop counter, set to current attribute
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @rem: initialized to len, holds bytes currently remaining in stream
+ */
+#define nlmsg_for_each_attr(pos, nlh, hdrlen, rem) \
+ nla_for_each_attr(pos, nlmsg_attrdata(nlh, hdrlen), \
+ nlmsg_attrlen(nlh, hdrlen), rem)
+
+/**
+ * nlmsg_put - Add a new netlink message to an skb
+ * @skb: socket buffer to store message in
+ * @portid: netlink PORTID of requesting application
+ * @seq: sequence number of message
+ * @type: message type
+ * @payload: length of message payload
+ * @flags: message flags
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the message header and payload.
+ */
+static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
+ int type, int payload, int flags)
+{
+ if (unlikely(skb_tailroom(skb) < nlmsg_total_size(payload)))
+ return NULL;
+
+ return __nlmsg_put(skb, portid, seq, type, payload, flags);
+}
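+
+/*
+ * Example (illustrative sketch): the usual construction sequence pairs
+ * nlmsg_put() with nlmsg_end(), and nlmsg_free() on failure. XFAM_CMD_GET
+ * and XATTR_PORT are hypothetical.
+ *
+ * skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ * if (!skb)
+ *	return -ENOMEM;
+ * nlh = nlmsg_put(skb, portid, seq, XFAM_CMD_GET, 0, 0);
+ * if (!nlh || nla_put_u16(skb, XATTR_PORT, port)) {
+ *	nlmsg_free(skb);
+ *	return -EMSGSIZE;
+ * }
+ * nlmsg_end(skb, nlh);
+ * return nlmsg_unicast(sk, skb, portid);
+ */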
+
+/**
+ * nlmsg_append - Add more data to a nlmsg in a skb
+ * @skb: socket buffer to store message in
+ * @size: length of message payload
+ *
+ * Append data to an existing nlmsg, used when constructing a message
+ * with multiple fixed-format headers (which is rare).
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the extra payload.
+ */
+static inline void *nlmsg_append(struct sk_buff *skb, u32 size)
+{
+ if (unlikely(skb_tailroom(skb) < NLMSG_ALIGN(size)))
+ return NULL;
+
+ if (NLMSG_ALIGN(size) - size)
+ memset(skb_tail_pointer(skb) + size, 0,
+ NLMSG_ALIGN(size) - size);
+ return __skb_put(skb, NLMSG_ALIGN(size));
+}
+
+/**
+ * nlmsg_put_answer - Add a new callback based netlink message to an skb
+ * @skb: socket buffer to store message in
+ * @cb: netlink callback
+ * @type: message type
+ * @payload: length of message payload
+ * @flags: message flags
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the message header and payload.
+ */
+static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ int type, int payload,
+ int flags)
+{
+ return nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ type, payload, flags);
+}
+
+/**
+ * nlmsg_new - Allocate a new netlink message
+ * @payload: size of the message payload
+ * @flags: the type of memory to allocate.
+ *
+ * Use NLMSG_DEFAULT_SIZE if the size of the payload isn't known
+ * and a good default is needed.
+ */
+static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
+{
+ return alloc_skb(nlmsg_total_size(payload), flags);
+}
+
+/**
+ * nlmsg_end - Finalize a netlink message
+ * @skb: socket buffer the message is stored in
+ * @nlh: netlink message header
+ *
+ * Corrects the netlink message header to include the appended
+ * attributes. Only necessary if attributes have been added to
+ * the message.
+ */
+static inline void nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+ nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
+}
+
+/**
+ * nlmsg_get_pos - return current position in netlink message
+ * @skb: socket buffer the message is stored in
+ *
+ * Returns a pointer to the current tail of the message.
+ */
+static inline void *nlmsg_get_pos(struct sk_buff *skb)
+{
+ return skb_tail_pointer(skb);
+}
+
+/**
+ * nlmsg_trim - Trim message to a mark
+ * @skb: socket buffer the message is stored in
+ * @mark: mark to trim to
+ *
+ * Trims the message to the provided mark.
+ */
+static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
+{
+ if (mark) {
+ WARN_ON((unsigned char *) mark < skb->data);
+ skb_trim(skb, (unsigned char *) mark - skb->data);
+ }
+}
+
+/**
+ * nlmsg_cancel - Cancel construction of a netlink message
+ * @skb: socket buffer the message is stored in
+ * @nlh: netlink message header
+ *
+ * Removes the complete netlink message including all
+ * attributes from the socket buffer again.
+ */
+static inline void nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+ nlmsg_trim(skb, nlh);
+}
+
+/**
+ * nlmsg_free - free a netlink message
+ * @skb: socket buffer of netlink message
+ */
+static inline void nlmsg_free(struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+/**
+ * nlmsg_multicast - multicast a netlink message
+ * @sk: netlink socket to spread messages to
+ * @skb: netlink message as socket buffer
+ * @portid: own netlink portid to avoid sending to yourself
+ * @group: multicast group id
+ * @flags: allocation flags
+ */
+static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
+ u32 portid, unsigned int group, gfp_t flags)
+{
+ int err;
+
+ NETLINK_CB(skb).dst_group = group;
+
+ err = netlink_broadcast(sk, skb, portid, group, flags);
+ if (err > 0)
+ err = 0;
+
+ return err;
+}
+
+/**
+ * nlmsg_unicast - unicast a netlink message
+ * @sk: netlink socket to spread message to
+ * @skb: netlink message as socket buffer
+ * @portid: netlink portid of the destination socket
+ */
+static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 portid)
+{
+ int err;
+
+ err = netlink_unicast(sk, skb, portid, MSG_DONTWAIT);
+ if (err > 0)
+ err = 0;
+
+ return err;
+}
+
+/**
+ * nlmsg_for_each_msg - iterate over a stream of messages
+ * @pos: loop counter, set to current message
+ * @head: head of message stream
+ * @len: length of message stream
+ * @rem: initialized to len, holds bytes currently remaining in stream
+ */
+#define nlmsg_for_each_msg(pos, head, len, rem) \
+ for (pos = head, rem = len; \
+ nlmsg_ok(pos, rem); \
+ pos = nlmsg_next(pos, &(rem)))
+
+/**
+ * nl_dump_check_consistent - check if sequence is consistent and advertise if not
+ * @cb: netlink callback structure that stores the sequence number
+ * @nlh: netlink message header to write the flag to
+ *
+ * This function checks if the sequence (generation) number changed during dump
+ * and if it did, advertises it in the netlink message header.
+ *
+ * The correct way to use it is to set cb->seq to the generation counter when
+ * all locks for dumping have been acquired, and then call this function for
+ * each message that is generated.
+ *
+ * Note that due to initialisation concerns, 0 is an invalid sequence number
+ * and must not be used by code that uses this functionality.
+ */
+static inline void
+nl_dump_check_consistent(struct netlink_callback *cb,
+ struct nlmsghdr *nlh)
+{
+ if (cb->prev_seq && cb->seq != cb->prev_seq)
+ nlh->nlmsg_flags |= NLM_F_DUMP_INTR;
+ cb->prev_seq = cb->seq;
+}
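+
+/*
+ * Example (illustrative sketch): in a dump callback, record the generation
+ * counter once all locks are held, then flag every message that is put
+ * together. xfam_table->gen_seq is a hypothetical generation counter.
+ *
+ * cb->seq = xfam_table->gen_seq;
+ * ...
+ * nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ *		   XFAM_CMD_GET, 0, NLM_F_MULTI);
+ * if (!nlh)
+ *	break;
+ * nl_dump_check_consistent(cb, nlh);
+ */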
+
+/**************************************************************************
+ * Netlink Attributes
+ **************************************************************************/
+
+/**
+ * nla_attr_size - length of attribute not including padding
+ * @payload: length of payload
+ */
+static inline int nla_attr_size(int payload)
+{
+ return NLA_HDRLEN + payload;
+}
+
+/**
+ * nla_total_size - total length of attribute including padding
+ * @payload: length of payload
+ */
+static inline int nla_total_size(int payload)
+{
+ return NLA_ALIGN(nla_attr_size(payload));
+}
+
+/**
+ * nla_padlen - length of padding at the tail of attribute
+ * @payload: length of payload
+ */
+static inline int nla_padlen(int payload)
+{
+ return nla_total_size(payload) - nla_attr_size(payload);
+}
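+
+/*
+ * Example (illustrative sketch): the padded sizes above are typically used
+ * to precompute the payload passed to nlmsg_new(), one nla_total_size()
+ * term per attribute to be emitted.
+ *
+ * size_t payload = nla_total_size(sizeof(u16)) +
+ *		    nla_total_size(IFNAMSIZ);
+ *
+ * skb = nlmsg_new(payload, GFP_KERNEL);
+ */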
+
+/**
+ * nla_type - attribute type
+ * @nla: netlink attribute
+ */
+static inline int nla_type(const struct nlattr *nla)
+{
+ return nla->nla_type & NLA_TYPE_MASK;
+}
+
+/**
+ * nla_data - head of payload
+ * @nla: netlink attribute
+ */
+static inline void *nla_data(const struct nlattr *nla)
+{
+ return (char *) nla + NLA_HDRLEN;
+}
+
+/**
+ * nla_len - length of payload
+ * @nla: netlink attribute
+ */
+static inline int nla_len(const struct nlattr *nla)
+{
+ return nla->nla_len - NLA_HDRLEN;
+}
+
+/**
+ * nla_ok - check if the netlink attribute fits into the remaining bytes
+ * @nla: netlink attribute
+ * @remaining: number of bytes remaining in attribute stream
+ */
+static inline int nla_ok(const struct nlattr *nla, int remaining)
+{
+ return remaining >= (int) sizeof(*nla) &&
+ nla->nla_len >= sizeof(*nla) &&
+ nla->nla_len <= remaining;
+}
+
+/**
+ * nla_next - next netlink attribute in attribute stream
+ * @nla: netlink attribute
+ * @remaining: number of bytes remaining in attribute stream
+ *
+ * Returns the next netlink attribute in the attribute stream and
+ * decrements remaining by the size of the current attribute.
+ */
+static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
+{
+ unsigned int totlen = NLA_ALIGN(nla->nla_len);
+
+ *remaining -= totlen;
+ return (struct nlattr *) ((char *) nla + totlen);
+}
+
+/**
+ * nla_find_nested - find attribute in a set of nested attributes
+ * @nla: attribute containing the nested attributes
+ * @attrtype: type of attribute to look for
+ *
+ * Returns the first attribute which matches the specified type.
+ */
+static inline struct nlattr *
+nla_find_nested(const struct nlattr *nla, int attrtype)
+{
+ return nla_find(nla_data(nla), nla_len(nla), attrtype);
+}
+
+/**
+ * nla_parse_nested - parse nested attributes
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @nla: attribute containing the nested attributes
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ *
+ * See nla_parse()
+ */
+static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
+ const struct nlattr *nla,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ if (!(nla->nla_type & NLA_F_NESTED)) {
+ NL_SET_ERR_MSG_ATTR(extack, nla, "NLA_F_NESTED is missing");
+ return -EINVAL;
+ }
+
+ return __nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy,
+ NL_VALIDATE_STRICT, extack);
+}
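+
+/*
+ * Example (illustrative sketch): parsing the attributes nested inside a
+ * container previously stored in tb[]. XATTR_LIMITS, XLIM_MAX and
+ * xlim_policy are hypothetical.
+ *
+ * struct nlattr *lim[XLIM_MAX + 1];
+ *
+ * if (tb[XATTR_LIMITS]) {
+ *	err = nla_parse_nested(lim, XLIM_MAX, tb[XATTR_LIMITS],
+ *			       xlim_policy, extack);
+ *	if (err < 0)
+ *		return err;
+ * }
+ */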
+
+/**
+ * nla_parse_nested_deprecated - parse nested attributes
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @nla: attribute containing the nested attributes
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ *
+ * See nla_parse_deprecated()
+ */
+static inline int nla_parse_nested_deprecated(struct nlattr *tb[], int maxtype,
+ const struct nlattr *nla,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy,
+ NL_VALIDATE_LIBERAL, extack);
+}
+
+/**
+ * nla_put_u8 - Add a u8 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
+{
+ /* temporary variables to work around GCC PR81715 with asan-stack=1 */
+ u8 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u8), &tmp);
+}
+
+/**
+ * nla_put_u16 - Add a u16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
+{
+ u16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u16), &tmp);
+}
+
+/**
+ * nla_put_be16 - Add a __be16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
+{
+ __be16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__be16), &tmp);
+}
+
+/**
+ * nla_put_net16 - Add 16-bit network byte order netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
+{
+ __be16 tmp = value;
+
+ return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
+}
+
+/**
+ * nla_put_le16 - Add a __le16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
+{
+ __le16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__le16), &tmp);
+}
+
+/**
+ * nla_put_u32 - Add a u32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
+{
+ u32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u32), &tmp);
+}
+
+/**
+ * nla_put_be32 - Add a __be32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
+{
+ __be32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__be32), &tmp);
+}
+
+/**
+ * nla_put_net32 - Add 32-bit network byte order netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
+{
+ __be32 tmp = value;
+
+ return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
+}
+
+/**
+ * nla_put_le32 - Add a __le32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
+{
+ __le32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__le32), &tmp);
+}
+
+/**
+ * nla_put_u64_64bit - Add a u64 netlink attribute to a skb and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ * @padattr: attribute type for the padding
+ */
+static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
+ u64 value, int padattr)
+{
+ u64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
+}
+
+/**
+ * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ * @padattr: attribute type for the padding
+ */
+static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
+ int padattr)
+{
+ __be64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(__be64), &tmp, padattr);
+}
+
+/**
+ * nla_put_net64 - Add 64-bit network byte order nlattr to a skb and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ * @padattr: attribute type for the padding
+ */
+static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
+ int padattr)
+{
+ __be64 tmp = value;
+
+ return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp,
+ padattr);
+}
+
+/**
+ * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ * @padattr: attribute type for the padding
+ */
+static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
+ int padattr)
+{
+ __le64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(__le64), &tmp, padattr);
+}
+
+/**
+ * nla_put_s8 - Add a s8 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
+{
+ s8 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s8), &tmp);
+}
+
+/**
+ * nla_put_s16 - Add a s16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
+{
+ s16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s16), &tmp);
+}
+
+/**
+ * nla_put_s32 - Add a s32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
+{
+ s32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s32), &tmp);
+}
+
+/**
+ * nla_put_s64 - Add a s64 netlink attribute to a socket buffer and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ * @padattr: attribute type for the padding
+ */
+static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
+ int padattr)
+{
+ s64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr);
+}
+
+/**
+ * nla_put_string - Add a string netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @str: NUL terminated string
+ */
+static inline int nla_put_string(struct sk_buff *skb, int attrtype,
+ const char *str)
+{
+ return nla_put(skb, attrtype, strlen(str) + 1, str);
+}
+
+/**
+ * nla_put_flag - Add a flag netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ */
+static inline int nla_put_flag(struct sk_buff *skb, int attrtype)
+{
+ return nla_put(skb, attrtype, 0, NULL);
+}
+
+/**
+ * nla_put_msecs - Add a msecs netlink attribute to a skb and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @njiffies: number of jiffies to convert to msecs
+ * @padattr: attribute type for the padding
+ */
+static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
+ unsigned long njiffies, int padattr)
+{
+ u64 tmp = jiffies_to_msecs(njiffies);
+
+ return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
+}
+
+/**
+ * nla_put_in_addr - Add an IPv4 address netlink attribute to a socket
+ * buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @addr: IPv4 address
+ */
+static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
+ __be32 addr)
+{
+ __be32 tmp = addr;
+
+ return nla_put_be32(skb, attrtype, tmp);
+}
+
+/**
+ * nla_put_in6_addr - Add an IPv6 address netlink attribute to a socket
+ * buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @addr: IPv6 address
+ */
+static inline int nla_put_in6_addr(struct sk_buff *skb, int attrtype,
+ const struct in6_addr *addr)
+{
+ return nla_put(skb, attrtype, sizeof(*addr), addr);
+}
+
+/**
+ * nla_put_bitfield32 - Add a bitfield32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: value carrying bits
+ * @selector: selector of valid bits
+ */
+static inline int nla_put_bitfield32(struct sk_buff *skb, int attrtype,
+ __u32 value, __u32 selector)
+{
+ struct nla_bitfield32 tmp = { value, selector, };
+
+ return nla_put(skb, attrtype, sizeof(tmp), &tmp);
+}
+
+/**
+ * nla_get_u32 - return payload of u32 attribute
+ * @nla: u32 netlink attribute
+ */
+static inline u32 nla_get_u32(const struct nlattr *nla)
+{
+ return *(u32 *) nla_data(nla);
+}
+
+/**
+ * nla_get_be32 - return payload of __be32 attribute
+ * @nla: __be32 netlink attribute
+ */
+static inline __be32 nla_get_be32(const struct nlattr *nla)
+{
+ return *(__be32 *) nla_data(nla);
+}
+
+/**
+ * nla_get_le32 - return payload of __le32 attribute
+ * @nla: __le32 netlink attribute
+ */
+static inline __le32 nla_get_le32(const struct nlattr *nla)
+{
+ return *(__le32 *) nla_data(nla);
+}
+
+/**
+ * nla_get_u16 - return payload of u16 attribute
+ * @nla: u16 netlink attribute
+ */
+static inline u16 nla_get_u16(const struct nlattr *nla)
+{
+ return *(u16 *) nla_data(nla);
+}
+
+/**
+ * nla_get_be16 - return payload of __be16 attribute
+ * @nla: __be16 netlink attribute
+ */
+static inline __be16 nla_get_be16(const struct nlattr *nla)
+{
+ return *(__be16 *) nla_data(nla);
+}
+
+/**
+ * nla_get_le16 - return payload of __le16 attribute
+ * @nla: __le16 netlink attribute
+ */
+static inline __le16 nla_get_le16(const struct nlattr *nla)
+{
+ return *(__le16 *) nla_data(nla);
+}
+
+/**
+ * nla_get_u8 - return payload of u8 attribute
+ * @nla: u8 netlink attribute
+ */
+static inline u8 nla_get_u8(const struct nlattr *nla)
+{
+ return *(u8 *) nla_data(nla);
+}
+
+/**
+ * nla_get_u64 - return payload of u64 attribute
+ * @nla: u64 netlink attribute
+ */
+static inline u64 nla_get_u64(const struct nlattr *nla)
+{
+ u64 tmp;
+
+ nla_memcpy(&tmp, nla, sizeof(tmp));
+
+ return tmp;
+}
+
+/**
+ * nla_get_be64 - return payload of __be64 attribute
+ * @nla: __be64 netlink attribute
+ */
+static inline __be64 nla_get_be64(const struct nlattr *nla)
+{
+ __be64 tmp;
+
+ nla_memcpy(&tmp, nla, sizeof(tmp));
+
+ return tmp;
+}
+
+/**
+ * nla_get_le64 - return payload of __le64 attribute
+ * @nla: __le64 netlink attribute
+ */
+static inline __le64 nla_get_le64(const struct nlattr *nla)
+{
+ return *(__le64 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s32 - return payload of s32 attribute
+ * @nla: s32 netlink attribute
+ */
+static inline s32 nla_get_s32(const struct nlattr *nla)
+{
+ return *(s32 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s16 - return payload of s16 attribute
+ * @nla: s16 netlink attribute
+ */
+static inline s16 nla_get_s16(const struct nlattr *nla)
+{
+ return *(s16 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s8 - return payload of s8 attribute
+ * @nla: s8 netlink attribute
+ */
+static inline s8 nla_get_s8(const struct nlattr *nla)
+{
+ return *(s8 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s64 - return payload of s64 attribute
+ * @nla: s64 netlink attribute
+ */
+static inline s64 nla_get_s64(const struct nlattr *nla)
+{
+ s64 tmp;
+
+ nla_memcpy(&tmp, nla, sizeof(tmp));
+
+ return tmp;
+}
+
+/**
+ * nla_get_flag - return payload of flag attribute
+ * @nla: flag netlink attribute
+ */
+static inline int nla_get_flag(const struct nlattr *nla)
+{
+ return !!nla;
+}
+
+/**
+ * nla_get_msecs - return payload of msecs attribute
+ * @nla: msecs netlink attribute
+ *
+ * Returns the payload, interpreted as milliseconds, converted to jiffies.
+ */
+static inline unsigned long nla_get_msecs(const struct nlattr *nla)
+{
+ u64 msecs = nla_get_u64(nla);
+
+ return msecs_to_jiffies((unsigned long) msecs);
+}
+
+/**
+ * nla_get_in_addr - return payload of IPv4 address attribute
+ * @nla: IPv4 address netlink attribute
+ */
+static inline __be32 nla_get_in_addr(const struct nlattr *nla)
+{
+ return *(__be32 *) nla_data(nla);
+}
+
+/**
+ * nla_get_in6_addr - return payload of IPv6 address attribute
+ * @nla: IPv6 address netlink attribute
+ */
+static inline struct in6_addr nla_get_in6_addr(const struct nlattr *nla)
+{
+ struct in6_addr tmp;
+
+ nla_memcpy(&tmp, nla, sizeof(tmp));
+ return tmp;
+}
+
+/**
+ * nla_get_bitfield32 - return payload of 32 bitfield attribute
+ * @nla: nla_bitfield32 attribute
+ */
+static inline struct nla_bitfield32 nla_get_bitfield32(const struct nlattr *nla)
+{
+ struct nla_bitfield32 tmp;
+
+ nla_memcpy(&tmp, nla, sizeof(tmp));
+ return tmp;
+}
+
+/**
+ * nla_memdup - duplicate attribute memory (kmemdup)
+ * @src: netlink attribute to duplicate from
+ * @gfp: GFP mask
+ */
+static inline void *nla_memdup(const struct nlattr *src, gfp_t gfp)
+{
+ return kmemdup(nla_data(src), nla_len(src), gfp);
+}
+
+/**
+ * nla_nest_start_noflag - Start a new level of nested attributes
+ * @skb: socket buffer to add attributes to
+ * @attrtype: attribute type of container
+ *
+ * This function exists for backward compatibility to use in APIs which never
+ * marked their nest attributes with NLA_F_NESTED flag. New APIs should use
+ * nla_nest_start() which sets the flag.
+ *
+ * Returns the container attribute or NULL on error
+ */
+static inline struct nlattr *nla_nest_start_noflag(struct sk_buff *skb,
+ int attrtype)
+{
+ struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb);
+
+ if (nla_put(skb, attrtype, 0, NULL) < 0)
+ return NULL;
+
+ return start;
+}
+
+/**
+ * nla_nest_start - Start a new level of nested attributes, with NLA_F_NESTED
+ * @skb: socket buffer to add attributes to
+ * @attrtype: attribute type of container
+ *
+ * Unlike nla_nest_start_noflag(), mark the nest attribute with NLA_F_NESTED
+ * flag. This is the preferred function to use in new code.
+ *
+ * Returns the container attribute or NULL on error
+ */
+static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
+{
+ return nla_nest_start_noflag(skb, attrtype | NLA_F_NESTED);
+}
+
+/**
+ * nla_nest_end - Finalize nesting of attributes
+ * @skb: socket buffer the attributes are stored in
+ * @start: container attribute
+ *
+ * Corrects the container attribute header to include all
+ * appended attributes.
+ *
+ * Returns the total data length of the skb.
+ */
+static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
+{
+ start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start;
+ return skb->len;
+}
+
+/**
+ * nla_nest_cancel - Cancel nesting of attributes
+ * @skb: socket buffer the message is stored in
+ * @start: container attribute
+ *
+ * Removes the container attribute including all nested
+ * attributes from the socket buffer again.
+ */
+static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
+{
+ nlmsg_trim(skb, start);
+}
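+
+/*
+ * Example (illustrative sketch): emitting a nested container and undoing
+ * it if a member attribute does not fit. XATTR_LIMITS and XLIM_MIN are
+ * hypothetical.
+ *
+ * struct nlattr *nest = nla_nest_start(skb, XATTR_LIMITS);
+ *
+ * if (!nest)
+ *	return -EMSGSIZE;
+ * if (nla_put_u32(skb, XLIM_MIN, min_val)) {
+ *	nla_nest_cancel(skb, nest);
+ *	return -EMSGSIZE;
+ * }
+ * nla_nest_end(skb, nest);
+ */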
+
+/**
+ * __nla_validate_nested - Validate a stream of nested attributes
+ * @start: container attribute
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @validate: validation strictness
+ * @extack: extended ACK report struct
+ *
+ * Validates all attributes in the nested attribute stream against the
+ * specified policy. Attributes with a type exceeding maxtype will be
+ * ignored. See documentation of struct nla_policy for more details.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static inline int __nla_validate_nested(const struct nlattr *start, int maxtype,
+ const struct nla_policy *policy,
+ unsigned int validate,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_validate(nla_data(start), nla_len(start), maxtype, policy,
+ validate, extack);
+}
+
+static inline int
+nla_validate_nested(const struct nlattr *start, int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_validate_nested(start, maxtype, policy,
+ NL_VALIDATE_STRICT, extack);
+}
+
+static inline int
+nla_validate_nested_deprecated(const struct nlattr *start, int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_validate_nested(start, maxtype, policy,
+ NL_VALIDATE_LIBERAL, extack);
+}
+
+/**
+ * nla_need_padding_for_64bit - test 64-bit alignment of the next attribute
+ * @skb: socket buffer the message is stored in
+ *
+ * Return true if padding is needed to align the next attribute (nla_data()) to
+ * a 64-bit aligned area.
+ */
+static inline bool nla_need_padding_for_64bit(struct sk_buff *skb)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ /* The nlattr header is 4 bytes in size, that's why we test
+ * whether the skb tail pointer _is_ 8-byte aligned: the next
+ * payload would then start at a 4-byte offset. A zero-length
+ * padding attribute, plus the nlattr header of the following
+ * attribute, makes nla_data() 8-byte aligned.
+ */
+ if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8))
+ return true;
+#endif
+ return false;
+}
+
+/**
+ * nla_align_64bit - 64-bit align the nla_data() of next attribute
+ * @skb: socket buffer the message is stored in
+ * @padattr: attribute type for the padding
+ *
+ * Conditionally emit a padding netlink attribute in order to make
+ * the next attribute we emit have a 64-bit aligned nla_data() area.
+ * This will only be done in architectures which do not have
+ * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS defined.
+ *
+ * Returns zero on success or a negative error code.
+ */
+static inline int nla_align_64bit(struct sk_buff *skb, int padattr)
+{
+ if (nla_need_padding_for_64bit(skb) &&
+ !nla_reserve(skb, padattr, 0))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+/**
+ * nla_total_size_64bit - total length of attribute including padding
+ * @payload: length of payload
+ */
+static inline int nla_total_size_64bit(int payload)
+{
+ return NLA_ALIGN(nla_attr_size(payload))
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ + NLA_ALIGN(nla_attr_size(0))
+#endif
+ ;
+}
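+
+/*
+ * Example (illustrative sketch): 64-bit attributes are sized with
+ * nla_total_size_64bit() and emitted with nla_put_u64_64bit(), which
+ * takes care of the padding attribute internally. XATTR_BYTES and
+ * XATTR_PAD are hypothetical.
+ *
+ * payload += nla_total_size_64bit(sizeof(u64));
+ * ...
+ * if (nla_put_u64_64bit(skb, XATTR_BYTES, bytes, XATTR_PAD))
+ *	return -EMSGSIZE;
+ */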
+
+/**
+ * nla_for_each_attr - iterate over a stream of attributes
+ * @pos: loop counter, set to current attribute
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @rem: initialized to len, holds bytes currently remaining in stream
+ */
+#define nla_for_each_attr(pos, head, len, rem) \
+ for (pos = head, rem = len; \
+ nla_ok(pos, rem); \
+ pos = nla_next(pos, &(rem)))
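+
+/*
+ * Example (illustrative sketch): walking an attribute stream without a
+ * policy, dispatching on nla_type(). XATTR_PORT is hypothetical.
+ *
+ * const struct nlattr *pos;
+ * int rem;
+ *
+ * nla_for_each_attr(pos, head, len, rem) {
+ *	switch (nla_type(pos)) {
+ *	case XATTR_PORT:
+ *		port = nla_get_u16(pos);
+ *		break;
+ *	}
+ * }
+ */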
+
+/**
+ * nla_for_each_nested - iterate over nested attributes
+ * @pos: loop counter, set to current attribute
+ * @nla: attribute containing the nested attributes
+ * @rem: initialized to len, holds bytes currently remaining in stream
+ */
+#define nla_for_each_nested(pos, nla, rem) \
+ nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem)
+
+/**
+ * nla_is_last - Test if attribute is last in stream
+ * @nla: attribute to test
+ * @rem: bytes remaining in stream
+ */
+static inline bool nla_is_last(const struct nlattr *nla, int rem)
+{
+ return nla->nla_len == rem;
+}
+
+void nla_get_range_unsigned(const struct nla_policy *pt,
+ struct netlink_range_validation *range);
+void nla_get_range_signed(const struct nla_policy *pt,
+ struct netlink_range_validation_signed *range);
+
+struct netlink_policy_dump_state;
+
+int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
+ const struct nla_policy *policy,
+ unsigned int maxtype);
+int netlink_policy_dump_get_policy_idx(struct netlink_policy_dump_state *state,
+ const struct nla_policy *policy,
+ unsigned int maxtype);
+bool netlink_policy_dump_loop(struct netlink_policy_dump_state *state);
+int netlink_policy_dump_write(struct sk_buff *skb,
+ struct netlink_policy_dump_state *state);
+int netlink_policy_dump_attr_size_estimate(const struct nla_policy *pt);
+int netlink_policy_dump_write_attr(struct sk_buff *skb,
+ const struct nla_policy *pt,
+ int nestattr);
+void netlink_policy_dump_free(struct netlink_policy_dump_state *state);
+
+#endif
diff --git a/include/net/netns/bpf.h b/include/net/netns/bpf.h
new file mode 100644
index 000000000..2c01a278d
--- /dev/null
+++ b/include/net/netns/bpf.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BPF programs attached to network namespace
+ */
+
+#ifndef __NETNS_BPF_H__
+#define __NETNS_BPF_H__
+
+#include <linux/list.h>
+
+struct bpf_prog;
+struct bpf_prog_array;
+
+enum netns_bpf_attach_type {
+ NETNS_BPF_INVALID = -1,
+ NETNS_BPF_FLOW_DISSECTOR = 0,
+ NETNS_BPF_SK_LOOKUP,
+ MAX_NETNS_BPF_ATTACH_TYPE
+};
+
+struct netns_bpf {
+ /* Array of programs to run compiled from progs or links */
+ struct bpf_prog_array __rcu *run_array[MAX_NETNS_BPF_ATTACH_TYPE];
+ struct bpf_prog *progs[MAX_NETNS_BPF_ATTACH_TYPE];
+ struct list_head links[MAX_NETNS_BPF_ATTACH_TYPE];
+};
+
+#endif /* __NETNS_BPF_H__ */
diff --git a/include/net/netns/can.h b/include/net/netns/can.h
new file mode 100644
index 000000000..48b79f7e6
--- /dev/null
+++ b/include/net/netns/can.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * can in net namespaces
+ */
+
+#ifndef __NETNS_CAN_H__
+#define __NETNS_CAN_H__
+
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+
+struct can_dev_rcv_lists;
+struct can_pkg_stats;
+struct can_rcv_lists_stats;
+
+struct netns_can {
+#if IS_ENABLED(CONFIG_PROC_FS)
+ struct proc_dir_entry *proc_dir;
+ struct proc_dir_entry *pde_stats;
+ struct proc_dir_entry *pde_reset_stats;
+ struct proc_dir_entry *pde_rcvlist_all;
+ struct proc_dir_entry *pde_rcvlist_fil;
+ struct proc_dir_entry *pde_rcvlist_inv;
+ struct proc_dir_entry *pde_rcvlist_sff;
+ struct proc_dir_entry *pde_rcvlist_eff;
+ struct proc_dir_entry *pde_rcvlist_err;
+ struct proc_dir_entry *bcmproc_dir;
+#endif
+
+ /* receive filters subscribed for 'all' CAN devices */
+ struct can_dev_rcv_lists *rx_alldev_list;
+ spinlock_t rcvlists_lock;
+ struct timer_list stattimer; /* timer for statistics update */
+ struct can_pkg_stats *pkg_stats;
+ struct can_rcv_lists_stats *rcv_lists_stats;
+
+ /* CAN GW per-net gateway jobs */
+ struct hlist_head cgw_list;
+};
+
+#endif /* __NETNS_CAN_H__ */
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
new file mode 100644
index 000000000..1f463b395
--- /dev/null
+++ b/include/net/netns/conntrack.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_CONNTRACK_H
+#define __NETNS_CONNTRACK_H
+
+#include <linux/list.h>
+#include <linux/list_nulls.h>
+#include <linux/atomic.h>
+#include <linux/workqueue.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+#include <linux/netfilter/nf_conntrack_dccp.h>
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+#include <linux/netfilter/nf_conntrack_sctp.h>
+#endif
+#include <linux/seqlock.h>
+
+struct ctl_table_header;
+struct nf_conntrack_ecache;
+
+struct nf_generic_net {
+ unsigned int timeout;
+};
+
+struct nf_tcp_net {
+ unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX];
+ u8 tcp_loose;
+ u8 tcp_be_liberal;
+ u8 tcp_max_retrans;
+ u8 tcp_ignore_invalid_rst;
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ unsigned int offload_timeout;
+#endif
+};
+
+enum udp_conntrack {
+ UDP_CT_UNREPLIED,
+ UDP_CT_REPLIED,
+ UDP_CT_MAX
+};
+
+struct nf_udp_net {
+ unsigned int timeouts[UDP_CT_MAX];
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ unsigned int offload_timeout;
+#endif
+};
+
+struct nf_icmp_net {
+ unsigned int timeout;
+};
+
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+struct nf_dccp_net {
+ u8 dccp_loose;
+ unsigned int dccp_timeout[CT_DCCP_MAX + 1];
+};
+#endif
+
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+struct nf_sctp_net {
+ unsigned int timeouts[SCTP_CONNTRACK_MAX];
+};
+#endif
+
+#ifdef CONFIG_NF_CT_PROTO_GRE
+enum gre_conntrack {
+ GRE_CT_UNREPLIED,
+ GRE_CT_REPLIED,
+ GRE_CT_MAX
+};
+
+struct nf_gre_net {
+ struct list_head keymap_list;
+ unsigned int timeouts[GRE_CT_MAX];
+};
+#endif
+
+struct nf_ip_net {
+ struct nf_generic_net generic;
+ struct nf_tcp_net tcp;
+ struct nf_udp_net udp;
+ struct nf_icmp_net icmp;
+ struct nf_icmp_net icmpv6;
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ struct nf_dccp_net dccp;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ struct nf_sctp_net sctp;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ struct nf_gre_net gre;
+#endif
+};
+
+struct netns_ct {
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ bool ecache_dwork_pending;
+#endif
+ u8 sysctl_log_invalid; /* Log invalid packets */
+ u8 sysctl_events;
+ u8 sysctl_acct;
+ u8 sysctl_tstamp;
+ u8 sysctl_checksum;
+
+ struct ip_conntrack_stat __percpu *stat;
+ struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
+ struct nf_ip_net nf_ct_proto;
+#if defined(CONFIG_NF_CONNTRACK_LABELS)
+ unsigned int labels_used;
+#endif
+};
+#endif
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
new file mode 100644
index 000000000..8249060cf
--- /dev/null
+++ b/include/net/netns/core.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_CORE_H__
+#define __NETNS_CORE_H__
+
+#include <linux/types.h>
+
+struct ctl_table_header;
+struct prot_inuse;
+
+struct netns_core {
+ /* core sysctls */
+ struct ctl_table_header *sysctl_hdr;
+
+ int sysctl_somaxconn;
+ u8 sysctl_txrehash;
+
+#ifdef CONFIG_PROC_FS
+ struct prot_inuse __percpu *prot_inuse;
+#endif
+};
+
+#endif
diff --git a/include/net/netns/flow_table.h b/include/net/netns/flow_table.h
new file mode 100644
index 000000000..1c5fc657e
--- /dev/null
+++ b/include/net/netns/flow_table.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_FLOW_TABLE_H
+#define __NETNS_FLOW_TABLE_H
+
+struct nf_flow_table_stat {
+ unsigned int count_wq_add;
+ unsigned int count_wq_del;
+ unsigned int count_wq_stats;
+};
+
+struct netns_ft {
+ struct nf_flow_table_stat __percpu *stat;
+};
+#endif
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
new file mode 100644
index 000000000..00c399ede
--- /dev/null
+++ b/include/net/netns/generic.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * generic net pointers
+ */
+
+#ifndef __NET_GENERIC_H__
+#define __NET_GENERIC_H__
+
+#include <linux/bug.h>
+#include <linux/rcupdate.h>
+#include <net/net_namespace.h>
+
+/*
+ * Generic net pointers let modules attach private state to
+ * struct net without modifying struct net itself.
+ *
+ * The rules are simple:
+ * 1. set pernet_operations->id. After register_pernet_device you
+ * will have the id of your private pointer.
+ * 2. set pernet_operations->size to have the code allocate and free
+ * a private structure pointed to from struct net.
+ * 3. do not change this pointer while the net is alive.
+ * 4. do not try to have any private reference on the net_generic object.
+ *
+ * After accomplishing all of the above, the private pointer can be
+ * accessed with the net_generic() call.
+ */
+
+struct net_generic {
+ union {
+ struct {
+ unsigned int len;
+ struct rcu_head rcu;
+ } s;
+
+ DECLARE_FLEX_ARRAY(void *, ptr);
+ };
+};
+
+static inline void *net_generic(const struct net *net, unsigned int id)
+{
+ struct net_generic *ng;
+ void *ptr;
+
+ rcu_read_lock();
+ ng = rcu_dereference(net->gen);
+ ptr = ng->ptr[id];
+ rcu_read_unlock();
+
+ return ptr;
+}
+#endif
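
Following the four rules in the comment above, a pernet user typically looks like this sketch (the foo_* names are illustrative):

struct foo_net {
	int counter;			/* private per-netns state */
};

static unsigned int foo_net_id __read_mostly;

static struct pernet_operations foo_net_ops = {
	.id   = &foo_net_id,		/* rule 1 */
	.size = sizeof(struct foo_net),	/* rule 2 */
};

static int __init foo_init(void)
{
	/* After this returns, foo_net_id indexes our private pointer. */
	return register_pernet_device(&foo_net_ops);
}

static void foo_use(struct net *net)
{
	struct foo_net *fn = net_generic(net, foo_net_id);

	fn->counter++;	/* rules 3 and 4: use it, don't re-point or ref it */
}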
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
new file mode 100644
index 000000000..d9b665151
--- /dev/null
+++ b/include/net/netns/hash.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_NS_HASH_H__
+#define __NET_NS_HASH_H__
+
+#include <net/net_namespace.h>
+
+static inline u32 net_hash_mix(const struct net *net)
+{
+ return net->hash_mix;
+}
+#endif
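
net_hash_mix() supplies a per-namespace perturbation for hash tables shared across namespaces, so identical keys in different netns do not land in the same buckets in a predictable way. A minimal sketch of the usual pattern (the function name is illustrative):

static inline u32 example_port_hashfn(const struct net *net,
				      u32 port, u32 mask)
{
	/* Mixing in the netns hash keeps bucket positions from being
	 * identical across namespaces for the same port number.
	 */
	return (port + net_hash_mix(net)) & mask;
}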
diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h
new file mode 100644
index 000000000..95406e134
--- /dev/null
+++ b/include/net/netns/ieee802154_6lowpan.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ieee802154 6lowpan in net namespaces
+ */
+
+#include <net/inet_frag.h>
+
+#ifndef __NETNS_IEEE802154_6LOWPAN_H__
+#define __NETNS_IEEE802154_6LOWPAN_H__
+
+struct netns_sysctl_lowpan {
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *frags_hdr;
+#endif
+};
+
+struct netns_ieee802154_lowpan {
+ struct netns_sysctl_lowpan sysctl;
+ struct fqdir *fqdir;
+};
+
+#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
new file mode 100644
index 000000000..ede2ff1da
--- /dev/null
+++ b/include/net/netns/ipv4.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ipv4 in net namespaces
+ */
+
+#ifndef __NETNS_IPV4_H__
+#define __NETNS_IPV4_H__
+
+#include <linux/uidgid.h>
+#include <net/inet_frag.h>
+#include <linux/rcupdate.h>
+#include <linux/seqlock.h>
+#include <linux/siphash.h>
+
+struct ctl_table_header;
+struct ipv4_devconf;
+struct fib_rules_ops;
+struct hlist_head;
+struct fib_table;
+struct sock;
+struct local_ports {
+ seqlock_t lock;
+ int range[2];
+ bool warned;
+};
+
+struct ping_group_range {
+ seqlock_t lock;
+ kgid_t range[2];
+};
+
+struct inet_hashinfo;
+
+struct inet_timewait_death_row {
+ refcount_t tw_refcount;
+
+ /* Padding to avoid false sharing; tw_refcount is often written */
+ struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp;
+ int sysctl_max_tw_buckets;
+};
+
+struct tcp_fastopen_context;
+
+struct netns_ipv4 {
+ struct inet_timewait_death_row tcp_death_row;
+
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *forw_hdr;
+ struct ctl_table_header *frags_hdr;
+ struct ctl_table_header *ipv4_hdr;
+ struct ctl_table_header *route_hdr;
+ struct ctl_table_header *xfrm4_hdr;
+#endif
+ struct ipv4_devconf *devconf_all;
+ struct ipv4_devconf *devconf_dflt;
+ struct ip_ra_chain __rcu *ra_chain;
+ struct mutex ra_mutex;
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+ struct fib_rules_ops *rules_ops;
+ struct fib_table __rcu *fib_main;
+ struct fib_table __rcu *fib_default;
+ unsigned int fib_rules_require_fldissect;
+ bool fib_has_custom_rules;
+#endif
+ bool fib_has_custom_local_routes;
+ bool fib_offload_disabled;
+ u8 sysctl_tcp_shrink_window;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ atomic_t fib_num_tclassid_users;
+#endif
+ struct hlist_head *fib_table_hash;
+ struct sock *fibnl;
+
+ struct sock *mc_autojoin_sk;
+
+ struct inet_peer_base *peers;
+ struct fqdir *fqdir;
+
+ u8 sysctl_icmp_echo_ignore_all;
+ u8 sysctl_icmp_echo_enable_probe;
+ u8 sysctl_icmp_echo_ignore_broadcasts;
+ u8 sysctl_icmp_ignore_bogus_error_responses;
+ u8 sysctl_icmp_errors_use_inbound_ifaddr;
+ int sysctl_icmp_ratelimit;
+ int sysctl_icmp_ratemask;
+
+ u32 ip_rt_min_pmtu;
+ int ip_rt_mtu_expires;
+ int ip_rt_min_advmss;
+
+ struct local_ports ip_local_ports;
+
+ u8 sysctl_tcp_ecn;
+ u8 sysctl_tcp_ecn_fallback;
+
+ u8 sysctl_ip_default_ttl;
+ u8 sysctl_ip_no_pmtu_disc;
+ u8 sysctl_ip_fwd_use_pmtu;
+ u8 sysctl_ip_fwd_update_priority;
+ u8 sysctl_ip_nonlocal_bind;
+ u8 sysctl_ip_autobind_reuse;
+ /* Shall we try to damage output packets if routing dev changes? */
+ u8 sysctl_ip_dynaddr;
+ u8 sysctl_ip_early_demux;
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ u8 sysctl_raw_l3mdev_accept;
+#endif
+ u8 sysctl_tcp_early_demux;
+ u8 sysctl_udp_early_demux;
+
+ u8 sysctl_nexthop_compat_mode;
+
+ u8 sysctl_fwmark_reflect;
+ u8 sysctl_tcp_fwmark_accept;
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ u8 sysctl_tcp_l3mdev_accept;
+#endif
+ u8 sysctl_tcp_mtu_probing;
+ int sysctl_tcp_mtu_probe_floor;
+ int sysctl_tcp_base_mss;
+ int sysctl_tcp_min_snd_mss;
+ int sysctl_tcp_probe_threshold;
+ u32 sysctl_tcp_probe_interval;
+
+ int sysctl_tcp_keepalive_time;
+ int sysctl_tcp_keepalive_intvl;
+ u8 sysctl_tcp_keepalive_probes;
+
+ u8 sysctl_tcp_syn_retries;
+ u8 sysctl_tcp_synack_retries;
+ u8 sysctl_tcp_syncookies;
+ u8 sysctl_tcp_migrate_req;
+ u8 sysctl_tcp_comp_sack_nr;
+ int sysctl_tcp_reordering;
+ u8 sysctl_tcp_retries1;
+ u8 sysctl_tcp_retries2;
+ u8 sysctl_tcp_orphan_retries;
+ u8 sysctl_tcp_tw_reuse;
+ int sysctl_tcp_fin_timeout;
+ unsigned int sysctl_tcp_notsent_lowat;
+ u8 sysctl_tcp_sack;
+ u8 sysctl_tcp_window_scaling;
+ u8 sysctl_tcp_timestamps;
+ u8 sysctl_tcp_early_retrans;
+ u8 sysctl_tcp_recovery;
+ u8 sysctl_tcp_thin_linear_timeouts;
+ u8 sysctl_tcp_slow_start_after_idle;
+ u8 sysctl_tcp_retrans_collapse;
+ u8 sysctl_tcp_stdurg;
+ u8 sysctl_tcp_rfc1337;
+ u8 sysctl_tcp_abort_on_overflow;
+ u8 sysctl_tcp_fack; /* obsolete */
+ int sysctl_tcp_max_reordering;
+ int sysctl_tcp_adv_win_scale;
+ u8 sysctl_tcp_dsack;
+ u8 sysctl_tcp_app_win;
+ u8 sysctl_tcp_frto;
+ u8 sysctl_tcp_nometrics_save;
+ u8 sysctl_tcp_no_ssthresh_metrics_save;
+ u8 sysctl_tcp_moderate_rcvbuf;
+ u8 sysctl_tcp_tso_win_divisor;
+ u8 sysctl_tcp_workaround_signed_windows;
+ int sysctl_tcp_limit_output_bytes;
+ int sysctl_tcp_challenge_ack_limit;
+ int sysctl_tcp_min_rtt_wlen;
+ u8 sysctl_tcp_min_tso_segs;
+ u8 sysctl_tcp_tso_rtt_log;
+ u8 sysctl_tcp_autocorking;
+ u8 sysctl_tcp_reflect_tos;
+ int sysctl_tcp_invalid_ratelimit;
+ int sysctl_tcp_pacing_ss_ratio;
+ int sysctl_tcp_pacing_ca_ratio;
+ int sysctl_tcp_wmem[3];
+ int sysctl_tcp_rmem[3];
+ unsigned int sysctl_tcp_child_ehash_entries;
+ unsigned long sysctl_tcp_comp_sack_delay_ns;
+ unsigned long sysctl_tcp_comp_sack_slack_ns;
+ int sysctl_max_syn_backlog;
+ int sysctl_tcp_fastopen;
+ const struct tcp_congestion_ops __rcu *tcp_congestion_control;
+ struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
+ unsigned int sysctl_tcp_fastopen_blackhole_timeout;
+ atomic_t tfo_active_disable_times;
+ unsigned long tfo_active_disable_stamp;
+ u32 tcp_challenge_timestamp;
+ u32 tcp_challenge_count;
+
+ int sysctl_udp_wmem_min;
+ int sysctl_udp_rmem_min;
+
+ u8 sysctl_fib_notify_on_flag_change;
+
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ u8 sysctl_udp_l3mdev_accept;
+#endif
+
+ u8 sysctl_igmp_llm_reports;
+ int sysctl_igmp_max_memberships;
+ int sysctl_igmp_max_msf;
+ int sysctl_igmp_qrv;
+
+ struct ping_group_range ping_group_range;
+
+ atomic_t dev_addr_genid;
+
+#ifdef CONFIG_SYSCTL
+ unsigned long *sysctl_local_reserved_ports;
+ int sysctl_ip_prot_sock;
+#endif
+
+#ifdef CONFIG_IP_MROUTE
+#ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+ struct mr_table *mrt;
+#else
+ struct list_head mr_tables;
+ struct fib_rules_ops *mr_rules_ops;
+#endif
+#endif
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ u32 sysctl_fib_multipath_hash_fields;
+ u8 sysctl_fib_multipath_use_neigh;
+ u8 sysctl_fib_multipath_hash_policy;
+#endif
+
+ struct fib_notifier_ops *notifier_ops;
+ unsigned int fib_seq; /* protected by rtnl_mutex */
+
+ struct fib_notifier_ops *ipmr_notifier_ops;
+ unsigned int ipmr_seq; /* protected by rtnl_mutex */
+
+ atomic_t rt_genid;
+ siphash_key_t ip_id_key;
+};
+#endif
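
Most of the fields above are plain per-netns sysctl values: the sysctl handlers update them in place, and fast-path readers load them locklessly. A sketch of the usual reader pattern (the helper name is illustrative):

static u8 example_syn_retries(const struct net *net)
{
	/* READ_ONCE() pairs with the lockless in-place update done by
	 * the sysctl handler; no lock is taken on the read side.
	 */
	return READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
}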
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
new file mode 100644
index 000000000..f6e6a3ab9
--- /dev/null
+++ b/include/net/netns/ipv6.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ipv6 in net namespaces
+ */
+
+#include <net/inet_frag.h>
+
+#ifndef __NETNS_IPV6_H__
+#define __NETNS_IPV6_H__
+#include <net/dst_ops.h>
+#include <uapi/linux/icmpv6.h>
+
+struct ctl_table_header;
+
+struct netns_sysctl_ipv6 {
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *hdr;
+ struct ctl_table_header *route_hdr;
+ struct ctl_table_header *icmp_hdr;
+ struct ctl_table_header *frags_hdr;
+ struct ctl_table_header *xfrm6_hdr;
+#endif
+ int flush_delay;
+ int ip6_rt_max_size;
+ int ip6_rt_gc_min_interval;
+ int ip6_rt_gc_timeout;
+ int ip6_rt_gc_interval;
+ int ip6_rt_gc_elasticity;
+ int ip6_rt_mtu_expires;
+ int ip6_rt_min_advmss;
+ u32 multipath_hash_fields;
+ u8 multipath_hash_policy;
+ u8 bindv6only;
+ u8 flowlabel_consistency;
+ u8 auto_flowlabels;
+ int icmpv6_time;
+ u8 icmpv6_echo_ignore_all;
+ u8 icmpv6_echo_ignore_multicast;
+ u8 icmpv6_echo_ignore_anycast;
+ DECLARE_BITMAP(icmpv6_ratemask, ICMPV6_MSG_MAX + 1);
+ unsigned long *icmpv6_ratemask_ptr;
+ u8 anycast_src_echo_reply;
+ u8 ip_nonlocal_bind;
+ u8 fwmark_reflect;
+ u8 flowlabel_state_ranges;
+ int idgen_retries;
+ int idgen_delay;
+ int flowlabel_reflect;
+ int max_dst_opts_cnt;
+ int max_hbh_opts_cnt;
+ int max_dst_opts_len;
+ int max_hbh_opts_len;
+ int seg6_flowlabel;
+ u32 ioam6_id;
+ u64 ioam6_id_wide;
+ int skip_notify_on_dev_down;
+ u8 fib_notify_on_flag_change;
+};
+
+struct netns_ipv6 {
+ /* Keep ip6_dst_ops at the beginning of netns_ipv6 */
+ struct dst_ops ip6_dst_ops;
+
+ struct netns_sysctl_ipv6 sysctl;
+ struct ipv6_devconf *devconf_all;
+ struct ipv6_devconf *devconf_dflt;
+ struct inet_peer_base *peers;
+ struct fqdir *fqdir;
+ struct fib6_info *fib6_null_entry;
+ struct rt6_info *ip6_null_entry;
+ struct rt6_statistics *rt6_stats;
+ struct timer_list ip6_fib_timer;
+ struct hlist_head *fib_table_hash;
+ struct fib6_table *fib6_main_tbl;
+ struct list_head fib6_walkers;
+ rwlock_t fib6_walker_lock;
+ spinlock_t fib6_gc_lock;
+ atomic_t ip6_rt_gc_expire;
+ unsigned long ip6_rt_last_gc;
+ unsigned char flowlabel_has_excl;
+#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ bool fib6_has_custom_rules;
+ unsigned int fib6_rules_require_fldissect;
+#ifdef CONFIG_IPV6_SUBTREES
+ unsigned int fib6_routes_require_src;
+#endif
+ struct rt6_info *ip6_prohibit_entry;
+ struct rt6_info *ip6_blk_hole_entry;
+ struct fib6_table *fib6_local_tbl;
+ struct fib_rules_ops *fib6_rules_ops;
+#endif
+ struct sock *ndisc_sk;
+ struct sock *tcp_sk;
+ struct sock *igmp_sk;
+ struct sock *mc_autojoin_sk;
+
+ struct hlist_head *inet6_addr_lst;
+ spinlock_t addrconf_hash_lock;
+ struct delayed_work addr_chk_work;
+
+#ifdef CONFIG_IPV6_MROUTE
+#ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
+ struct mr_table *mrt6;
+#else
+ struct list_head mr6_tables;
+ struct fib_rules_ops *mr6_rules_ops;
+#endif
+#endif
+ atomic_t dev_addr_genid;
+ atomic_t fib6_sernum;
+ struct seg6_pernet_data *seg6_data;
+ struct fib_notifier_ops *notifier_ops;
+ struct fib_notifier_ops *ip6mr_notifier_ops;
+ unsigned int ipmr_seq; /* protected by rtnl_mutex */
+ struct {
+ struct hlist_head head;
+ spinlock_t lock;
+ u32 seq;
+ } ip6addrlbl_table;
+ struct ioam6_pernet_data *ioam6_data;
+};
+
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+struct netns_nf_frag {
+ struct fqdir *fqdir;
+};
+#endif
+
+#endif
diff --git a/include/net/netns/mctp.h b/include/net/netns/mctp.h
new file mode 100644
index 000000000..1db8f9aad
--- /dev/null
+++ b/include/net/netns/mctp.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MCTP per-net structures
+ */
+
+#ifndef __NETNS_MCTP_H__
+#define __NETNS_MCTP_H__
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+struct netns_mctp {
+ /* Only updated under RTNL, entries freed via RCU */
+ struct list_head routes;
+
+ /* Bound sockets: list of sockets bound by type.
+ * This list is updated from non-atomic contexts (under bind_lock),
+ * and read (under rcu) in packet rx
+ */
+ struct mutex bind_lock;
+ struct hlist_head binds;
+
+ /* Tag allocations. This list is read and updated from atomic contexts,
+ * but elements are freed after an RCU grace period.
+ */
+ spinlock_t keys_lock;
+ struct hlist_head keys;
+
+ /* MCTP network */
+ unsigned int default_net;
+
+ /* neighbour table */
+ struct mutex neigh_lock;
+ struct list_head neighbours;
+};
+
+#endif /* __NETNS_MCTP_H__ */
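
The locking comments above imply the usual writer-lock/RCU-reader split for the binds list. A sketch of what an rx-side traversal might look like, assuming a hypothetical entry type (struct example_bind and its fields are illustrative, not the real MCTP socket structure):

struct example_bind {
	struct hlist_node node;		/* linked into net->mctp.binds */
	u8 type;
};

static unsigned int example_count_binds(struct net *net, u8 type)
{
	struct example_bind *b;
	unsigned int n = 0;

	/* Readers take only the RCU read lock; writers hold bind_lock. */
	rcu_read_lock();
	hlist_for_each_entry_rcu(b, &net->mctp.binds, node)
		if (b->type == type)
			n++;
	rcu_read_unlock();

	return n;
}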
diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h
new file mode 100644
index 000000000..7e373664b
--- /dev/null
+++ b/include/net/netns/mib.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_MIB_H__
+#define __NETNS_MIB_H__
+
+#include <net/snmp.h>
+
+struct netns_mib {
+ DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics);
+#if IS_ENABLED(CONFIG_IPV6)
+ DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics);
+#endif
+
+ DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
+ DEFINE_SNMP_STAT(struct linux_mib, net_statistics);
+
+ DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
+#if IS_ENABLED(CONFIG_IPV6)
+ DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6);
+#endif
+
+#ifdef CONFIG_XFRM_STATISTICS
+ DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics);
+#endif
+#if IS_ENABLED(CONFIG_TLS)
+ DEFINE_SNMP_STAT(struct linux_tls_mib, tls_statistics);
+#endif
+#ifdef CONFIG_MPTCP
+ DEFINE_SNMP_STAT(struct mptcp_mib, mptcp_statistics);
+#endif
+
+ DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics);
+#if IS_ENABLED(CONFIG_IPV6)
+ DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6);
+#endif
+
+ DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
+ DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics);
+#if IS_ENABLED(CONFIG_IPV6)
+ DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
+ DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics);
+ struct proc_dir_entry *proc_net_devsnmp6;
+#endif
+};
+
+#endif
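
Protocol code does not touch these per-CPU pointers directly; each protocol wraps its MIB in small increment macros. A hedged sketch of the shape such a wrapper takes (the macro name is illustrative; the real ones live in the per-protocol headers):

#define EXAMPLE_TCP_INC_STATS(net, field) \
	SNMP_INC_STATS((net)->mib.tcp_statistics, field)

/* e.g. on a successful active open:
 *	EXAMPLE_TCP_INC_STATS(net, TCP_MIB_ACTIVEOPENS);
 */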
diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h
new file mode 100644
index 000000000..19ad2574b
--- /dev/null
+++ b/include/net/netns/mpls.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mpls in net namespaces
+ */
+
+#ifndef __NETNS_MPLS_H__
+#define __NETNS_MPLS_H__
+
+#include <linux/types.h>
+
+struct mpls_route;
+struct ctl_table_header;
+
+struct netns_mpls {
+ int ip_ttl_propagate;
+ int default_ttl;
+ size_t platform_labels;
+ struct mpls_route __rcu * __rcu *platform_label;
+
+ struct ctl_table_header *ctl;
+};
+
+#endif /* __NETNS_MPLS_H__ */
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
new file mode 100644
index 000000000..02bbdc577
--- /dev/null
+++ b/include/net/netns/netfilter.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_NETFILTER_H
+#define __NETNS_NETFILTER_H
+
+#include <linux/netfilter_defs.h>
+
+struct proc_dir_entry;
+struct nf_logger;
+struct nf_queue_handler;
+
+struct netns_nf {
+#if defined CONFIG_PROC_FS
+ struct proc_dir_entry *proc_netfilter;
+#endif
+ const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *nf_log_dir_header;
+#endif
+ struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS];
+ struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS];
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+ struct nf_hook_entries __rcu *hooks_arp[NF_ARP_NUMHOOKS];
+#endif
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+ struct nf_hook_entries __rcu *hooks_bridge[NF_INET_NUMHOOKS];
+#endif
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+ unsigned int defrag_ipv4_users;
+#endif
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+ unsigned int defrag_ipv6_users;
+#endif
+};
+#endif
diff --git a/include/net/netns/nexthop.h b/include/net/netns/nexthop.h
new file mode 100644
index 000000000..434239b37
--- /dev/null
+++ b/include/net/netns/nexthop.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * nexthops in net namespaces
+ */
+
+#ifndef __NETNS_NEXTHOP_H__
+#define __NETNS_NEXTHOP_H__
+
+#include <linux/notifier.h>
+#include <linux/rbtree.h>
+
+struct netns_nexthop {
+ struct rb_root rb_root; /* tree of nexthops by id */
+ struct hlist_head *devhash; /* nexthops by device */
+
+ unsigned int seq; /* protected by rtnl_mutex */
+ u32 last_id_allocated;
+ struct blocking_notifier_head notifier_chain;
+};
+#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
new file mode 100644
index 000000000..8c77832d0
--- /dev/null
+++ b/include/net/netns/nftables.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NETNS_NFTABLES_H_
+#define _NETNS_NFTABLES_H_
+
+#include <linux/list.h>
+
+struct netns_nftables {
+ u8 gencursor;
+};
+
+#endif
diff --git a/include/net/netns/packet.h b/include/net/netns/packet.h
new file mode 100644
index 000000000..aae69bb43
--- /dev/null
+++ b/include/net/netns/packet.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Packet network namespace
+ */
+#ifndef __NETNS_PACKET_H__
+#define __NETNS_PACKET_H__
+
+#include <linux/rculist.h>
+#include <linux/mutex.h>
+
+struct netns_packet {
+ struct mutex sklist_lock;
+ struct hlist_head sklist;
+};
+
+#endif /* __NETNS_PACKET_H__ */
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
new file mode 100644
index 000000000..a681147ae
--- /dev/null
+++ b/include/net/netns/sctp.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_SCTP_H__
+#define __NETNS_SCTP_H__
+
+#include <linux/timer.h>
+#include <net/snmp.h>
+
+struct sock;
+struct proc_dir_entry;
+struct sctp_mib;
+struct ctl_table_header;
+
+struct netns_sctp {
+ DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics);
+
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc_net_sctp;
+#endif
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *sysctl_header;
+#endif
+ /* This is the global socket data structure used for responding to
+ * Out-of-the-Blue (OOTB) packets. A control sock is created
+ * for this purpose at initialization time.
+ */
+ struct sock *ctl_sock;
+
+ /* UDP tunneling listening sock. */
+ struct sock *udp4_sock;
+ struct sock *udp6_sock;
+ /* UDP tunneling listening port. */
+ int udp_port;
+ /* UDP tunneling remote encap port. */
+ int encap_port;
+
+ /* This is the global local address list.
+ * We actively maintain this complete list of addresses on
+ * the system by catching address add/delete events.
+ *
+ * It is a list of sctp_sockaddr_entry.
+ */
+ struct list_head local_addr_list;
+ struct list_head addr_waitq;
+ struct timer_list addr_wq_timer;
+ struct list_head auto_asconf_splist;
+ /* Lock that protects both addr_waitq and auto_asconf_splist */
+ spinlock_t addr_wq_lock;
+
+ /* Lock that protects the local_addr_list writers */
+ spinlock_t local_addr_lock;
+
+ /* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
+ *
+ * The following protocol parameters are RECOMMENDED:
+ *
+ * RTO.Initial - 3 seconds
+ * RTO.Min - 1 second
+ * RTO.Max - 60 seconds
+ * RTO.Alpha - 1/8 (3 when converted to right shifts.)
+ * RTO.Beta - 1/4 (2 when converted to right shifts.)
+ */
+ unsigned int rto_initial;
+ unsigned int rto_min;
+ unsigned int rto_max;
+
+ /* Note: rto_alpha and rto_beta are really defined as inverse
+ * powers of two to facilitate integer operations.
+ */
+ int rto_alpha;
+ int rto_beta;
+
+ /* Max.Burst - 4 */
+ int max_burst;
+
+ /* Whether Cookie Preservative is enabled (1) or not (0) */
+ int cookie_preserve_enable;
+
+ /* The namespace default hmac alg */
+ char *sctp_hmac_alg;
+
+ /* Valid.Cookie.Life - 60 seconds */
+ unsigned int valid_cookie_life;
+
+ /* Delayed SACK timeout - 200ms default */
+ unsigned int sack_timeout;
+
+ /* HB.interval - 30 seconds */
+ unsigned int hb_interval;
+
+ /* The interval for PLPMTUD probe timer */
+ unsigned int probe_interval;
+
+ /* Association.Max.Retrans - 10 attempts
+ * Path.Max.Retrans - 5 attempts (per destination address)
+ * Max.Init.Retransmits - 8 attempts
+ */
+ int max_retrans_association;
+ int max_retrans_path;
+ int max_retrans_init;
+ /* Potentially-Failed.Max.Retrans sysctl value
+ * taken from:
+ * http://tools.ietf.org/html/draft-nishida-tsvwg-sctp-failover-05
+ */
+ int pf_retrans;
+
+ /* Primary.Switchover.Max.Retrans sysctl value
+ * taken from:
+ * https://tools.ietf.org/html/rfc7829
+ */
+ int ps_retrans;
+
+ /*
+ * Whether the Potentially-Failed feature is enabled (it is by default):
+ * pf_enable - 0 : disable pf
+ * - >0 : enable pf
+ */
+ int pf_enable;
+
+ /*
+ * Control of Potentially-Failed state exposure (ignored by default):
+ * pf_expose - 0 : compatible with old applications (by default)
+ * - 1 : disable pf state exposure
+ * - 2 : enable pf state exposure
+ */
+ int pf_expose;
+
+ /*
+ * Policy for performing sctp/socket accounting
+ * 0 - do socket level accounting, all assocs share sk_sndbuf
+ * 1 - do sctp accounting, each asoc may use sk_sndbuf bytes
+ */
+ int sndbuf_policy;
+
+ /*
+ * Policy for performing sctp/socket accounting
+ * 0 - do socket level accounting, all assocs share sk_rcvbuf
+ * 1 - do sctp accounting, each asoc may use sk_rcvbuf bytes
+ */
+ int rcvbuf_policy;
+
+ int default_auto_asconf;
+
+ /* Flag to indicate if addip is enabled. */
+ int addip_enable;
+ int addip_noauth;
+
+ /* Flag to indicate if PR-SCTP is enabled. */
+ int prsctp_enable;
+
+ /* Flag to indicate if RE-CONFIG (stream reconfiguration) is enabled. */
+ int reconf_enable;
+
+ /* Flag to indicate if SCTP-AUTH is enabled */
+ int auth_enable;
+
+ /* Flag to indicate if stream interleave is enabled */
+ int intl_enable;
+
+ /* Flag to indicate if ecn is enabled */
+ int ecn_enable;
+
+ /*
+ * Policy to control SCTP IPv4 address scoping
+ * 0 - Disable IPv4 address scoping
+ * 1 - Enable IPv4 address scoping
+ * 2 - Selectively allow only IPv4 private addresses
+ * 3 - Selectively allow only IPv4 link-local addresses
+ */
+ int scope_policy;
+
+ /* Threshold for rwnd update SACKs. The receive buffer shifted this many
+ * bits is an indicator of when to send a window update SACK.
+ */
+ int rwnd_upd_shift;
+
+ /* Threshold for autoclose timeout, in seconds. */
+ unsigned long max_autoclose;
+};
+
+#endif /* __NETNS_SCTP_H__ */
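
The inverse-power-of-two encoding noted above turns the RFC 2960 smoothing formulas into shifts. For instance, with rto_alpha = 3 (alpha = 1/8), SRTT = (1 - alpha) * SRTT + alpha * RTT reduces to integer arithmetic, roughly as sketched here (the function name is illustrative):

static u32 example_update_srtt(u32 srtt, u32 rtt, int rto_alpha)
{
	/* srtt * (1 - 1/2^alpha) + rtt * 1/2^alpha, all in shifts */
	return srtt - (srtt >> rto_alpha) + (rtt >> rto_alpha);
}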
diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h
new file mode 100644
index 000000000..582212ada
--- /dev/null
+++ b/include/net/netns/smc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_SMC_H__
+#define __NETNS_SMC_H__
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+
+struct smc_stats_rsn;
+struct smc_stats;
+struct netns_smc {
+ /* per cpu counters for SMC */
+ struct smc_stats __percpu *smc_stats;
+ /* protect fback_rsn */
+ struct mutex mutex_fback_rsn;
+ struct smc_stats_rsn *fback_rsn;
+
+ bool limit_smc_hs; /* constraint on handshake */
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *smc_hdr;
+#endif
+ unsigned int sysctl_autocorking_size;
+ unsigned int sysctl_smcr_buf_type;
+ int sysctl_smcr_testlink_time;
+ int sysctl_wmem;
+ int sysctl_rmem;
+};
+#endif
diff --git a/include/net/netns/unix.h b/include/net/netns/unix.h
new file mode 100644
index 000000000..9859d134d
--- /dev/null
+++ b/include/net/netns/unix.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Unix network namespace
+ */
+#ifndef __NETNS_UNIX_H__
+#define __NETNS_UNIX_H__
+
+#include <linux/spinlock.h>
+
+struct unix_table {
+ spinlock_t *locks;
+ struct hlist_head *buckets;
+};
+
+struct ctl_table_header;
+struct netns_unix {
+ struct unix_table table;
+ int sysctl_max_dgram_qlen;
+ struct ctl_table_header *ctl;
+};
+
+#endif /* __NETNS_UNIX_H__ */
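
struct unix_table pairs a lock array with a bucket array of the same length, so each hash chain is protected by its own spinlock rather than one global lock. A sketch of per-bucket access under that scheme (hash computation elided; the function name is illustrative):

static void example_walk_bucket(struct net *net, unsigned int hash)
{
	/* locks[hash] guards exactly the chain at buckets[hash] */
	spin_lock(&net->unx.table.locks[hash]);
	/* ... walk net->unx.table.buckets[hash] ... */
	spin_unlock(&net->unx.table.locks[hash]);
}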
diff --git a/include/net/netns/xdp.h b/include/net/netns/xdp.h
new file mode 100644
index 000000000..e5734261b
--- /dev/null
+++ b/include/net/netns/xdp.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_XDP_H__
+#define __NETNS_XDP_H__
+
+#include <linux/rculist.h>
+#include <linux/mutex.h>
+
+struct netns_xdp {
+ struct mutex lock;
+ struct hlist_head list;
+};
+
+#endif /* __NETNS_XDP_H__ */
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
new file mode 100644
index 000000000..423b52eca
--- /dev/null
+++ b/include/net/netns/xfrm.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_XFRM_H
+#define __NETNS_XFRM_H
+
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/rhashtable-types.h>
+#include <linux/xfrm.h>
+#include <net/dst_ops.h>
+
+struct ctl_table_header;
+
+struct xfrm_policy_hash {
+ struct hlist_head __rcu *table;
+ unsigned int hmask;
+ u8 dbits4;
+ u8 sbits4;
+ u8 dbits6;
+ u8 sbits6;
+};
+
+struct xfrm_policy_hthresh {
+ struct work_struct work;
+ seqlock_t lock;
+ u8 lbits4;
+ u8 rbits4;
+ u8 lbits6;
+ u8 rbits6;
+};
+
+struct netns_xfrm {
+ struct list_head state_all;
+ /*
+ * Hash table to find appropriate SA towards given target (endpoint of
+ * tunnel or destination of transport mode) allowed by selector.
+ *
+ * Main use is finding SA after policy selected tunnel or transport
+ * mode. Also, it can be used by ah/esp icmp error handler to find
+ * offending SA.
+ */
+ struct hlist_head __rcu *state_bydst;
+ struct hlist_head __rcu *state_bysrc;
+ struct hlist_head __rcu *state_byspi;
+ struct hlist_head __rcu *state_byseq;
+ unsigned int state_hmask;
+ unsigned int state_num;
+ struct work_struct state_hash_work;
+
+ struct list_head policy_all;
+ struct hlist_head *policy_byidx;
+ unsigned int policy_idx_hmask;
+ unsigned int idx_generator;
+ struct hlist_head policy_inexact[XFRM_POLICY_MAX];
+ struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX];
+ unsigned int policy_count[XFRM_POLICY_MAX * 2];
+ struct work_struct policy_hash_work;
+ struct xfrm_policy_hthresh policy_hthresh;
+ struct list_head inexact_bins;
+
+
+ struct sock *nlsk;
+ struct sock *nlsk_stash;
+
+ u32 sysctl_aevent_etime;
+ u32 sysctl_aevent_rseqth;
+ int sysctl_larval_drop;
+ u32 sysctl_acq_expires;
+
+ u8 policy_default[XFRM_POLICY_MAX];
+
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *sysctl_hdr;
+#endif
+
+ struct dst_ops xfrm4_dst_ops;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct dst_ops xfrm6_dst_ops;
+#endif
+ spinlock_t xfrm_state_lock;
+ seqcount_spinlock_t xfrm_state_hash_generation;
+ seqcount_spinlock_t xfrm_policy_hash_generation;
+
+ spinlock_t xfrm_policy_lock;
+ struct mutex xfrm_cfg_mutex;
+};
+
+#endif
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
new file mode 100644
index 000000000..dec7522b6
--- /dev/null
+++ b/include/net/netprio_cgroup.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * netprio_cgroup.h Control Group Priority set
+ *
+ * Authors: Neil Horman <nhorman@tuxdriver.com>
+ */
+
+#ifndef _NETPRIO_CGROUP_H
+#define _NETPRIO_CGROUP_H
+
+#include <linux/cgroup.h>
+#include <linux/hardirq.h>
+#include <linux/rcupdate.h>
+
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
+struct netprio_map {
+ struct rcu_head rcu;
+ u32 priomap_len;
+ u32 priomap[];
+};
+
+static inline u32 task_netprioidx(struct task_struct *p)
+{
+ struct cgroup_subsys_state *css;
+ u32 idx;
+
+ rcu_read_lock();
+ css = task_css(p, net_prio_cgrp_id);
+ idx = css->id;
+ rcu_read_unlock();
+ return idx;
+}
+
+static inline void sock_update_netprioidx(struct sock_cgroup_data *skcd)
+{
+ if (in_interrupt())
+ return;
+
+ sock_cgroup_set_prioidx(skcd, task_netprioidx(current));
+}
+
+#else /* !CONFIG_CGROUP_NET_PRIO */
+
+static inline u32 task_netprioidx(struct task_struct *p)
+{
+ return 0;
+}
+
+static inline void sock_update_netprioidx(struct sock_cgroup_data *skcd)
+{
+}
+
+#endif /* CONFIG_CGROUP_NET_PRIO */
+#endif /* _NET_CLS_CGROUP_H */
diff --git a/include/net/netrom.h b/include/net/netrom.h
new file mode 100644
index 000000000..f0565a598
--- /dev/null
+++ b/include/net/netrom.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Declarations of NET/ROM type objects.
+ *
+ * Jonathan Naylor G4KLX 9/4/95
+ */
+
+#ifndef _NETROM_H
+#define _NETROM_H
+
+#include <linux/netrom.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <net/sock.h>
+#include <linux/refcount.h>
+#include <linux/seq_file.h>
+#include <net/ax25.h>
+
+#define NR_NETWORK_LEN 15
+#define NR_TRANSPORT_LEN 5
+
+#define NR_PROTO_IP 0x0C
+
+#define NR_PROTOEXT 0x00
+#define NR_CONNREQ 0x01
+#define NR_CONNACK 0x02
+#define NR_DISCREQ 0x03
+#define NR_DISCACK 0x04
+#define NR_INFO 0x05
+#define NR_INFOACK 0x06
+#define NR_RESET 0x07
+
+#define NR_CHOKE_FLAG 0x80
+#define NR_NAK_FLAG 0x40
+#define NR_MORE_FLAG 0x20
+
+/* Define Link State constants. */
+enum {
+ NR_STATE_0,
+ NR_STATE_1,
+ NR_STATE_2,
+ NR_STATE_3
+};
+
+#define NR_COND_ACK_PENDING 0x01
+#define NR_COND_REJECT 0x02
+#define NR_COND_PEER_RX_BUSY 0x04
+#define NR_COND_OWN_RX_BUSY 0x08
+
+#define NR_DEFAULT_T1 120000 /* Outstanding frames - 120 seconds */
+#define NR_DEFAULT_T2 5000 /* Response delay - 5 seconds */
+#define NR_DEFAULT_N2 3 /* Number of Retries - 3 */
+#define NR_DEFAULT_T4 180000 /* Busy Delay - 180 seconds */
+#define NR_DEFAULT_IDLE 0 /* No Activity Timeout - none */
+#define NR_DEFAULT_WINDOW 4 /* Default Window Size - 4 */
+#define NR_DEFAULT_OBS 6 /* Default Obsolescence Count - 6 */
+#define NR_DEFAULT_QUAL 10 /* Default Neighbour Quality - 10 */
+#define NR_DEFAULT_TTL 16 /* Default Time To Live - 16 */
+#define NR_DEFAULT_ROUTING 1 /* Is routing enabled? */
+#define NR_DEFAULT_FAILS 2 /* Link fails until route fails */
+#define NR_DEFAULT_RESET 0 /* Send / accept reset cmds? */
+
+#define NR_MODULUS 256
+#define NR_MAX_WINDOW_SIZE 127 /* Maximum Window Allowable - 127 */
+#define NR_MAX_PACKET_SIZE 236 /* Maximum Packet Length - 236 */
+
+struct nr_sock {
+ struct sock sock;
+ ax25_address user_addr, source_addr, dest_addr;
+ struct net_device *device;
+ unsigned char my_index, my_id;
+ unsigned char your_index, your_id;
+ unsigned char state, condition, bpqext, window;
+ unsigned short vs, vr, va, vl;
+ unsigned char n2, n2count;
+ unsigned long t1, t2, t4, idle;
+ unsigned short fraglen;
+ struct timer_list t1timer;
+ struct timer_list t2timer;
+ struct timer_list t4timer;
+ struct timer_list idletimer;
+ struct sk_buff_head ack_queue;
+ struct sk_buff_head reseq_queue;
+ struct sk_buff_head frag_queue;
+};
+
+#define nr_sk(sk) ((struct nr_sock *)(sk))
+
+struct nr_neigh {
+ struct hlist_node neigh_node;
+ ax25_address callsign;
+ ax25_digi *digipeat;
+ ax25_cb *ax25;
+ struct net_device *dev;
+ unsigned char quality;
+ unsigned char locked;
+ unsigned short count;
+ unsigned int number;
+ unsigned char failed;
+ refcount_t refcount;
+};
+
+struct nr_route {
+ unsigned char quality;
+ unsigned char obs_count;
+ struct nr_neigh *neighbour;
+};
+
+struct nr_node {
+ struct hlist_node node_node;
+ ax25_address callsign;
+ char mnemonic[7];
+ unsigned char which;
+ unsigned char count;
+ struct nr_route routes[3];
+ refcount_t refcount;
+ spinlock_t node_lock;
+};
+
+/*********************************************************************
+ * nr_node & nr_neigh lists, refcounting and locking
+ *********************************************************************/
+
+#define nr_node_hold(__nr_node) \
+ refcount_inc(&((__nr_node)->refcount))
+
+static __inline__ void nr_node_put(struct nr_node *nr_node)
+{
+ if (refcount_dec_and_test(&nr_node->refcount)) {
+ kfree(nr_node);
+ }
+}
+
+#define nr_neigh_hold(__nr_neigh) \
+ refcount_inc(&((__nr_neigh)->refcount))
+
+static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh)
+{
+ if (refcount_dec_and_test(&nr_neigh->refcount)) {
+ if (nr_neigh->ax25)
+ ax25_cb_put(nr_neigh->ax25);
+ kfree(nr_neigh->digipeat);
+ kfree(nr_neigh);
+ }
+}
+
+/* nr_node_lock and nr_node_unlock also hold/put the node's refcounter.
+ */
+static __inline__ void nr_node_lock(struct nr_node *nr_node)
+{
+ nr_node_hold(nr_node);
+ spin_lock_bh(&nr_node->node_lock);
+}
+
+static __inline__ void nr_node_unlock(struct nr_node *nr_node)
+{
+ spin_unlock_bh(&nr_node->node_lock);
+ nr_node_put(nr_node);
+}
+
+#define nr_neigh_for_each(__nr_neigh, list) \
+ hlist_for_each_entry(__nr_neigh, list, neigh_node)
+
+#define nr_neigh_for_each_safe(__nr_neigh, node2, list) \
+ hlist_for_each_entry_safe(__nr_neigh, node2, list, neigh_node)
+
+#define nr_node_for_each(__nr_node, list) \
+ hlist_for_each_entry(__nr_node, list, node_node)
+
+#define nr_node_for_each_safe(__nr_node, node2, list) \
+ hlist_for_each_entry_safe(__nr_node, node2, list, node_node)
+
+
+/*********************************************************************/
+
+/* af_netrom.c */
+extern int sysctl_netrom_default_path_quality;
+extern int sysctl_netrom_obsolescence_count_initialiser;
+extern int sysctl_netrom_network_ttl_initialiser;
+extern int sysctl_netrom_transport_timeout;
+extern int sysctl_netrom_transport_maximum_tries;
+extern int sysctl_netrom_transport_acknowledge_delay;
+extern int sysctl_netrom_transport_busy_delay;
+extern int sysctl_netrom_transport_requested_window_size;
+extern int sysctl_netrom_transport_no_activity_timeout;
+extern int sysctl_netrom_routing_control;
+extern int sysctl_netrom_link_fails_count;
+extern int sysctl_netrom_reset_circuit;
+
+int nr_rx_frame(struct sk_buff *, struct net_device *);
+void nr_destroy_socket(struct sock *);
+
+/* nr_dev.c */
+int nr_rx_ip(struct sk_buff *, struct net_device *);
+void nr_setup(struct net_device *);
+
+/* nr_in.c */
+int nr_process_rx_frame(struct sock *, struct sk_buff *);
+
+/* nr_loopback.c */
+void nr_loopback_init(void);
+void nr_loopback_clear(void);
+int nr_loopback_queue(struct sk_buff *);
+
+/* nr_out.c */
+void nr_output(struct sock *, struct sk_buff *);
+void nr_send_nak_frame(struct sock *);
+void nr_kick(struct sock *);
+void nr_transmit_buffer(struct sock *, struct sk_buff *);
+void nr_establish_data_link(struct sock *);
+void nr_enquiry_response(struct sock *);
+void nr_check_iframes_acked(struct sock *, unsigned short);
+
+/* nr_route.c */
+void nr_rt_device_down(struct net_device *);
+struct net_device *nr_dev_first(void);
+struct net_device *nr_dev_get(ax25_address *);
+int nr_rt_ioctl(unsigned int, void __user *);
+void nr_link_failed(ax25_cb *, int);
+int nr_route_frame(struct sk_buff *, ax25_cb *);
+extern const struct seq_operations nr_node_seqops;
+extern const struct seq_operations nr_neigh_seqops;
+void nr_rt_free(void);
+
+/* nr_subr.c */
+void nr_clear_queues(struct sock *);
+void nr_frames_acked(struct sock *, unsigned short);
+void nr_requeue_frames(struct sock *);
+int nr_validate_nr(struct sock *, unsigned short);
+int nr_in_rx_window(struct sock *, unsigned short);
+void nr_write_internal(struct sock *, int);
+
+void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags);
+
+/*
+ * This routine is called when a Connect Acknowledge with the Choke Flag
+ * set is needed to refuse a connection.
+ */
+#define nr_transmit_refusal(skb, mine) \
+do { \
+ __nr_transmit_reply((skb), (mine), NR_CONNACK | NR_CHOKE_FLAG); \
+} while (0)
+
+/*
+ * This routine is called when we don't have a circuit matching an incoming
+ * NET/ROM packet. This is a G8PZT Xrouter extension.
+ */
+#define nr_transmit_reset(skb, mine) \
+do { \
+ __nr_transmit_reply((skb), (mine), NR_RESET); \
+} while (0)
+
+void nr_disconnect(struct sock *, int);
+
+/* nr_timer.c */
+void nr_init_timers(struct sock *sk);
+void nr_start_heartbeat(struct sock *);
+void nr_start_t1timer(struct sock *);
+void nr_start_t2timer(struct sock *);
+void nr_start_t4timer(struct sock *);
+void nr_start_idletimer(struct sock *);
+void nr_stop_heartbeat(struct sock *);
+void nr_stop_t1timer(struct sock *);
+void nr_stop_t2timer(struct sock *);
+void nr_stop_t4timer(struct sock *);
+void nr_stop_idletimer(struct sock *);
+int nr_t1timer_running(struct sock *);
+
+/* sysctl_net_netrom.c */
+int nr_register_sysctl(void);
+void nr_unregister_sysctl(void);
+
+#endif
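
Per the comment above nr_node_lock(), the lock helpers also manage the node's refcount, so a caller only brackets its critical section. A sketch (the function name is illustrative):

static void example_set_quality(struct nr_node *nr_node, unsigned char quality)
{
	nr_node_lock(nr_node);		/* takes a reference + the spinlock */
	nr_node->routes[0].quality = quality;
	nr_node_unlock(nr_node);	/* drops the spinlock + the reference */
}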
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
new file mode 100644
index 000000000..2b12725de
--- /dev/null
+++ b/include/net/nexthop.h
@@ -0,0 +1,556 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Generic nexthop implementation
+ *
+ * Copyright (c) 2017-19 Cumulus Networks
+ * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
+ */
+
+#ifndef __LINUX_NEXTHOP_H
+#define __LINUX_NEXTHOP_H
+
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/route.h>
+#include <linux/types.h>
+#include <net/ip_fib.h>
+#include <net/ip6_fib.h>
+#include <net/netlink.h>
+
+#define NEXTHOP_VALID_USER_FLAGS RTNH_F_ONLINK
+
+struct nexthop;
+
+struct nh_config {
+ u32 nh_id;
+
+ u8 nh_family;
+ u8 nh_protocol;
+ u8 nh_blackhole;
+ u8 nh_fdb;
+ u32 nh_flags;
+
+ int nh_ifindex;
+ struct net_device *dev;
+
+ union {
+ __be32 ipv4;
+ struct in6_addr ipv6;
+ } gw;
+
+ struct nlattr *nh_grp;
+ u16 nh_grp_type;
+ u16 nh_grp_res_num_buckets;
+ unsigned long nh_grp_res_idle_timer;
+ unsigned long nh_grp_res_unbalanced_timer;
+ bool nh_grp_res_has_num_buckets;
+ bool nh_grp_res_has_idle_timer;
+ bool nh_grp_res_has_unbalanced_timer;
+
+ struct nlattr *nh_encap;
+ u16 nh_encap_type;
+
+ u32 nlflags;
+ struct nl_info nlinfo;
+};
+
+struct nh_info {
+ struct hlist_node dev_hash; /* entry on netns devhash */
+ struct nexthop *nh_parent;
+
+ u8 family;
+ bool reject_nh;
+ bool fdb_nh;
+
+ union {
+ struct fib_nh_common fib_nhc;
+ struct fib_nh fib_nh;
+ struct fib6_nh fib6_nh;
+ };
+};
+
+struct nh_res_bucket {
+ struct nh_grp_entry __rcu *nh_entry;
+ atomic_long_t used_time;
+ unsigned long migrated_time;
+ bool occupied;
+ u8 nh_flags;
+};
+
+struct nh_res_table {
+ struct net *net;
+ u32 nhg_id;
+ struct delayed_work upkeep_dw;
+
+ /* List of NHGEs that have too few buckets ("uw" for underweight).
+ * Reclaimed buckets will be given to entries in this list.
+ */
+ struct list_head uw_nh_entries;
+ unsigned long unbalanced_since;
+
+ u32 idle_timer;
+ u32 unbalanced_timer;
+
+ u16 num_nh_buckets;
+ struct nh_res_bucket nh_buckets[];
+};
+
+struct nh_grp_entry {
+ struct nexthop *nh;
+ u8 weight;
+
+ union {
+ struct {
+ atomic_t upper_bound;
+ } hthr;
+ struct {
+ /* Member on uw_nh_entries. */
+ struct list_head uw_nh_entry;
+
+ u16 count_buckets;
+ u16 wants_buckets;
+ } res;
+ };
+
+ struct list_head nh_list;
+ struct nexthop *nh_parent; /* nexthop of group with this entry */
+};
+
+struct nh_group {
+ struct nh_group *spare; /* spare group for removals */
+ u16 num_nh;
+ bool is_multipath;
+ bool hash_threshold;
+ bool resilient;
+ bool fdb_nh;
+ bool has_v4;
+
+ struct nh_res_table __rcu *res_table;
+ struct nh_grp_entry nh_entries[];
+};
+
+struct nexthop {
+ struct rb_node rb_node; /* entry on netns rbtree */
+ struct list_head fi_list; /* v4 entries using nh */
+ struct list_head f6i_list; /* v6 entries using nh */
+ struct list_head fdb_list; /* fdb entries using this nh */
+ struct list_head grp_list; /* nh group entries using this nh */
+ struct net *net;
+
+ u32 id;
+
+ u8 protocol; /* app managing this nh */
+ u8 nh_flags;
+ bool is_group;
+
+ refcount_t refcnt;
+ struct rcu_head rcu;
+
+ union {
+ struct nh_info __rcu *nh_info;
+ struct nh_group __rcu *nh_grp;
+ };
+};
+
+enum nexthop_event_type {
+ NEXTHOP_EVENT_DEL,
+ NEXTHOP_EVENT_REPLACE,
+ NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
+ NEXTHOP_EVENT_BUCKET_REPLACE,
+};
+
+enum nh_notifier_info_type {
+ NH_NOTIFIER_INFO_TYPE_SINGLE,
+ NH_NOTIFIER_INFO_TYPE_GRP,
+ NH_NOTIFIER_INFO_TYPE_RES_TABLE,
+ NH_NOTIFIER_INFO_TYPE_RES_BUCKET,
+};
+
+struct nh_notifier_single_info {
+ struct net_device *dev;
+ u8 gw_family;
+ union {
+ __be32 ipv4;
+ struct in6_addr ipv6;
+ };
+ u8 is_reject:1,
+ is_fdb:1,
+ has_encap:1;
+};
+
+struct nh_notifier_grp_entry_info {
+ u8 weight;
+ u32 id;
+ struct nh_notifier_single_info nh;
+};
+
+struct nh_notifier_grp_info {
+ u16 num_nh;
+ bool is_fdb;
+ struct nh_notifier_grp_entry_info nh_entries[];
+};
+
+struct nh_notifier_res_bucket_info {
+ u16 bucket_index;
+ unsigned int idle_timer_ms;
+ bool force;
+ struct nh_notifier_single_info old_nh;
+ struct nh_notifier_single_info new_nh;
+};
+
+struct nh_notifier_res_table_info {
+ u16 num_nh_buckets;
+ struct nh_notifier_single_info nhs[];
+};
+
+struct nh_notifier_info {
+ struct net *net;
+ struct netlink_ext_ack *extack;
+ u32 id;
+ enum nh_notifier_info_type type;
+ union {
+ struct nh_notifier_single_info *nh;
+ struct nh_notifier_grp_info *nh_grp;
+ struct nh_notifier_res_table_info *nh_res_table;
+ struct nh_notifier_res_bucket_info *nh_res_bucket;
+ };
+};
+
+int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
+int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
+void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap);
+void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
+ bool offload, bool trap);
+void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
+ unsigned long *activity);
+
+/* caller is holding rcu or rtnl; no reference taken to nexthop */
+struct nexthop *nexthop_find_by_id(struct net *net, u32 id);
+void nexthop_free_rcu(struct rcu_head *head);
+
+static inline bool nexthop_get(struct nexthop *nh)
+{
+ return refcount_inc_not_zero(&nh->refcnt);
+}
+
+static inline void nexthop_put(struct nexthop *nh)
+{
+ if (refcount_dec_and_test(&nh->refcnt))
+ call_rcu(&nh->rcu, nexthop_free_rcu);
+}
+
+static inline bool nexthop_cmp(const struct nexthop *nh1,
+ const struct nexthop *nh2)
+{
+ return nh1 == nh2;
+}
+
+static inline bool nexthop_is_fdb(const struct nexthop *nh)
+{
+ if (nh->is_group) {
+ const struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ return nh_grp->fdb_nh;
+ } else {
+ const struct nh_info *nhi;
+
+ nhi = rcu_dereference_rtnl(nh->nh_info);
+ return nhi->fdb_nh;
+ }
+}
+
+static inline bool nexthop_has_v4(const struct nexthop *nh)
+{
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ return nh_grp->has_v4;
+ }
+ return false;
+}
+
+static inline bool nexthop_is_multipath(const struct nexthop *nh)
+{
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ return nh_grp->is_multipath;
+ }
+ return false;
+}
+
+struct nexthop *nexthop_select_path(struct nexthop *nh, int hash);
+
+static inline unsigned int nexthop_num_path(const struct nexthop *nh)
+{
+ unsigned int rc = 1;
+
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ if (nh_grp->is_multipath)
+ rc = nh_grp->num_nh;
+ }
+
+ return rc;
+}
+
+static inline
+struct nexthop *nexthop_mpath_select(const struct nh_group *nhg, int nhsel)
+{
+ /* The for_nexthops macros in fib_semantics.c grab a pointer to
+ * the nexthop before checking nhsel.
+ */
+ if (nhsel >= nhg->num_nh)
+ return NULL;
+
+ return nhg->nh_entries[nhsel].nh;
+}
+
+static inline
+int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
+ u8 rt_family)
+{
+ struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
+ int i;
+
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nexthop *nhe = nhg->nh_entries[i].nh;
+ struct nh_info *nhi = rcu_dereference_rtnl(nhe->nh_info);
+ struct fib_nh_common *nhc = &nhi->fib_nhc;
+ int weight = nhg->nh_entries[i].weight;
+
+ if (fib_add_nexthop(skb, nhc, weight, rt_family, 0) < 0)
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+/* called with rcu lock */
+static inline bool nexthop_is_blackhole(const struct nexthop *nh)
+{
+ const struct nh_info *nhi;
+
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ if (nh_grp->num_nh > 1)
+ return false;
+
+ nh = nh_grp->nh_entries[0].nh;
+ }
+
+ nhi = rcu_dereference_rtnl(nh->nh_info);
+ return nhi->reject_nh;
+}
+
+static inline void nexthop_path_fib_result(struct fib_result *res, int hash)
+{
+ struct nh_info *nhi;
+ struct nexthop *nh;
+
+ nh = nexthop_select_path(res->fi->nh, hash);
+ nhi = rcu_dereference(nh->nh_info);
+ res->nhc = &nhi->fib_nhc;
+}
+
+/* called with rcu read lock or rtnl held */
+static inline
+struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel)
+{
+ struct nh_info *nhi;
+
+ BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0);
+ BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0);
+
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ if (nh_grp->is_multipath) {
+ nh = nexthop_mpath_select(nh_grp, nhsel);
+ if (!nh)
+ return NULL;
+ }
+ }
+
+ nhi = rcu_dereference_rtnl(nh->nh_info);
+ return &nhi->fib_nhc;
+}
+
+/* called from fib_table_lookup with rcu_lock */
+static inline
+struct fib_nh_common *nexthop_get_nhc_lookup(const struct nexthop *nh,
+ int fib_flags,
+ const struct flowi4 *flp,
+ int *nhsel)
+{
+ struct nh_info *nhi;
+
+ if (nh->is_group) {
+ struct nh_group *nhg = rcu_dereference(nh->nh_grp);
+ int i;
+
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nexthop *nhe = nhg->nh_entries[i].nh;
+
+ nhi = rcu_dereference(nhe->nh_info);
+ if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
+ *nhsel = i;
+ return &nhi->fib_nhc;
+ }
+ }
+ } else {
+ nhi = rcu_dereference(nh->nh_info);
+ if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
+ *nhsel = 0;
+ return &nhi->fib_nhc;
+ }
+ }
+
+ return NULL;
+}
+
+static inline bool nexthop_uses_dev(const struct nexthop *nh,
+ const struct net_device *dev)
+{
+ struct nh_info *nhi;
+
+ if (nh->is_group) {
+ struct nh_group *nhg = rcu_dereference(nh->nh_grp);
+ int i;
+
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nexthop *nhe = nhg->nh_entries[i].nh;
+
+ nhi = rcu_dereference(nhe->nh_info);
+ if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
+ return true;
+ }
+ } else {
+ nhi = rcu_dereference(nh->nh_info);
+ if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
+ return true;
+ }
+
+ return false;
+}
+
+static inline unsigned int fib_info_num_path(const struct fib_info *fi)
+{
+ if (unlikely(fi->nh))
+ return nexthop_num_path(fi->nh);
+
+ return fi->fib_nhs;
+}
+
+int fib_check_nexthop(struct nexthop *nh, u8 scope,
+ struct netlink_ext_ack *extack);
+
+static inline struct fib_nh_common *fib_info_nhc(struct fib_info *fi, int nhsel)
+{
+ if (unlikely(fi->nh))
+ return nexthop_fib_nhc(fi->nh, nhsel);
+
+ return &fi->fib_nh[nhsel].nh_common;
+}
+
+/* only used when fib_nh is built into fib_info */
+static inline struct fib_nh *fib_info_nh(struct fib_info *fi, int nhsel)
+{
+ WARN_ON(fi->nh);
+
+ return &fi->fib_nh[nhsel];
+}
+
+/*
+ * IPv6 variants
+ */
+int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
+ struct netlink_ext_ack *extack);
+
+/* Caller should either hold rcu_read_lock(), or RTNL. */
+static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
+{
+ struct nh_info *nhi;
+
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ nh = nexthop_mpath_select(nh_grp, 0);
+ if (!nh)
+ return NULL;
+ }
+
+ nhi = rcu_dereference_rtnl(nh->nh_info);
+ if (nhi->family == AF_INET6)
+ return &nhi->fib6_nh;
+
+ return NULL;
+}
+
+static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i)
+{
+ struct fib6_nh *fib6_nh;
+
+ fib6_nh = f6i->nh ? nexthop_fib6_nh(f6i->nh) : f6i->fib6_nh;
+ return fib6_nh->fib_nh_dev;
+}
+
+static inline void nexthop_path_fib6_result(struct fib6_result *res, int hash)
+{
+ struct nexthop *nh = res->f6i->nh;
+ struct nh_info *nhi;
+
+ nh = nexthop_select_path(nh, hash);
+
+ nhi = rcu_dereference_rtnl(nh->nh_info);
+ if (nhi->reject_nh) {
+ res->fib6_type = RTN_BLACKHOLE;
+ res->fib6_flags |= RTF_REJECT;
+ res->nh = nexthop_fib6_nh(nh);
+ } else {
+ res->nh = &nhi->fib6_nh;
+ }
+}
+
+int nexthop_for_each_fib6_nh(struct nexthop *nh,
+ int (*cb)(struct fib6_nh *nh, void *arg),
+ void *arg);
+
+static inline int nexthop_get_family(struct nexthop *nh)
+{
+ struct nh_info *nhi = rcu_dereference_rtnl(nh->nh_info);
+
+ return nhi->family;
+}
+
+static inline
+struct fib_nh_common *nexthop_fdb_nhc(struct nexthop *nh)
+{
+ struct nh_info *nhi = rcu_dereference_rtnl(nh->nh_info);
+
+ return &nhi->fib_nhc;
+}
+
+static inline struct fib_nh_common *nexthop_path_fdb_result(struct nexthop *nh,
+ int hash)
+{
+ struct nh_info *nhi;
+ struct nexthop *nhp;
+
+ nhp = nexthop_select_path(nh, hash);
+ if (unlikely(!nhp))
+ return NULL;
+ nhi = rcu_dereference(nhp->nh_info);
+ return &nhi->fib_nhc;
+}
+#endif
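
As the comment above nexthop_find_by_id() notes, the lookup takes no reference, so results are only stable under RCU (or RTNL). A sketch of a typical read-side query combining the helpers above (the function name is illustrative):

static bool example_nh_is_blackhole(struct net *net, u32 id)
{
	struct nexthop *nh;
	bool blackhole = false;

	rcu_read_lock();
	nh = nexthop_find_by_id(net, id);
	if (nh)
		blackhole = nexthop_is_blackhole(nh);
	rcu_read_unlock();

	return blackhole;
}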
diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h
new file mode 100644
index 000000000..bb3e8fdc0
--- /dev/null
+++ b/include/net/nfc/digital.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ */
+
+#ifndef __NFC_DIGITAL_H
+#define __NFC_DIGITAL_H
+
+#include <linux/skbuff.h>
+#include <net/nfc/nfc.h>
+
+/**
+ * Configuration types for in_configure_hw and tg_configure_hw.
+ */
+enum {
+ NFC_DIGITAL_CONFIG_RF_TECH = 0,
+ NFC_DIGITAL_CONFIG_FRAMING,
+};
+
+/**
+ * RF technology values passed as param argument to in_configure_hw and
+ * tg_configure_hw for NFC_DIGITAL_CONFIG_RF_TECH configuration type.
+ */
+enum {
+ NFC_DIGITAL_RF_TECH_106A = 0,
+ NFC_DIGITAL_RF_TECH_212F,
+ NFC_DIGITAL_RF_TECH_424F,
+ NFC_DIGITAL_RF_TECH_ISO15693,
+ NFC_DIGITAL_RF_TECH_106B,
+
+ NFC_DIGITAL_RF_TECH_LAST,
+};
+
+/**
+ * Framing configuration passed as param argument to in_configure_hw and
+ * tg_configure_hw for NFC_DIGITAL_CONFIG_FRAMING configuration type.
+ */
+enum {
+ NFC_DIGITAL_FRAMING_NFCA_SHORT = 0,
+ NFC_DIGITAL_FRAMING_NFCA_STANDARD,
+ NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A,
+ NFC_DIGITAL_FRAMING_NFCA_ANTICOL_COMPLETE,
+
+ NFC_DIGITAL_FRAMING_NFCA_T1T,
+ NFC_DIGITAL_FRAMING_NFCA_T2T,
+ NFC_DIGITAL_FRAMING_NFCA_T4T,
+ NFC_DIGITAL_FRAMING_NFCA_NFC_DEP,
+
+ NFC_DIGITAL_FRAMING_NFCF,
+ NFC_DIGITAL_FRAMING_NFCF_T3T,
+ NFC_DIGITAL_FRAMING_NFCF_NFC_DEP,
+ NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED,
+
+ NFC_DIGITAL_FRAMING_ISO15693_INVENTORY,
+ NFC_DIGITAL_FRAMING_ISO15693_T5T,
+
+ NFC_DIGITAL_FRAMING_NFCB,
+ NFC_DIGITAL_FRAMING_NFCB_T4T,
+
+ NFC_DIGITAL_FRAMING_LAST,
+};
+
+#define DIGITAL_MDAA_NFCID1_SIZE 3
+
+struct digital_tg_mdaa_params {
+ u16 sens_res;
+ u8 nfcid1[DIGITAL_MDAA_NFCID1_SIZE];
+ u8 sel_res;
+
+ u8 nfcid2[NFC_NFCID2_MAXSIZE];
+ u16 sc;
+};
+
+struct nfc_digital_dev;
+
+/**
+ * nfc_digital_cmd_complete_t - Definition of command result callback
+ *
+ * @ddev: nfc_digital_device ref
+ * @arg: user data
+ * @resp: response data
+ *
+ * The resp pointer can be an error code and will be checked with the
+ * IS_ERR() macro. The callback is responsible for freeing the resp sk_buff.
+ */
+typedef void (*nfc_digital_cmd_complete_t)(struct nfc_digital_dev *ddev,
+ void *arg, struct sk_buff *resp);
+
+/**
+ * Device side NFC Digital operations
+ *
+ * Initiator mode:
+ * @in_configure_hw: Hardware configuration for RF technology and communication
+ * framing in initiator mode. This is a synchronous function.
+ * @in_send_cmd: Initiator mode data exchange using RF technology and framing
+ * previously set with in_configure_hw. The peer response is returned
+ * through callback cb. If an I/O error occurs or the peer didn't reply
+ * within the specified timeout (ms), the error code is passed back through
+ * the resp pointer. This is an asynchronous function.
+ *
+ * Target mode: Only NFC-DEP protocol is supported in target mode.
+ * @tg_configure_hw: Hardware configuration for RF technology and communication
+ * framing in target mode. This is a synchronous function.
+ * @tg_send_cmd: Target mode data exchange using RF technology and framing
+ * previously set with tg_configure_hw. The peer's next command is returned
+ * through callback cb. If an I/O error occurs or the peer didn't reply
+ * within the specified timeout (ms), the error code is passed back through
+ * the resp pointer. This is an asynchronous function.
+ * @tg_listen: Put the device in listen mode waiting for data from the peer
+ * device. This is an asynchronous function.
+ * @tg_listen_mdaa: If supported, put the device in automatic listen mode with
+ * mode detection and automatic anti-collision. In this mode, the device
+ * automatically detects the RF technology and executes the anti-collision
+ * detection using the command responses specified in mdaa_params. The
+ * mdaa_params structure contains SENS_RES, NFCID1, and SEL_RES for 106A RF
+ * tech. NFCID2 and system code (sc) for 212F and 424F. The driver returns
+ * the NFC-DEP ATR_REQ command through cb. The digital stack deduces the RF
+ * tech by analyzing the SoD of the frame containing the ATR_REQ command.
+ * This is an asynchronous function.
+ * @tg_listen_md: If supported, put the device in automatic listen mode with
+ * mode detection but without automatic anti-collision. In this mode, the
+ * device automatically detects the RF technology. The detected RF
+ * technology can be retrieved by calling @tg_get_rf_tech.
+ * The digital stack will then perform the appropriate anti-collision
+ * sequence. This is an asynchronous function.
+ * @tg_get_rf_tech: Required when @tg_listen_md is supported, unused otherwise.
+ * Return the RF Technology that was detected by the @tg_listen_md call.
+ * This is a synchronous function.
+ *
+ * @switch_rf: Turns the device radio on or off. The stack does not explicitly
+ * call switch_rf to turn the radio on. A call to in|tg_configure_hw must turn
+ * the device radio on.
+ * @abort_cmd: Discard the last sent command.
+ *
+ * Notes: Asynchronous functions have a timeout parameter. It is the driver's
+ * responsibility to call the digital stack back through the
+ * nfc_digital_cmd_complete_t callback when no RF response has been
+ * received within the specified time (in milliseconds). In that case the
+ * driver must set the resp sk_buff to ERR_PTR(-ETIMEDOUT).
+ * Since the digital stack serializes commands to be sent, it's mandatory
+ * for the driver to handle the timeout correctly. Otherwise the stack
+ * would not be able to send new commands, waiting for the reply of the
+ * current one.
+ */
+struct nfc_digital_ops {
+ int (*in_configure_hw)(struct nfc_digital_dev *ddev, int type,
+ int param);
+ int (*in_send_cmd)(struct nfc_digital_dev *ddev, struct sk_buff *skb,
+ u16 timeout, nfc_digital_cmd_complete_t cb,
+ void *arg);
+
+ int (*tg_configure_hw)(struct nfc_digital_dev *ddev, int type,
+ int param);
+ int (*tg_send_cmd)(struct nfc_digital_dev *ddev, struct sk_buff *skb,
+ u16 timeout, nfc_digital_cmd_complete_t cb,
+ void *arg);
+ int (*tg_listen)(struct nfc_digital_dev *ddev, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg);
+ int (*tg_listen_mdaa)(struct nfc_digital_dev *ddev,
+ struct digital_tg_mdaa_params *mdaa_params,
+ u16 timeout, nfc_digital_cmd_complete_t cb,
+ void *arg);
+ int (*tg_listen_md)(struct nfc_digital_dev *ddev, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg);
+ int (*tg_get_rf_tech)(struct nfc_digital_dev *ddev, u8 *rf_tech);
+
+ int (*switch_rf)(struct nfc_digital_dev *ddev, bool on);
+ void (*abort_cmd)(struct nfc_digital_dev *ddev);
+};
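+
+/*
+ * Timeout-path sketch for the note above (the my_drv context and its
+ * fields are hypothetical; not a template mandated by the stack):
+ *
+ *	static void my_drv_cmd_timeout(struct timer_list *t)
+ *	{
+ *		struct my_drv *drv = from_timer(drv, t, cmd_timer);
+ *
+ *		drv->cb(drv->ddev, drv->cb_arg, ERR_PTR(-ETIMEDOUT));
+ *	}
+ *
+ * This unblocks the serialized command queue so the stack can send the
+ * next command.
+ */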
+
+#define NFC_DIGITAL_POLL_MODE_COUNT_MAX 6 /* 106A, 212F, and 424F in & tg */
+
+typedef int (*digital_poll_t)(struct nfc_digital_dev *ddev, u8 rf_tech);
+
+struct digital_poll_tech {
+ u8 rf_tech;
+ digital_poll_t poll_func;
+};
+
+/**
+ * Driver capabilities - bit mask made of the following values
+ *
+ * @NFC_DIGITAL_DRV_CAPS_IN_CRC: The driver handles CRC calculation in initiator
+ * mode.
+ * @NFC_DIGITAL_DRV_CAPS_TG_CRC: The driver handles CRC calculation in target
+ * mode.
+ */
+#define NFC_DIGITAL_DRV_CAPS_IN_CRC 0x0001
+#define NFC_DIGITAL_DRV_CAPS_TG_CRC 0x0002
+
+struct nfc_digital_dev {
+ struct nfc_dev *nfc_dev;
+ const struct nfc_digital_ops *ops;
+
+ u32 protocols;
+
+ int tx_headroom;
+ int tx_tailroom;
+
+ u32 driver_capabilities;
+ void *driver_data;
+
+ struct digital_poll_tech poll_techs[NFC_DIGITAL_POLL_MODE_COUNT_MAX];
+ u8 poll_tech_count;
+ u8 poll_tech_index;
+ struct mutex poll_lock;
+
+ struct work_struct cmd_work;
+ struct work_struct cmd_complete_work;
+ struct list_head cmd_queue;
+ struct mutex cmd_lock;
+
+ struct delayed_work poll_work;
+
+ u8 curr_protocol;
+ u8 curr_rf_tech;
+ u8 curr_nfc_dep_pni;
+ u8 did;
+ u16 dep_rwt;
+
+ u8 local_payload_max;
+ u8 remote_payload_max;
+
+ struct sk_buff *chaining_skb;
+ struct digital_data_exch *data_exch;
+
+ int atn_count;
+ int nack_count;
+
+ struct sk_buff *saved_skb;
+
+ u16 target_fsc;
+
+ int (*skb_check_crc)(struct sk_buff *skb);
+ void (*skb_add_crc)(struct sk_buff *skb);
+};
+
+struct nfc_digital_dev *nfc_digital_allocate_device(const struct nfc_digital_ops *ops,
+ __u32 supported_protocols,
+ __u32 driver_capabilities,
+ int tx_headroom,
+ int tx_tailroom);
+void nfc_digital_free_device(struct nfc_digital_dev *ndev);
+int nfc_digital_register_device(struct nfc_digital_dev *ndev);
+void nfc_digital_unregister_device(struct nfc_digital_dev *ndev);
+
+static inline void nfc_digital_set_parent_dev(struct nfc_digital_dev *ndev,
+ struct device *dev)
+{
+ nfc_set_parent_dev(ndev->nfc_dev, dev);
+}
+
+static inline void nfc_digital_set_drvdata(struct nfc_digital_dev *dev,
+ void *data)
+{
+ dev->driver_data = data;
+}
+
+static inline void *nfc_digital_get_drvdata(struct nfc_digital_dev *dev)
+{
+ return dev->driver_data;
+}
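+
+/*
+ * Typical bring-up sketch (my_ops, drv and pdev are hypothetical; error
+ * handling trimmed for brevity):
+ *
+ *	ddev = nfc_digital_allocate_device(&my_ops, protocols,
+ *					   NFC_DIGITAL_DRV_CAPS_IN_CRC,
+ *					   headroom, tailroom);
+ *	if (!ddev)
+ *		return -ENOMEM;
+ *	nfc_digital_set_parent_dev(ddev, &pdev->dev);
+ *	nfc_digital_set_drvdata(ddev, drv);
+ *	rc = nfc_digital_register_device(ddev);
+ *	if (rc)
+ *		nfc_digital_free_device(ddev);
+ */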
+
+#endif /* __NFC_DIGITAL_H */
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
new file mode 100644
index 000000000..756c11084
--- /dev/null
+++ b/include/net/nfc/hci.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2011 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __NET_HCI_H
+#define __NET_HCI_H
+
+#include <linux/skbuff.h>
+
+#include <net/nfc/nfc.h>
+
+struct nfc_hci_dev;
+
+struct nfc_hci_ops {
+ int (*open) (struct nfc_hci_dev *hdev);
+ void (*close) (struct nfc_hci_dev *hdev);
+ int (*load_session) (struct nfc_hci_dev *hdev);
+ int (*hci_ready) (struct nfc_hci_dev *hdev);
+ /*
+ * xmit must always send the complete buffer before
+ * returning. Returned result must be 0 for success
+ * or negative for failure.
+ */
+ int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
+ int (*start_poll) (struct nfc_hci_dev *hdev,
+ u32 im_protocols, u32 tm_protocols);
+ void (*stop_poll) (struct nfc_hci_dev *hdev);
+ int (*dep_link_up)(struct nfc_hci_dev *hdev, struct nfc_target *target,
+ u8 comm_mode, u8 *gb, size_t gb_len);
+ int (*dep_link_down)(struct nfc_hci_dev *hdev);
+ int (*target_from_gate) (struct nfc_hci_dev *hdev, u8 gate,
+ struct nfc_target *target);
+ int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate,
+ struct nfc_target *target);
+ int (*im_transceive) (struct nfc_hci_dev *hdev,
+ struct nfc_target *target, struct sk_buff *skb,
+ data_exchange_cb_t cb, void *cb_context);
+ int (*tm_send)(struct nfc_hci_dev *hdev, struct sk_buff *skb);
+ int (*check_presence)(struct nfc_hci_dev *hdev,
+ struct nfc_target *target);
+ int (*event_received)(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
+ struct sk_buff *skb);
+ void (*cmd_received)(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+ struct sk_buff *skb);
+ int (*fw_download)(struct nfc_hci_dev *hdev, const char *firmware_name);
+ int (*discover_se)(struct nfc_hci_dev *dev);
+ int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx);
+ int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx);
+ int (*se_io)(struct nfc_hci_dev *dev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context);
+};
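+
+/*
+ * xmit sketch under the contract documented above (my_phy_write is a
+ * hypothetical transport helper; only the "send the complete buffer,
+ * return 0 or a negative errno" convention comes from this header):
+ *
+ *	static int my_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+ *	{
+ *		struct my_drv *drv = nfc_hci_get_clientdata(hdev);
+ *
+ *		return my_phy_write(drv, skb);
+ *	}
+ */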
+
+/* Pipes */
+#define NFC_HCI_DO_NOT_CREATE_PIPE 0x81
+#define NFC_HCI_INVALID_PIPE 0x80
+#define NFC_HCI_INVALID_GATE 0xFF
+#define NFC_HCI_INVALID_HOST 0x80
+#define NFC_HCI_LINK_MGMT_PIPE 0x00
+#define NFC_HCI_ADMIN_PIPE 0x01
+
+struct nfc_hci_gate {
+ u8 gate;
+ u8 pipe;
+};
+
+struct nfc_hci_pipe {
+ u8 gate;
+ u8 dest_host;
+};
+
+#define NFC_HCI_MAX_CUSTOM_GATES 50
+/*
+ * According to specification 102 622 chapter 4.4 Pipes,
+ * the pipe identifier is 7 bits long.
+ */
+#define NFC_HCI_MAX_PIPES 128
+struct nfc_hci_init_data {
+ u8 gate_count;
+ struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
+ char session_id[9];
+};
+
+typedef int (*xmit) (struct sk_buff *skb, void *cb_data);
+
+#define NFC_HCI_MAX_GATES 256
+
+/*
+ * These values can be specified by a driver to indicate it requires some
+ * adaptation of the HCI standard.
+ *
+ * NFC_HCI_QUIRK_SHORT_CLEAR - send HCI_ADM_CLEAR_ALL_PIPE cmd with no params
+ */
+enum {
+ NFC_HCI_QUIRK_SHORT_CLEAR = 0,
+};
+
+struct nfc_hci_dev {
+ struct nfc_dev *ndev;
+
+ u32 max_data_link_payload;
+
+ bool shutting_down;
+
+ struct mutex msg_tx_mutex;
+
+ struct list_head msg_tx_queue;
+
+ struct work_struct msg_tx_work;
+
+ struct timer_list cmd_timer;
+ struct hci_msg *cmd_pending_msg;
+
+ struct sk_buff_head rx_hcp_frags;
+
+ struct work_struct msg_rx_work;
+
+ struct sk_buff_head msg_rx_queue;
+
+ const struct nfc_hci_ops *ops;
+
+ struct nfc_llc *llc;
+
+ struct nfc_hci_init_data init_data;
+
+ void *clientdata;
+
+ u8 gate2pipe[NFC_HCI_MAX_GATES];
+ struct nfc_hci_pipe pipes[NFC_HCI_MAX_PIPES];
+
+ u8 sw_romlib;
+ u8 sw_patch;
+ u8 sw_flashlib_major;
+ u8 sw_flashlib_minor;
+
+ u8 hw_derivative;
+ u8 hw_version;
+ u8 hw_mpw;
+ u8 hw_software;
+ u8 hw_bsid;
+
+ int async_cb_type;
+ data_exchange_cb_t async_cb;
+ void *async_cb_context;
+
+ u8 *gb;
+ size_t gb_len;
+
+ unsigned long quirks;
+};
+
+/* hci device allocation */
+struct nfc_hci_dev *nfc_hci_allocate_device(const struct nfc_hci_ops *ops,
+ struct nfc_hci_init_data *init_data,
+ unsigned long quirks,
+ u32 protocols,
+ const char *llc_name,
+ int tx_headroom,
+ int tx_tailroom,
+ int max_link_payload);
+void nfc_hci_free_device(struct nfc_hci_dev *hdev);
+
+int nfc_hci_register_device(struct nfc_hci_dev *hdev);
+void nfc_hci_unregister_device(struct nfc_hci_dev *hdev);
+
+void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata);
+void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev);
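+
+/*
+ * Allocation/registration sketch; LLC_SHDLC_NAME comes from
+ * <net/nfc/llc.h>, the other names are hypothetical:
+ *
+ *	hdev = nfc_hci_allocate_device(&my_hci_ops, &init_data, quirks,
+ *				       protocols, LLC_SHDLC_NAME,
+ *				       headroom, tailroom, max_payload);
+ *	if (!hdev)
+ *		return -ENOMEM;
+ *	nfc_hci_set_clientdata(hdev, drv);
+ *	rc = nfc_hci_register_device(hdev);
+ */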
+
+static inline int nfc_hci_set_vendor_cmds(struct nfc_hci_dev *hdev,
+ const struct nfc_vendor_cmd *cmds,
+ int n_cmds)
+{
+ return nfc_set_vendor_cmds(hdev->ndev, cmds, n_cmds);
+}
+
+void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err);
+
+int nfc_hci_result_to_errno(u8 result);
+void nfc_hci_reset_pipes(struct nfc_hci_dev *dev);
+void nfc_hci_reset_pipes_per_host(struct nfc_hci_dev *hdev, u8 host);
+
+/* Host IDs */
+#define NFC_HCI_HOST_CONTROLLER_ID 0x00
+#define NFC_HCI_TERMINAL_HOST_ID 0x01
+#define NFC_HCI_UICC_HOST_ID 0x02
+
+/* Host Controller Gates and registry indexes */
+#define NFC_HCI_ADMIN_GATE 0x00
+#define NFC_HCI_ADMIN_SESSION_IDENTITY 0x01
+#define NFC_HCI_ADMIN_MAX_PIPE 0x02
+#define NFC_HCI_ADMIN_WHITELIST 0x03
+#define NFC_HCI_ADMIN_HOST_LIST 0x04
+
+#define NFC_HCI_LOOPBACK_GATE 0x04
+
+#define NFC_HCI_ID_MGMT_GATE 0x05
+#define NFC_HCI_ID_MGMT_VERSION_SW 0x01
+#define NFC_HCI_ID_MGMT_VERSION_HW 0x03
+#define NFC_HCI_ID_MGMT_VENDOR_NAME 0x04
+#define NFC_HCI_ID_MGMT_MODEL_ID 0x05
+#define NFC_HCI_ID_MGMT_HCI_VERSION 0x02
+#define NFC_HCI_ID_MGMT_GATES_LIST 0x06
+
+#define NFC_HCI_LINK_MGMT_GATE 0x06
+#define NFC_HCI_LINK_MGMT_REC_ERROR 0x01
+
+#define NFC_HCI_RF_READER_B_GATE 0x11
+#define NFC_HCI_RF_READER_B_PUPI 0x03
+#define NFC_HCI_RF_READER_B_APPLICATION_DATA 0x04
+#define NFC_HCI_RF_READER_B_AFI 0x02
+#define NFC_HCI_RF_READER_B_HIGHER_LAYER_RESPONSE 0x01
+#define NFC_HCI_RF_READER_B_HIGHER_LAYER_DATA 0x05
+
+#define NFC_HCI_RF_READER_A_GATE 0x13
+#define NFC_HCI_RF_READER_A_UID 0x02
+#define NFC_HCI_RF_READER_A_ATQA 0x04
+#define NFC_HCI_RF_READER_A_APPLICATION_DATA 0x05
+#define NFC_HCI_RF_READER_A_SAK 0x03
+#define NFC_HCI_RF_READER_A_FWI_SFGT 0x06
+#define NFC_HCI_RF_READER_A_DATARATE_MAX 0x01
+
+#define NFC_HCI_TYPE_A_SEL_PROT(x) (((x) & 0x60) >> 5)
+#define NFC_HCI_TYPE_A_SEL_PROT_MIFARE 0
+#define NFC_HCI_TYPE_A_SEL_PROT_ISO14443 1
+#define NFC_HCI_TYPE_A_SEL_PROT_DEP 2
+#define NFC_HCI_TYPE_A_SEL_PROT_ISO14443_DEP 3
+
+/* Generic events */
+#define NFC_HCI_EVT_HCI_END_OF_OPERATION 0x01
+#define NFC_HCI_EVT_POST_DATA 0x02
+#define NFC_HCI_EVT_HOT_PLUG 0x03
+
+/* Generic commands */
+#define NFC_HCI_ANY_SET_PARAMETER 0x01
+#define NFC_HCI_ANY_GET_PARAMETER 0x02
+#define NFC_HCI_ANY_OPEN_PIPE 0x03
+#define NFC_HCI_ANY_CLOSE_PIPE 0x04
+
+/* Reader RF gates events */
+#define NFC_HCI_EVT_READER_REQUESTED 0x10
+#define NFC_HCI_EVT_END_OPERATION 0x11
+
+/* Reader Application gate events */
+#define NFC_HCI_EVT_TARGET_DISCOVERED 0x10
+
+/* receiving messages from lower layer */
+void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
+ struct sk_buff *skb);
+void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+ struct sk_buff *skb);
+void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
+ struct sk_buff *skb);
+void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb);
+
+/* connecting to gates and sending hci instructions */
+int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate,
+ u8 pipe);
+int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate);
+int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev);
+int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
+ struct sk_buff **skb);
+int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
+ const u8 *param, size_t param_len);
+int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
+ const u8 *param, size_t param_len, struct sk_buff **skb);
+int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
+ const u8 *param, size_t param_len,
+ data_exchange_cb_t cb, void *cb_context);
+int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
+ const u8 *param, size_t param_len);
+int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate);
+u32 nfc_hci_sak_to_protocol(u8 sak);
+
+#endif /* __NET_HCI_H */
diff --git a/include/net/nfc/llc.h b/include/net/nfc/llc.h
new file mode 100644
index 000000000..1f699cb17
--- /dev/null
+++ b/include/net/nfc/llc.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Link Layer Control manager public interface
+ *
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __NFC_LLC_H_
+#define __NFC_LLC_H_
+
+#include <net/nfc/hci.h>
+#include <linux/skbuff.h>
+
+#define LLC_NOP_NAME "nop"
+#define LLC_SHDLC_NAME "shdlc"
+
+typedef void (*rcv_to_hci_t) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
+typedef int (*xmit_to_drv_t) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
+typedef void (*llc_failure_t) (struct nfc_hci_dev *hdev, int err);
+
+struct nfc_llc;
+
+struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev,
+ xmit_to_drv_t xmit_to_drv,
+ rcv_to_hci_t rcv_to_hci, int tx_headroom,
+ int tx_tailroom, llc_failure_t llc_failure);
+void nfc_llc_free(struct nfc_llc *llc);
+
+int nfc_llc_start(struct nfc_llc *llc);
+int nfc_llc_stop(struct nfc_llc *llc);
+void nfc_llc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb);
+int nfc_llc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb);
+
+int nfc_llc_init(void);
+void nfc_llc_exit(void);
+
+#endif /* __NFC_LLC_H_ */
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
new file mode 100644
index 000000000..e82f55f54
--- /dev/null
+++ b/include/net/nfc/nci.h
@@ -0,0 +1,561 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2014 Marvell International Ltd.
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on hci.h, which was written
+ * by Maxim Krasnyansky.
+ */
+
+#ifndef __NCI_H
+#define __NCI_H
+
+#include <net/nfc/nfc.h>
+
+/* NCI constants */
+#define NCI_MAX_NUM_MAPPING_CONFIGS 10
+#define NCI_MAX_NUM_RF_CONFIGS 10
+#define NCI_MAX_NUM_CONN 10
+#define NCI_MAX_PARAM_LEN 251
+#define NCI_MAX_PAYLOAD_SIZE 255
+#define NCI_MAX_PACKET_SIZE 258
+#define NCI_MAX_LARGE_PARAMS_NCI_v2 15
+#define NCI_VER_2_MASK 0x20
+
+/* NCI Status Codes */
+#define NCI_STATUS_OK 0x00
+#define NCI_STATUS_REJECTED 0x01
+#define NCI_STATUS_RF_FRAME_CORRUPTED 0x02
+#define NCI_STATUS_FAILED 0x03
+#define NCI_STATUS_NOT_INITIALIZED 0x04
+#define NCI_STATUS_SYNTAX_ERROR 0x05
+#define NCI_STATUS_SEMANTIC_ERROR 0x06
+#define NCI_STATUS_UNKNOWN_GID 0x07
+#define NCI_STATUS_UNKNOWN_OID 0x08
+#define NCI_STATUS_INVALID_PARAM 0x09
+#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0a
+/* Discovery Specific Status Codes */
+#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0
+#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1
+#define NCI_STATUS_DISCOVERY_TEAR_DOWN 0xa2
+/* RF Interface Specific Status Codes */
+#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0
+#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1
+#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2
+/* NFCEE Interface Specific Status Codes */
+#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc0
+#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc1
+#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc2
+#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc3
+
+/* NFCEE Interface/Protocols */
+#define NCI_NFCEE_INTERFACE_APDU 0x00
+#define NCI_NFCEE_INTERFACE_HCI_ACCESS 0x01
+#define NCI_NFCEE_INTERFACE_TYPE3_CMD_SET 0x02
+#define NCI_NFCEE_INTERFACE_TRANSPARENT 0x03
+
+/* Destination type */
+#define NCI_DESTINATION_NFCC_LOOPBACK 0x01
+#define NCI_DESTINATION_REMOTE_NFC_ENDPOINT 0x02
+#define NCI_DESTINATION_NFCEE 0x03
+
+/* Destination-specific parameters type */
+#define NCI_DESTINATION_SPECIFIC_PARAM_RF_TYPE 0x00
+#define NCI_DESTINATION_SPECIFIC_PARAM_NFCEE_TYPE 0x01
+
+/* NFCEE Discovery Action */
+#define NCI_NFCEE_DISCOVERY_ACTION_DISABLE 0x00
+#define NCI_NFCEE_DISCOVERY_ACTION_ENABLE 0x01
+
+/* NCI RF Technology and Mode */
+#define NCI_NFC_A_PASSIVE_POLL_MODE 0x00
+#define NCI_NFC_B_PASSIVE_POLL_MODE 0x01
+#define NCI_NFC_F_PASSIVE_POLL_MODE 0x02
+#define NCI_NFC_A_ACTIVE_POLL_MODE 0x03
+#define NCI_NFC_F_ACTIVE_POLL_MODE 0x05
+#define NCI_NFC_V_PASSIVE_POLL_MODE 0x06
+#define NCI_NFC_A_PASSIVE_LISTEN_MODE 0x80
+#define NCI_NFC_B_PASSIVE_LISTEN_MODE 0x81
+#define NCI_NFC_F_PASSIVE_LISTEN_MODE 0x82
+#define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83
+#define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85
+
+#define NCI_RF_TECH_MODE_LISTEN_MASK 0x80
+
+/* NCI RF Technologies */
+#define NCI_NFC_RF_TECHNOLOGY_A 0x00
+#define NCI_NFC_RF_TECHNOLOGY_B 0x01
+#define NCI_NFC_RF_TECHNOLOGY_F 0x02
+#define NCI_NFC_RF_TECHNOLOGY_V 0x03
+
+/* NCI Bit Rates */
+#define NCI_NFC_BIT_RATE_106 0x00
+#define NCI_NFC_BIT_RATE_212 0x01
+#define NCI_NFC_BIT_RATE_424 0x02
+#define NCI_NFC_BIT_RATE_848 0x03
+#define NCI_NFC_BIT_RATE_1695 0x04
+#define NCI_NFC_BIT_RATE_3390 0x05
+#define NCI_NFC_BIT_RATE_6780 0x06
+#define NCI_NFC_BIT_RATE_26 0x20
+
+/* NCI RF Protocols */
+#define NCI_RF_PROTOCOL_UNKNOWN 0x00
+#define NCI_RF_PROTOCOL_T1T 0x01
+#define NCI_RF_PROTOCOL_T2T 0x02
+#define NCI_RF_PROTOCOL_T3T 0x03
+#define NCI_RF_PROTOCOL_ISO_DEP 0x04
+#define NCI_RF_PROTOCOL_NFC_DEP 0x05
+#define NCI_RF_PROTOCOL_T5T 0x06
+
+/* NCI RF Interfaces */
+#define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00
+#define NCI_RF_INTERFACE_FRAME 0x01
+#define NCI_RF_INTERFACE_ISO_DEP 0x02
+#define NCI_RF_INTERFACE_NFC_DEP 0x03
+
+/* NCI Configuration Parameter Tags */
+#define NCI_PN_ATR_REQ_GEN_BYTES 0x29
+#define NCI_LN_ATR_RES_GEN_BYTES 0x61
+#define NCI_LA_SEL_INFO 0x32
+#define NCI_LF_PROTOCOL_TYPE 0x50
+#define NCI_LF_CON_BITR_F 0x54
+
+/* NCI Configuration Parameters masks */
+#define NCI_LA_SEL_INFO_ISO_DEP_MASK 0x20
+#define NCI_LA_SEL_INFO_NFC_DEP_MASK 0x40
+#define NCI_LF_PROTOCOL_TYPE_NFC_DEP_MASK 0x02
+#define NCI_LF_CON_BITR_F_212 0x02
+#define NCI_LF_CON_BITR_F_424 0x04
+
+/* NCI 2.x Feature Enable Bit */
+#define NCI_FEATURE_DISABLE 0x00
+
+/* NCI Reset types */
+#define NCI_RESET_TYPE_KEEP_CONFIG 0x00
+#define NCI_RESET_TYPE_RESET_CONFIG 0x01
+
+/* NCI Static RF connection ID */
+#define NCI_STATIC_RF_CONN_ID 0x00
+
+/* NCI Data Flow Control */
+#define NCI_DATA_FLOW_CONTROL_NOT_USED 0xff
+
+/* NCI RF_DISCOVER_MAP_CMD modes */
+#define NCI_DISC_MAP_MODE_POLL 0x01
+#define NCI_DISC_MAP_MODE_LISTEN 0x02
+
+/* NCI Discover Notification Type */
+#define NCI_DISCOVER_NTF_TYPE_LAST 0x00
+#define NCI_DISCOVER_NTF_TYPE_LAST_NFCC 0x01
+#define NCI_DISCOVER_NTF_TYPE_MORE 0x02
+
+/* NCI Deactivation Type */
+#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00
+#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01
+#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02
+#define NCI_DEACTIVATE_TYPE_DISCOVERY 0x03
+
+/* Message Type (MT) */
+#define NCI_MT_DATA_PKT 0x00
+#define NCI_MT_CMD_PKT 0x01
+#define NCI_MT_RSP_PKT 0x02
+#define NCI_MT_NTF_PKT 0x03
+
+#define nci_mt(hdr) (((hdr)[0]>>5)&0x07)
+#define nci_mt_set(hdr, mt) ((hdr)[0] |= (__u8)(((mt)&0x07)<<5))
+
+/* Packet Boundary Flag (PBF) */
+#define NCI_PBF_LAST 0x00
+#define NCI_PBF_CONT 0x01
+
+#define nci_pbf(hdr) (__u8)(((hdr)[0]>>4)&0x01)
+#define nci_pbf_set(hdr, pbf) ((hdr)[0] |= (__u8)(((pbf)&0x01)<<4))
+
+/* Control Opcode manipulation */
+#define nci_opcode_pack(gid, oid) (__u16)((((__u16)((gid)&0x0f))<<8)|\
+ ((__u16)((oid)&0x3f)))
+#define nci_opcode(hdr) nci_opcode_pack(hdr[0], hdr[1])
+#define nci_opcode_gid(op) (__u8)(((op)&0x0f00)>>8)
+#define nci_opcode_oid(op) (__u8)((op)&0x003f)
+
+/* Payload Length */
+#define nci_plen(hdr) (__u8)((hdr)[2])
+
+/* Connection ID */
+#define nci_conn_id(hdr) (__u8)(((hdr)[0])&0x0f)
+
+/* GID values */
+#define NCI_GID_CORE 0x0
+#define NCI_GID_RF_MGMT 0x1
+#define NCI_GID_NFCEE_MGMT 0x2
+#define NCI_GID_PROPRIETARY 0xf
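+
+/*
+ * Worked example of the opcode/header helpers above:
+ *
+ *	__u16 op = nci_opcode_pack(NCI_GID_CORE, 0x00);	// CORE_RESET, 0x0000
+ *	__u8 gid = nci_opcode_gid(op);			// 0x0
+ *	__u8 oid = nci_opcode_oid(op);			// 0x00
+ *
+ * On a received 3-byte control header hdr[], nci_mt(hdr) extracts the
+ * message type from bits 7:5 of byte 0, nci_pbf(hdr) the boundary flag
+ * from bit 4, and nci_plen(hdr) the payload length from byte 2.
+ */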
+
+/* ----- NCI over SPI head/crc(tail) room needed for outgoing frames ----- */
+#define NCI_SPI_HDR_LEN 4
+#define NCI_SPI_CRC_LEN 2
+
+/* ---- NCI Packet structures ---- */
+#define NCI_CTRL_HDR_SIZE 3
+#define NCI_DATA_HDR_SIZE 3
+
+struct nci_ctrl_hdr {
+ __u8 gid; /* MT & PBF & GID */
+ __u8 oid;
+ __u8 plen;
+} __packed;
+
+struct nci_data_hdr {
+ __u8 conn_id; /* MT & PBF & ConnID */
+ __u8 rfu;
+ __u8 plen;
+} __packed;
+
+/* ------------------------ */
+/* ----- NCI Commands ---- */
+/* ------------------------ */
+#define NCI_OP_CORE_RESET_CMD nci_opcode_pack(NCI_GID_CORE, 0x00)
+struct nci_core_reset_cmd {
+ __u8 reset_type;
+} __packed;
+
+#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01)
+/* To support NCI 2.x */
+struct nci_core_init_v2_cmd {
+ u8 feature1;
+ u8 feature2;
+};
+
+#define NCI_OP_CORE_SET_CONFIG_CMD nci_opcode_pack(NCI_GID_CORE, 0x02)
+struct set_config_param {
+ __u8 id;
+ __u8 len;
+ __u8 val[NCI_MAX_PARAM_LEN];
+} __packed;
+
+struct nci_core_set_config_cmd {
+ __u8 num_params;
+ struct set_config_param param; /* supporting 1 param per cmd is enough */
+} __packed;
+
+#define NCI_OP_CORE_CONN_CREATE_CMD nci_opcode_pack(NCI_GID_CORE, 0x04)
+#define DEST_SPEC_PARAMS_ID_INDEX 0
+#define DEST_SPEC_PARAMS_PROTOCOL_INDEX 1
+struct dest_spec_params {
+ __u8 id;
+ __u8 protocol;
+} __packed;
+
+struct core_conn_create_dest_spec_params {
+ __u8 type;
+ __u8 length;
+ __u8 value[];
+} __packed;
+
+struct nci_core_conn_create_cmd {
+ __u8 destination_type;
+ __u8 number_destination_params;
+ struct core_conn_create_dest_spec_params params[];
+} __packed;
+
+#define NCI_OP_CORE_CONN_CLOSE_CMD nci_opcode_pack(NCI_GID_CORE, 0x05)
+
+#define NCI_OP_RF_DISCOVER_MAP_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
+struct disc_map_config {
+ __u8 rf_protocol;
+ __u8 mode;
+ __u8 rf_interface;
+} __packed;
+
+struct nci_rf_disc_map_cmd {
+ __u8 num_mapping_configs;
+ struct disc_map_config mapping_configs
+ [NCI_MAX_NUM_MAPPING_CONFIGS];
+} __packed;
+
+#define NCI_OP_RF_DISCOVER_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
+struct disc_config {
+ __u8 rf_tech_and_mode;
+ __u8 frequency;
+} __packed;
+
+struct nci_rf_disc_cmd {
+ __u8 num_disc_configs;
+ struct disc_config disc_configs[NCI_MAX_NUM_RF_CONFIGS];
+} __packed;
+
+#define NCI_OP_RF_DISCOVER_SELECT_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x04)
+struct nci_rf_discover_select_cmd {
+ __u8 rf_discovery_id;
+ __u8 rf_protocol;
+ __u8 rf_interface;
+} __packed;
+
+#define NCI_OP_RF_DEACTIVATE_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
+struct nci_rf_deactivate_cmd {
+ __u8 type;
+} __packed;
+
+#define NCI_OP_NFCEE_DISCOVER_CMD nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00)
+struct nci_nfcee_discover_cmd {
+ __u8 discovery_action;
+} __packed;
+
+#define NCI_OP_NFCEE_MODE_SET_CMD nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x01)
+#define NCI_NFCEE_DISABLE 0x00
+#define NCI_NFCEE_ENABLE 0x01
+struct nci_nfcee_mode_set_cmd {
+ __u8 nfcee_id;
+ __u8 nfcee_mode;
+} __packed;
+
+#define NCI_OP_CORE_GET_CONFIG_CMD nci_opcode_pack(NCI_GID_CORE, 0x03)
+
+/* ----------------------- */
+/* ---- NCI Responses ---- */
+/* ----------------------- */
+#define NCI_OP_CORE_RESET_RSP nci_opcode_pack(NCI_GID_CORE, 0x00)
+struct nci_core_reset_rsp {
+ __u8 status;
+ __u8 nci_ver;
+ __u8 config_status;
+} __packed;
+
+#define NCI_OP_CORE_INIT_RSP nci_opcode_pack(NCI_GID_CORE, 0x01)
+struct nci_core_init_rsp_1 {
+ __u8 status;
+ __le32 nfcc_features;
+ __u8 num_supported_rf_interfaces;
+ __u8 supported_rf_interfaces[]; /* variable size array */
+ /* continued in nci_core_init_rsp_2 */
+} __packed;
+
+struct nci_core_init_rsp_2 {
+ __u8 max_logical_connections;
+ __le16 max_routing_table_size;
+ __u8 max_ctrl_pkt_payload_len;
+ __le16 max_size_for_large_params;
+ __u8 manufact_id;
+ __le32 manufact_specific_info;
+} __packed;
+
+/* To support NCI ver 2.x */
+struct nci_core_init_rsp_nci_ver2 {
+ u8 status;
+ __le32 nfcc_features;
+ u8 max_logical_connections;
+ __le16 max_routing_table_size;
+ u8 max_ctrl_pkt_payload_len;
+ u8 max_data_pkt_hci_payload_len;
+ u8 number_of_hci_credit;
+ __le16 max_nfc_v_frame_size;
+ u8 num_supported_rf_interfaces;
+ u8 supported_rf_interfaces[];
+} __packed;
+
+#define NCI_OP_CORE_SET_CONFIG_RSP nci_opcode_pack(NCI_GID_CORE, 0x02)
+struct nci_core_set_config_rsp {
+ __u8 status;
+ __u8 num_params;
+ __u8 params_id[]; /* variable size array */
+} __packed;
+
+#define NCI_OP_CORE_CONN_CREATE_RSP nci_opcode_pack(NCI_GID_CORE, 0x04)
+struct nci_core_conn_create_rsp {
+ __u8 status;
+ __u8 max_ctrl_pkt_payload_len;
+ __u8 credits_cnt;
+ __u8 conn_id;
+} __packed;
+
+#define NCI_OP_CORE_CONN_CLOSE_RSP nci_opcode_pack(NCI_GID_CORE, 0x05)
+
+#define NCI_OP_RF_DISCOVER_MAP_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
+
+#define NCI_OP_RF_DISCOVER_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
+
+#define NCI_OP_RF_DISCOVER_SELECT_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x04)
+
+#define NCI_OP_RF_DEACTIVATE_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
+
+#define NCI_OP_NFCEE_DISCOVER_RSP nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00)
+struct nci_nfcee_discover_rsp {
+ __u8 status;
+ __u8 num_nfcee;
+} __packed;
+
+#define NCI_OP_NFCEE_MODE_SET_RSP nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x01)
+
+#define NCI_OP_CORE_GET_CONFIG_RSP nci_opcode_pack(NCI_GID_CORE, 0x03)
+
+/* --------------------------- */
+/* ---- NCI Notifications ---- */
+/* --------------------------- */
+#define NCI_OP_CORE_RESET_NTF nci_opcode_pack(NCI_GID_CORE, 0x00)
+struct nci_core_reset_ntf {
+ u8 reset_trigger;
+ u8 config_status;
+ u8 nci_ver;
+ u8 manufact_id;
+ u8 manufacturer_specific_len;
+ __le32 manufact_specific_info;
+} __packed;
+
+#define NCI_OP_CORE_CONN_CREDITS_NTF nci_opcode_pack(NCI_GID_CORE, 0x06)
+struct conn_credit_entry {
+ __u8 conn_id;
+ __u8 credits;
+} __packed;
+
+struct nci_core_conn_credit_ntf {
+ __u8 num_entries;
+ struct conn_credit_entry conn_entries[NCI_MAX_NUM_CONN];
+} __packed;
+
+#define NCI_OP_CORE_GENERIC_ERROR_NTF nci_opcode_pack(NCI_GID_CORE, 0x07)
+
+#define NCI_OP_CORE_INTF_ERROR_NTF nci_opcode_pack(NCI_GID_CORE, 0x08)
+struct nci_core_intf_error_ntf {
+ __u8 status;
+ __u8 conn_id;
+} __packed;
+
+#define NCI_OP_RF_DISCOVER_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
+struct rf_tech_specific_params_nfca_poll {
+ __u16 sens_res;
+ __u8 nfcid1_len; /* 0, 4, 7, or 10 Bytes */
+ __u8 nfcid1[NFC_NFCID1_MAXSIZE];
+ __u8 sel_res_len; /* 0 or 1 Bytes */
+ __u8 sel_res;
+} __packed;
+
+struct rf_tech_specific_params_nfcb_poll {
+ __u8 sensb_res_len;
+ __u8 sensb_res[NFC_SENSB_RES_MAXSIZE]; /* 11 or 12 Bytes */
+} __packed;
+
+struct rf_tech_specific_params_nfcf_poll {
+ __u8 bit_rate;
+ __u8 sensf_res_len;
+ __u8 sensf_res[NFC_SENSF_RES_MAXSIZE]; /* 16 or 18 Bytes */
+} __packed;
+
+struct rf_tech_specific_params_nfcv_poll {
+ __u8 res_flags;
+ __u8 dsfid;
+ __u8 uid[NFC_ISO15693_UID_MAXSIZE]; /* 8 Bytes */
+} __packed;
+
+struct rf_tech_specific_params_nfcf_listen {
+ __u8 local_nfcid2_len;
+ __u8 local_nfcid2[NFC_NFCID2_MAXSIZE]; /* 0 or 8 Bytes */
+} __packed;
+
+struct nci_rf_discover_ntf {
+ __u8 rf_discovery_id;
+ __u8 rf_protocol;
+ __u8 rf_tech_and_mode;
+ __u8 rf_tech_specific_params_len;
+
+ union {
+ struct rf_tech_specific_params_nfca_poll nfca_poll;
+ struct rf_tech_specific_params_nfcb_poll nfcb_poll;
+ struct rf_tech_specific_params_nfcf_poll nfcf_poll;
+ struct rf_tech_specific_params_nfcv_poll nfcv_poll;
+ } rf_tech_specific_params;
+
+ __u8 ntf_type;
+} __packed;
+
+#define NCI_OP_RF_INTF_ACTIVATED_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05)
+struct activation_params_nfca_poll_iso_dep {
+ __u8 rats_res_len;
+ __u8 rats_res[20];
+};
+
+struct activation_params_nfcb_poll_iso_dep {
+ __u8 attrib_res_len;
+ __u8 attrib_res[50];
+};
+
+struct activation_params_poll_nfc_dep {
+ __u8 atr_res_len;
+ __u8 atr_res[NFC_ATR_RES_MAXSIZE - 2]; /* ATR_RES from byte 3 */
+};
+
+struct activation_params_listen_nfc_dep {
+ __u8 atr_req_len;
+ __u8 atr_req[NFC_ATR_REQ_MAXSIZE - 2]; /* ATR_REQ from byte 3 */
+};
+
+struct nci_rf_intf_activated_ntf {
+ __u8 rf_discovery_id;
+ __u8 rf_interface;
+ __u8 rf_protocol;
+ __u8 activation_rf_tech_and_mode;
+ __u8 max_data_pkt_payload_size;
+ __u8 initial_num_credits;
+ __u8 rf_tech_specific_params_len;
+
+ union {
+ struct rf_tech_specific_params_nfca_poll nfca_poll;
+ struct rf_tech_specific_params_nfcb_poll nfcb_poll;
+ struct rf_tech_specific_params_nfcf_poll nfcf_poll;
+ struct rf_tech_specific_params_nfcv_poll nfcv_poll;
+ struct rf_tech_specific_params_nfcf_listen nfcf_listen;
+ } rf_tech_specific_params;
+
+ __u8 data_exch_rf_tech_and_mode;
+ __u8 data_exch_tx_bit_rate;
+ __u8 data_exch_rx_bit_rate;
+ __u8 activation_params_len;
+
+ union {
+ struct activation_params_nfca_poll_iso_dep nfca_poll_iso_dep;
+ struct activation_params_nfcb_poll_iso_dep nfcb_poll_iso_dep;
+ struct activation_params_poll_nfc_dep poll_nfc_dep;
+ struct activation_params_listen_nfc_dep listen_nfc_dep;
+ } activation_params;
+
+} __packed;
+
+#define NCI_OP_RF_DEACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
+struct nci_rf_deactivate_ntf {
+ __u8 type;
+ __u8 reason;
+} __packed;
+
+#define NCI_OP_RF_NFCEE_ACTION_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x09)
+struct nci_rf_nfcee_action_ntf {
+ __u8 nfcee_id;
+ __u8 trigger;
+ __u8 supported_data_length;
+ __u8 supported_data[];
+} __packed;
+
+#define NCI_OP_NFCEE_DISCOVER_NTF nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00)
+struct nci_nfcee_supported_protocol {
+ __u8 num_protocol;
+ __u8 supported_protocol[];
+} __packed;
+
+struct nci_nfcee_information_tlv {
+ __u8 num_tlv;
+ __u8 information_tlv[];
+} __packed;
+
+struct nci_nfcee_discover_ntf {
+ __u8 nfcee_id;
+ __u8 nfcee_status;
+ struct nci_nfcee_supported_protocol supported_protocols;
+ struct nci_nfcee_information_tlv information_tlv;
+} __packed;
+
+#define NCI_OP_CORE_RESET_NTF nci_opcode_pack(NCI_GID_CORE, 0x00)
+
+#endif /* __NCI_H */
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
new file mode 100644
index 000000000..ea8595651
--- /dev/null
+++ b/include/net/nfc/nci_core.h
@@ -0,0 +1,465 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2013 Intel Corporation. All rights reserved.
+ * Copyright (C) 2014 Marvell International Ltd.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on hci_core.h, which was written
+ * by Maxim Krasnyansky.
+ */
+
+#ifndef __NCI_CORE_H
+#define __NCI_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/tty.h>
+
+#include <net/nfc/nfc.h>
+#include <net/nfc/nci.h>
+
+/* NCI device flags */
+enum nci_flag {
+ NCI_INIT,
+ NCI_UP,
+ NCI_DATA_EXCHANGE,
+ NCI_DATA_EXCHANGE_TO,
+ NCI_UNREG,
+};
+
+/* NCI device states */
+enum nci_state {
+ NCI_IDLE,
+ NCI_DISCOVERY,
+ NCI_W4_ALL_DISCOVERIES,
+ NCI_W4_HOST_SELECT,
+ NCI_POLL_ACTIVE,
+ NCI_LISTEN_ACTIVE,
+ NCI_LISTEN_SLEEP,
+};
+
+/* NCI timeouts */
+#define NCI_RESET_TIMEOUT 5000
+#define NCI_INIT_TIMEOUT 5000
+#define NCI_SET_CONFIG_TIMEOUT 5000
+#define NCI_RF_DISC_TIMEOUT 5000
+#define NCI_RF_DISC_SELECT_TIMEOUT 5000
+#define NCI_RF_DEACTIVATE_TIMEOUT 30000
+#define NCI_CMD_TIMEOUT 5000
+#define NCI_DATA_TIMEOUT 700
+
+struct nci_dev;
+
+struct nci_driver_ops {
+ __u16 opcode;
+ int (*rsp)(struct nci_dev *dev, struct sk_buff *skb);
+ int (*ntf)(struct nci_dev *dev, struct sk_buff *skb);
+};
+
+struct nci_ops {
+ int (*init)(struct nci_dev *ndev);
+ int (*open)(struct nci_dev *ndev);
+ int (*close)(struct nci_dev *ndev);
+ int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
+ int (*setup)(struct nci_dev *ndev);
+ int (*post_setup)(struct nci_dev *ndev);
+ int (*fw_download)(struct nci_dev *ndev, const char *firmware_name);
+ __u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol);
+ int (*discover_se)(struct nci_dev *ndev);
+ int (*disable_se)(struct nci_dev *ndev, u32 se_idx);
+ int (*enable_se)(struct nci_dev *ndev, u32 se_idx);
+ int (*se_io)(struct nci_dev *ndev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context);
+ int (*hci_load_session)(struct nci_dev *ndev);
+ void (*hci_event_received)(struct nci_dev *ndev, u8 pipe, u8 event,
+ struct sk_buff *skb);
+ void (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd,
+ struct sk_buff *skb);
+
+ const struct nci_driver_ops *prop_ops;
+ size_t n_prop_ops;
+
+ const struct nci_driver_ops *core_ops;
+ size_t n_core_ops;
+};
+
+#define NCI_MAX_SUPPORTED_RF_INTERFACES 4
+#define NCI_MAX_DISCOVERED_TARGETS 10
+#define NCI_MAX_NUM_NFCEE 255
+#define NCI_MAX_CONN_ID 7
+#define NCI_MAX_PROPRIETARY_CMD 64
+
+struct nci_conn_info {
+ struct list_head list;
+ /* NCI specification 4.4.2 Connection Creation
+ * The combination of destination type and destination specific
+ * parameters shall uniquely identify a single destination for the
+ * Logical Connection
+ */
+ struct dest_spec_params *dest_params;
+ __u8 dest_type;
+ __u8 conn_id;
+ __u8 max_pkt_payload_len;
+
+ atomic_t credits_cnt;
+ __u8 initial_num_credits;
+
+ data_exchange_cb_t data_exchange_cb;
+ void *data_exchange_cb_context;
+
+ struct sk_buff *rx_skb;
+};
+
+#define NCI_INVALID_CONN_ID 0x80
+
+#define NCI_HCI_ANY_OPEN_PIPE 0x03
+
+/* Gates */
+#define NCI_HCI_ADMIN_GATE 0x00
+#define NCI_HCI_LOOPBACK_GATE 0x04
+#define NCI_HCI_IDENTITY_MGMT_GATE 0x05
+#define NCI_HCI_LINK_MGMT_GATE 0x06
+
+/* Pipes */
+#define NCI_HCI_LINK_MGMT_PIPE 0x00
+#define NCI_HCI_ADMIN_PIPE 0x01
+
+/* Generic responses */
+#define NCI_HCI_ANY_OK 0x00
+#define NCI_HCI_ANY_E_NOT_CONNECTED 0x01
+#define NCI_HCI_ANY_E_CMD_PAR_UNKNOWN 0x02
+#define NCI_HCI_ANY_E_NOK 0x03
+#define NCI_HCI_ANY_E_PIPES_FULL 0x04
+#define NCI_HCI_ANY_E_REG_PAR_UNKNOWN 0x05
+#define NCI_HCI_ANY_E_PIPE_NOT_OPENED 0x06
+#define NCI_HCI_ANY_E_CMD_NOT_SUPPORTED 0x07
+#define NCI_HCI_ANY_E_INHIBITED 0x08
+#define NCI_HCI_ANY_E_TIMEOUT 0x09
+#define NCI_HCI_ANY_E_REG_ACCESS_DENIED 0x0a
+#define NCI_HCI_ANY_E_PIPE_ACCESS_DENIED 0x0b
+
+#define NCI_HCI_DO_NOT_OPEN_PIPE 0x81
+#define NCI_HCI_INVALID_PIPE 0x80
+#define NCI_HCI_INVALID_GATE 0xFF
+#define NCI_HCI_INVALID_HOST 0x80
+
+#define NCI_HCI_MAX_CUSTOM_GATES 50
+/*
+ * According to specification 102 622 chapter 4.4 Pipes,
+ * the pipe identifier is 7 bits long.
+ */
+#define NCI_HCI_MAX_PIPES 128
+
+struct nci_hci_gate {
+ u8 gate;
+ u8 pipe;
+ u8 dest_host;
+} __packed;
+
+struct nci_hci_pipe {
+ u8 gate;
+ u8 host;
+} __packed;
+
+struct nci_hci_init_data {
+ u8 gate_count;
+ struct nci_hci_gate gates[NCI_HCI_MAX_CUSTOM_GATES];
+ char session_id[9];
+};
+
+#define NCI_HCI_MAX_GATES 256
+
+struct nci_hci_dev {
+ u8 nfcee_id;
+ struct nci_dev *ndev;
+ struct nci_conn_info *conn_info;
+
+ struct nci_hci_init_data init_data;
+ struct nci_hci_pipe pipes[NCI_HCI_MAX_PIPES];
+ u8 gate2pipe[NCI_HCI_MAX_GATES];
+ int expected_pipes;
+ int count_pipes;
+
+ struct sk_buff_head rx_hcp_frags;
+ struct work_struct msg_rx_work;
+ struct sk_buff_head msg_rx_queue;
+};
+
+/* NCI Core structures */
+struct nci_dev {
+ struct nfc_dev *nfc_dev;
+ const struct nci_ops *ops;
+ struct nci_hci_dev *hci_dev;
+
+ int tx_headroom;
+ int tx_tailroom;
+
+ atomic_t state;
+ unsigned long flags;
+
+ atomic_t cmd_cnt;
+ __u8 cur_conn_id;
+
+ struct list_head conn_info_list;
+ struct nci_conn_info *rf_conn_info;
+
+ struct timer_list cmd_timer;
+ struct timer_list data_timer;
+
+ struct workqueue_struct *cmd_wq;
+ struct work_struct cmd_work;
+
+ struct workqueue_struct *rx_wq;
+ struct work_struct rx_work;
+
+ struct workqueue_struct *tx_wq;
+ struct work_struct tx_work;
+
+ struct sk_buff_head cmd_q;
+ struct sk_buff_head rx_q;
+ struct sk_buff_head tx_q;
+
+ struct mutex req_lock;
+ struct completion req_completion;
+ __u32 req_status;
+ __u32 req_result;
+
+ void *driver_data;
+
+ __u32 poll_prots;
+ __u32 target_active_prot;
+
+ struct nfc_target targets[NCI_MAX_DISCOVERED_TARGETS];
+ int n_targets;
+
+ /* received during NCI_OP_CORE_RESET_RSP */
+ __u8 nci_ver;
+
+ /* received during NCI_OP_CORE_INIT_RSP */
+ __u32 nfcc_features;
+ __u8 num_supported_rf_interfaces;
+ __u8 supported_rf_interfaces
+ [NCI_MAX_SUPPORTED_RF_INTERFACES];
+ __u8 max_logical_connections;
+ __u16 max_routing_table_size;
+ __u8 max_ctrl_pkt_payload_len;
+ __u16 max_size_for_large_params;
+ __u8 manufact_id;
+ __u32 manufact_specific_info;
+
+ /* Save RF Discovery ID or NFCEE ID under conn_create */
+ struct dest_spec_params cur_params;
+ /* Save destination type under conn_create */
+ __u8 cur_dest_type;
+
+ /* stored during nci_data_exchange */
+ struct sk_buff *rx_data_reassembly;
+
+ /* stored during intf_activated_ntf */
+ __u8 remote_gb[NFC_MAX_GT_LEN];
+ __u8 remote_gb_len;
+};
+
+/* ----- NCI Devices ----- */
+struct nci_dev *nci_allocate_device(const struct nci_ops *ops,
+ __u32 supported_protocols,
+ int tx_headroom,
+ int tx_tailroom);
+void nci_free_device(struct nci_dev *ndev);
+int nci_register_device(struct nci_dev *ndev);
+void nci_unregister_device(struct nci_dev *ndev);
+int nci_request(struct nci_dev *ndev,
+ void (*req)(struct nci_dev *ndev,
+ const void *opt),
+ const void *opt, __u32 timeout);
+int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len,
+ const __u8 *payload);
+int nci_core_cmd(struct nci_dev *ndev, __u16 opcode, size_t len,
+ const __u8 *payload);
+int nci_core_reset(struct nci_dev *ndev);
+int nci_core_init(struct nci_dev *ndev);
+
+int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
+int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb);
+int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, const __u8 *val);
+
+int nci_nfcee_discover(struct nci_dev *ndev, u8 action);
+int nci_nfcee_mode_set(struct nci_dev *ndev, u8 nfcee_id, u8 nfcee_mode);
+int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type,
+ u8 number_destination_params,
+ size_t params_len,
+ const struct core_conn_create_dest_spec_params *params);
+int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id);
+int nci_nfcc_loopback(struct nci_dev *ndev, const void *data, size_t data_len,
+ struct sk_buff **resp);
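+
+/*
+ * Transport-driver bring-up sketch (hypothetical names; the core expects
+ * my_nci_ops to provide at least open, close and send):
+ *
+ *	ndev = nci_allocate_device(&my_nci_ops, protocols,
+ *				   headroom, tailroom);
+ *	if (!ndev)
+ *		return -ENOMEM;
+ *	nci_set_parent_dev(ndev, &client->dev);
+ *	nci_set_drvdata(ndev, drv);
+ *	rc = nci_register_device(ndev);
+ *
+ * Frames read from the transport are passed up with
+ * nci_recv_frame(ndev, skb).
+ */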
+
+struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
+void nci_hci_deallocate(struct nci_dev *ndev);
+int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
+ const u8 *param, size_t param_len);
+int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate,
+ u8 cmd, const u8 *param, size_t param_len,
+ struct sk_buff **skb);
+int nci_hci_open_pipe(struct nci_dev *ndev, u8 pipe);
+int nci_hci_connect_gate(struct nci_dev *ndev, u8 dest_host,
+ u8 dest_gate, u8 pipe);
+int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx,
+ const u8 *param, size_t param_len);
+int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx,
+ struct sk_buff **skb);
+int nci_hci_clear_all_pipes(struct nci_dev *ndev);
+int nci_hci_dev_session_init(struct nci_dev *ndev);
+
+static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev,
+ unsigned int len,
+ gfp_t how)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len + ndev->tx_headroom + ndev->tx_tailroom, how);
+ if (skb)
+ skb_reserve(skb, ndev->tx_headroom);
+
+ return skb;
+}
+
+static inline void nci_set_parent_dev(struct nci_dev *ndev, struct device *dev)
+{
+ nfc_set_parent_dev(ndev->nfc_dev, dev);
+}
+
+static inline void nci_set_drvdata(struct nci_dev *ndev, void *data)
+{
+ ndev->driver_data = data;
+}
+
+static inline void *nci_get_drvdata(struct nci_dev *ndev)
+{
+ return ndev->driver_data;
+}
+
+static inline int nci_set_vendor_cmds(struct nci_dev *ndev,
+ const struct nfc_vendor_cmd *cmds,
+ int n_cmds)
+{
+ return nfc_set_vendor_cmds(ndev->nfc_dev, cmds, n_cmds);
+}
+
+void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb);
+void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb);
+int nci_prop_rsp_packet(struct nci_dev *ndev, __u16 opcode,
+ struct sk_buff *skb);
+int nci_prop_ntf_packet(struct nci_dev *ndev, __u16 opcode,
+ struct sk_buff *skb);
+int nci_core_rsp_packet(struct nci_dev *ndev, __u16 opcode,
+ struct sk_buff *skb);
+int nci_core_ntf_packet(struct nci_dev *ndev, __u16 opcode,
+ struct sk_buff *skb);
+void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb);
+int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, const void *payload);
+int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb);
+int nci_conn_max_data_pkt_payload_size(struct nci_dev *ndev, __u8 conn_id);
+void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ __u8 conn_id, int err);
+void nci_hci_data_received_cb(void *context, struct sk_buff *skb, int err);
+
+void nci_clear_target_list(struct nci_dev *ndev);
+
+/* ----- NCI requests ----- */
+#define NCI_REQ_DONE 0
+#define NCI_REQ_PEND 1
+#define NCI_REQ_CANCELED 2
+
+void nci_req_complete(struct nci_dev *ndev, int result);
+struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev,
+ int conn_id);
+int nci_get_conn_info_by_dest_type_params(struct nci_dev *ndev, u8 dest_type,
+ const struct dest_spec_params *params);
+
+/* ----- NCI status code ----- */
+int nci_to_errno(__u8 code);
+
+/* ----- NCI over SPI acknowledge modes ----- */
+#define NCI_SPI_CRC_DISABLED 0x00
+#define NCI_SPI_CRC_ENABLED 0x01
+
+/* ----- NCI SPI structures ----- */
+struct nci_spi {
+ struct nci_dev *ndev;
+ struct spi_device *spi;
+
+ unsigned int xfer_udelay; /* microseconds delay between
+ transactions */
+
+ unsigned int xfer_speed_hz; /*
+ * SPI clock frequency
+ * 0 => default clock
+ */
+
+ u8 acknowledge_mode;
+
+ struct completion req_completion;
+ u8 req_result;
+};
+
+/* ----- NCI SPI ----- */
+struct nci_spi *nci_spi_allocate_spi(struct spi_device *spi,
+ u8 acknowledge_mode, unsigned int delay,
+ struct nci_dev *ndev);
+int nci_spi_send(struct nci_spi *nspi,
+ struct completion *write_handshake_completion,
+ struct sk_buff *skb);
+struct sk_buff *nci_spi_read(struct nci_spi *nspi);
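+
+/*
+ * SPI transport sketch (the probe-time context and the 10 us inter-
+ * transaction delay value are hypothetical):
+ *
+ *	nspi = nci_spi_allocate_spi(spi, NCI_SPI_CRC_ENABLED, 10, ndev);
+ *	if (!nspi)
+ *		return -ENOMEM;
+ *
+ *	rc = nci_spi_send(nspi, NULL, skb);	// NULL: no write handshake
+ *	rx = nci_spi_read(nspi);		// NULL on read failure
+ */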
+
+/* ----- NCI UART ---- */
+
+/* Ioctl */
+#define NCIUARTSETDRIVER _IOW('U', 0, char *)
+
+enum nci_uart_driver {
+ NCI_UART_DRIVER_MARVELL = 0,
+ NCI_UART_DRIVER_MAX
+};
+
+struct nci_uart;
+
+struct nci_uart_ops {
+ int (*open)(struct nci_uart *nci_uart);
+ void (*close)(struct nci_uart *nci_uart);
+ int (*recv)(struct nci_uart *nci_uart, struct sk_buff *skb);
+ int (*send)(struct nci_uart *nci_uart, struct sk_buff *skb);
+ void (*tx_start)(struct nci_uart *nci_uart);
+ void (*tx_done)(struct nci_uart *nci_uart);
+};
+
+struct nci_uart {
+ struct module *owner;
+ struct nci_uart_ops ops;
+ const char *name;
+ enum nci_uart_driver driver;
+
+ /* Dynamic data */
+ struct nci_dev *ndev;
+ spinlock_t rx_lock;
+ struct work_struct write_work;
+ struct tty_struct *tty;
+ unsigned long tx_state;
+ struct sk_buff_head tx_q;
+ struct sk_buff *tx_skb;
+ struct sk_buff *rx_skb;
+ int rx_packet_len;
+ void *drv_data;
+};
+
+int nci_uart_register(struct nci_uart *nu);
+void nci_uart_unregister(struct nci_uart *nu);
+void nci_uart_set_config(struct nci_uart *nu, int baudrate, int flow_ctrl);
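+
+/*
+ * UART driver sketch (hypothetical ops; note that .ops is embedded, not
+ * a pointer, and .driver must be a valid enum nci_uart_driver slot):
+ *
+ *	static struct nci_uart my_nci_uart = {
+ *		.owner	= THIS_MODULE,
+ *		.name	= "my_nci_uart",
+ *		.driver	= NCI_UART_DRIVER_MARVELL,
+ *		.ops	= {
+ *			.open	= my_open,
+ *			.close	= my_close,
+ *		},
+ *	};
+ *
+ *	rc = nci_uart_register(&my_nci_uart);
+ */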
+
+#endif /* __NCI_CORE_H */
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
new file mode 100644
index 000000000..5dee575fb
--- /dev/null
+++ b/include/net/nfc/nfc.h
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2011 Instituto Nokia de Tecnologia
+ * Copyright (C) 2014 Marvell International Ltd.
+ *
+ * Authors:
+ * Lauro Ramos Venancio <lauro.venancio@openbossa.org>
+ * Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
+ */
+
+#ifndef __NET_NFC_H
+#define __NET_NFC_H
+
+#include <linux/nfc.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+
+#define nfc_dbg(dev, fmt, ...) dev_dbg((dev), "NFC: " fmt, ##__VA_ARGS__)
+#define nfc_info(dev, fmt, ...) dev_info((dev), "NFC: " fmt, ##__VA_ARGS__)
+#define nfc_err(dev, fmt, ...) dev_err((dev), "NFC: " fmt, ##__VA_ARGS__)
+
+struct nfc_phy_ops {
+ int (*write)(void *dev_id, struct sk_buff *skb);
+ int (*enable)(void *dev_id);
+ void (*disable)(void *dev_id);
+};
+
+struct nfc_dev;
+
+/**
+ * data_exchange_cb_t - Definition of nfc_data_exchange callback
+ *
+ * @context: nfc_data_exchange cb_context parameter
+ * @skb: response data
+ * @err: If an error has occurred during data exchange, it is the
+ * error number. Zero means no error.
+ *
+ * When a rx or tx packet is lost or corrupted or the target gets out
+ * of the operating field, err is -EIO.
+ */
+typedef void (*data_exchange_cb_t)(void *context, struct sk_buff *skb,
+ int err);
+
+typedef void (*se_io_cb_t)(void *context, u8 *apdu, size_t apdu_len, int err);
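+
+/*
+ * Callback sketch for data_exchange_cb_t (my_ctx and process() are
+ * hypothetical; on success the callback owns and must free skb):
+ *
+ *	static void my_exchange_done(void *context, struct sk_buff *skb,
+ *				     int err)
+ *	{
+ *		struct my_ctx *ctx = context;
+ *
+ *		if (err)
+ *			return;
+ *		process(ctx, skb);
+ *		kfree_skb(skb);
+ *	}
+ */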
+
+struct nfc_target;
+
+struct nfc_ops {
+ int (*dev_up)(struct nfc_dev *dev);
+ int (*dev_down)(struct nfc_dev *dev);
+ int (*start_poll)(struct nfc_dev *dev,
+ u32 im_protocols, u32 tm_protocols);
+ void (*stop_poll)(struct nfc_dev *dev);
+ int (*dep_link_up)(struct nfc_dev *dev, struct nfc_target *target,
+ u8 comm_mode, u8 *gb, size_t gb_len);
+ int (*dep_link_down)(struct nfc_dev *dev);
+ int (*activate_target)(struct nfc_dev *dev, struct nfc_target *target,
+ u32 protocol);
+ void (*deactivate_target)(struct nfc_dev *dev,
+ struct nfc_target *target, u8 mode);
+ int (*im_transceive)(struct nfc_dev *dev, struct nfc_target *target,
+ struct sk_buff *skb, data_exchange_cb_t cb,
+ void *cb_context);
+ int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb);
+ int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target);
+ int (*fw_download)(struct nfc_dev *dev, const char *firmware_name);
+
+ /* Secure Element API */
+ int (*discover_se)(struct nfc_dev *dev);
+ int (*enable_se)(struct nfc_dev *dev, u32 se_idx);
+ int (*disable_se)(struct nfc_dev *dev, u32 se_idx);
+ int (*se_io) (struct nfc_dev *dev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context);
+};
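+
+/*
+ * Ops skeleton for a driver sitting directly on this API (hypothetical
+ * my_* handlers): the core requires start_poll, stop_poll,
+ * activate_target, deactivate_target and im_transceive; the remaining
+ * hooks are optional.
+ *
+ *	static const struct nfc_ops my_nfc_ops = {
+ *		.start_poll		= my_start_poll,
+ *		.stop_poll		= my_stop_poll,
+ *		.activate_target	= my_activate_target,
+ *		.deactivate_target	= my_deactivate_target,
+ *		.im_transceive		= my_im_transceive,
+ *	};
+ */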
+
+#define NFC_TARGET_IDX_ANY -1
+#define NFC_MAX_GT_LEN 48
+#define NFC_ATR_RES_GT_OFFSET 15
+#define NFC_ATR_REQ_GT_OFFSET 14
+
+/**
+ * struct nfc_target - NFC target description
+ *
+ * @sens_res: 2 bytes describing the target SENS_RES response, if the target
+ * is a type A one. The %sens_res most significant byte must be byte 2
+ * as described by the NFC Forum digital specification (i.e. the platform
+ * configuration one) while %sens_res least significant byte is byte 1.
+ */
+struct nfc_target {
+ u32 idx;
+ u32 supported_protocols;
+ u16 sens_res;
+ u8 sel_res;
+ u8 nfcid1_len;
+ u8 nfcid1[NFC_NFCID1_MAXSIZE];
+ u8 nfcid2_len;
+ u8 nfcid2[NFC_NFCID2_MAXSIZE];
+ u8 sensb_res_len;
+ u8 sensb_res[NFC_SENSB_RES_MAXSIZE];
+ u8 sensf_res_len;
+ u8 sensf_res[NFC_SENSF_RES_MAXSIZE];
+ u8 hci_reader_gate;
+ u8 logical_idx;
+ u8 is_iso15693;
+ u8 iso15693_dsfid;
+ u8 iso15693_uid[NFC_ISO15693_UID_MAXSIZE];
+};
+
+/**
+ * struct nfc_se - A structure for NFC accessible secure elements.
+ *
+ * @idx: The secure element index. User space will enable or
+ * disable a secure element by its index.
+ * @type: The secure element type. It can be SE_UICC or
+ * SE_EMBEDDED.
+ * @state: The secure element state, either enabled or disabled.
+ *
+ */
+struct nfc_se {
+ struct list_head list;
+ u32 idx;
+ u16 type;
+ u16 state;
+};
+
+/**
+ * struct nfc_evt_transaction - A structure for NFC secure element event transactions.
+ *
+ * @aid: The application identifier triggering the event
+ *
+ * @aid_len: The application identifier length [5:16]
+ *
+ * @params: The application parameters transmitted during the transaction
+ *
+ * @params_len: The application parameters length [0:255]
+ *
+ */
+#define NFC_MIN_AID_LENGTH 5
+#define NFC_MAX_AID_LENGTH 16
+#define NFC_MAX_PARAMS_LENGTH 255
+
+#define NFC_EVT_TRANSACTION_AID_TAG 0x81
+#define NFC_EVT_TRANSACTION_PARAMS_TAG 0x82
+struct nfc_evt_transaction {
+ u32 aid_len;
+ u8 aid[NFC_MAX_AID_LENGTH];
+ u8 params_len;
+ u8 params[];
+} __packed;
+
+struct nfc_genl_data {
+ u32 poll_req_portid;
+ struct mutex genl_data_mutex;
+};
+
+struct nfc_vendor_cmd {
+ __u32 vendor_id;
+ __u32 subcmd;
+ int (*doit)(struct nfc_dev *dev, void *data, size_t data_len);
+};
+
+struct nfc_dev {
+ int idx;
+ u32 target_next_idx;
+ struct nfc_target *targets;
+ int n_targets;
+ int targets_generation;
+ struct device dev;
+ bool dev_up;
+ bool fw_download_in_progress;
+ u8 rf_mode;
+ bool polling;
+ struct nfc_target *active_target;
+ bool dep_link_up;
+ struct nfc_genl_data genl_data;
+ u32 supported_protocols;
+
+ struct list_head secure_elements;
+
+ int tx_headroom;
+ int tx_tailroom;
+
+ struct timer_list check_pres_timer;
+ struct work_struct check_pres_work;
+
+ bool shutting_down;
+
+ struct rfkill *rfkill;
+
+ const struct nfc_vendor_cmd *vendor_cmds;
+ int n_vendor_cmds;
+
+ const struct nfc_ops *ops;
+ struct genl_info *cur_cmd_info;
+};
+#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
+
+extern struct class nfc_class;
+
+struct nfc_dev *nfc_allocate_device(const struct nfc_ops *ops,
+ u32 supported_protocols,
+ int tx_headroom,
+ int tx_tailroom);
+
+/**
+ * nfc_free_device - free nfc device
+ *
+ * @dev: The nfc device to free
+ */
+static inline void nfc_free_device(struct nfc_dev *dev)
+{
+ put_device(&dev->dev);
+}
+
+int nfc_register_device(struct nfc_dev *dev);
+
+void nfc_unregister_device(struct nfc_dev *dev);
+
+/**
+ * nfc_set_parent_dev - set the parent device
+ *
+ * @nfc_dev: The nfc device whose parent is being set
+ * @dev: The parent device
+ */
+static inline void nfc_set_parent_dev(struct nfc_dev *nfc_dev,
+ struct device *dev)
+{
+ nfc_dev->dev.parent = dev;
+}
+
+/**
+ * nfc_set_drvdata - set driver specific data
+ *
+ * @dev: The nfc device
+ * @data: Pointer to driver specific data
+ */
+static inline void nfc_set_drvdata(struct nfc_dev *dev, void *data)
+{
+ dev_set_drvdata(&dev->dev, data);
+}
+
+/**
+ * nfc_get_drvdata - get driver specific data
+ *
+ * @dev: The nfc device
+ */
+static inline void *nfc_get_drvdata(const struct nfc_dev *dev)
+{
+ return dev_get_drvdata(&dev->dev);
+}
+
+/**
+ * nfc_device_name - get the nfc device name
+ *
+ * @dev: The nfc device whose name to return
+ */
+static inline const char *nfc_device_name(const struct nfc_dev *dev)
+{
+ return dev_name(&dev->dev);
+}
+
+struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
+ unsigned int flags, unsigned int size,
+ unsigned int *err);
+struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp);
+
+int nfc_set_remote_general_bytes(struct nfc_dev *dev,
+ const u8 *gt, u8 gt_len);
+u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len);
+
+int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
+ u32 result);
+
+int nfc_targets_found(struct nfc_dev *dev,
+ struct nfc_target *targets, int ntargets);
+int nfc_target_lost(struct nfc_dev *dev, u32 target_idx);
+
+int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
+ u8 comm_mode, u8 rf_mode);
+
+int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode,
+ const u8 *gb, size_t gb_len);
+int nfc_tm_deactivated(struct nfc_dev *dev);
+int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb);
+
+void nfc_driver_failure(struct nfc_dev *dev, int err);
+
+int nfc_se_transaction(struct nfc_dev *dev, u8 se_idx,
+ struct nfc_evt_transaction *evt_transaction);
+int nfc_se_connectivity(struct nfc_dev *dev, u8 se_idx);
+int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type);
+int nfc_remove_se(struct nfc_dev *dev, u32 se_idx);
+struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx);
+
+void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
+ u8 payload_type, u8 direction);
+
+static inline int nfc_set_vendor_cmds(struct nfc_dev *dev,
+ const struct nfc_vendor_cmd *cmds,
+ int n_cmds)
+{
+ if (dev->vendor_cmds || dev->n_vendor_cmds)
+ return -EINVAL;
+
+ dev->vendor_cmds = cmds;
+ dev->n_vendor_cmds = n_cmds;
+
+ return 0;
+}
+
+struct sk_buff *__nfc_alloc_vendor_cmd_reply_skb(struct nfc_dev *dev,
+ enum nfc_attrs attr,
+ u32 oui, u32 subcmd,
+ int approxlen);
+int nfc_vendor_cmd_reply(struct sk_buff *skb);
+
+/**
+ * nfc_vendor_cmd_alloc_reply_skb - allocate vendor command reply
+ * @dev: nfc device
+ * @oui: vendor oui
+ * @approxlen: an upper bound of the length of the data that will
+ * be put into the skb
+ *
+ * This function allocates and pre-fills an skb for a reply to
+ * a vendor command. Since it is intended for a reply, calling
+ * it outside of a vendor command's doit() operation is invalid.
+ *
+ * The returned skb is pre-filled with some identifying data in
+ * a way that any data that is put into the skb (with skb_put(),
+ * nla_put() or similar) will end up being within the
+ * %NFC_ATTR_VENDOR_DATA attribute, so all that needs to be done
+ * with the skb is adding data for the corresponding userspace tool
+ * which can then read that data out of the vendor data attribute.
+ * You must not modify the skb in any other way.
+ *
+ * When done, call nfc_vendor_cmd_reply() with the skb and return
+ * its error code as the result of the doit() operation.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+nfc_vendor_cmd_alloc_reply_skb(struct nfc_dev *dev,
+ u32 oui, u32 subcmd, int approxlen)
+{
+ return __nfc_alloc_vendor_cmd_reply_skb(dev,
+ NFC_ATTR_VENDOR_DATA,
+ oui,
+ subcmd, approxlen);
+}
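+
+/*
+ * doit() sketch following the reply flow documented above (MY_OUI and
+ * MY_SUBCMD are hypothetical):
+ *
+ *	static int my_vendor_doit(struct nfc_dev *dev, void *data,
+ *				  size_t data_len)
+ *	{
+ *		struct sk_buff *skb;
+ *
+ *		skb = nfc_vendor_cmd_alloc_reply_skb(dev, MY_OUI,
+ *						     MY_SUBCMD, 4);
+ *		if (!skb)
+ *			return -ENOMEM;
+ *
+ *		skb_put_u8(skb, 0x00);	// lands in NFC_ATTR_VENDOR_DATA
+ *
+ *		return nfc_vendor_cmd_reply(skb);
+ *	}
+ */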
+
+#endif /* __NET_NFC_H */
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
new file mode 100644
index 000000000..f5850b569
--- /dev/null
+++ b/include/net/nl802154.h
@@ -0,0 +1,451 @@
+#ifndef __NL802154_H
+#define __NL802154_H
+/*
+ * 802.15.4 netlink interface public header
+ *
+ * Copyright 2014 Alexander Aring <aar@pengutronix.de>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+
+#define NL802154_GENL_NAME "nl802154"
+
+enum nl802154_commands {
+/* don't change the order or add anything between, this is ABI! */
+/* currently we don't ship this file via uapi, so ignore the above */
+ NL802154_CMD_UNSPEC,
+
+ NL802154_CMD_GET_WPAN_PHY, /* can dump */
+ NL802154_CMD_SET_WPAN_PHY,
+ NL802154_CMD_NEW_WPAN_PHY,
+ NL802154_CMD_DEL_WPAN_PHY,
+
+ NL802154_CMD_GET_INTERFACE, /* can dump */
+ NL802154_CMD_SET_INTERFACE,
+ NL802154_CMD_NEW_INTERFACE,
+ NL802154_CMD_DEL_INTERFACE,
+
+ NL802154_CMD_SET_CHANNEL,
+
+ NL802154_CMD_SET_PAN_ID,
+ NL802154_CMD_SET_SHORT_ADDR,
+
+ NL802154_CMD_SET_TX_POWER,
+ NL802154_CMD_SET_CCA_MODE,
+ NL802154_CMD_SET_CCA_ED_LEVEL,
+
+ NL802154_CMD_SET_MAX_FRAME_RETRIES,
+
+ NL802154_CMD_SET_BACKOFF_EXPONENT,
+ NL802154_CMD_SET_MAX_CSMA_BACKOFFS,
+
+ NL802154_CMD_SET_LBT_MODE,
+
+ NL802154_CMD_SET_ACKREQ_DEFAULT,
+
+ NL802154_CMD_SET_WPAN_PHY_NETNS,
+
+ NL802154_CMD_SET_SEC_PARAMS,
+ NL802154_CMD_GET_SEC_KEY, /* can dump */
+ NL802154_CMD_NEW_SEC_KEY,
+ NL802154_CMD_DEL_SEC_KEY,
+ NL802154_CMD_GET_SEC_DEV, /* can dump */
+ NL802154_CMD_NEW_SEC_DEV,
+ NL802154_CMD_DEL_SEC_DEV,
+ NL802154_CMD_GET_SEC_DEVKEY, /* can dump */
+ NL802154_CMD_NEW_SEC_DEVKEY,
+ NL802154_CMD_DEL_SEC_DEVKEY,
+ NL802154_CMD_GET_SEC_LEVEL, /* can dump */
+ NL802154_CMD_NEW_SEC_LEVEL,
+ NL802154_CMD_DEL_SEC_LEVEL,
+
+ /* add new commands above here */
+
+ /* used to define NL802154_CMD_MAX below */
+ __NL802154_CMD_AFTER_LAST,
+ NL802154_CMD_MAX = __NL802154_CMD_AFTER_LAST - 1
+};
+
+enum nl802154_attrs {
+/* don't change the order or add anything between, this is ABI! */
+/* currently we don't ship this file via uapi, so ignore the above */
+ NL802154_ATTR_UNSPEC,
+
+ NL802154_ATTR_WPAN_PHY,
+ NL802154_ATTR_WPAN_PHY_NAME,
+
+ NL802154_ATTR_IFINDEX,
+ NL802154_ATTR_IFNAME,
+ NL802154_ATTR_IFTYPE,
+
+ NL802154_ATTR_WPAN_DEV,
+
+ NL802154_ATTR_PAGE,
+ NL802154_ATTR_CHANNEL,
+
+ NL802154_ATTR_PAN_ID,
+ NL802154_ATTR_SHORT_ADDR,
+
+ NL802154_ATTR_TX_POWER,
+
+ NL802154_ATTR_CCA_MODE,
+ NL802154_ATTR_CCA_OPT,
+ NL802154_ATTR_CCA_ED_LEVEL,
+
+ NL802154_ATTR_MAX_FRAME_RETRIES,
+
+ NL802154_ATTR_MAX_BE,
+ NL802154_ATTR_MIN_BE,
+ NL802154_ATTR_MAX_CSMA_BACKOFFS,
+
+ NL802154_ATTR_LBT_MODE,
+
+ NL802154_ATTR_GENERATION,
+
+ NL802154_ATTR_CHANNELS_SUPPORTED,
+ NL802154_ATTR_SUPPORTED_CHANNEL,
+
+ NL802154_ATTR_EXTENDED_ADDR,
+
+ NL802154_ATTR_WPAN_PHY_CAPS,
+
+ NL802154_ATTR_SUPPORTED_COMMANDS,
+
+ NL802154_ATTR_ACKREQ_DEFAULT,
+
+ NL802154_ATTR_PAD,
+
+ NL802154_ATTR_PID,
+ NL802154_ATTR_NETNS_FD,
+
+ /* add attributes here, update the policy in nl802154.c */
+
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+ NL802154_ATTR_SEC_ENABLED,
+ NL802154_ATTR_SEC_OUT_LEVEL,
+ NL802154_ATTR_SEC_OUT_KEY_ID,
+ NL802154_ATTR_SEC_FRAME_COUNTER,
+
+ NL802154_ATTR_SEC_LEVEL,
+ NL802154_ATTR_SEC_DEVICE,
+ NL802154_ATTR_SEC_DEVKEY,
+ NL802154_ATTR_SEC_KEY,
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
+ __NL802154_ATTR_AFTER_LAST,
+ NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1
+};
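+
+/* An illustrative sketch (not from this header): attribute enums such as
+ * the one above are conventionally paired with a netlink policy table
+ * sized by the _MAX value, e.g. in nl802154.c:
+ *
+ *	static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX + 1] = {
+ *		[NL802154_ATTR_IFINDEX] = { .type = NLA_U32 },
+ *		[NL802154_ATTR_CHANNEL] = { .type = NLA_U8 },
+ *	};
+ */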
+
+enum nl802154_iftype {
+ NL802154_IFTYPE_UNSPEC = (~(__u32)0),
+
+ NL802154_IFTYPE_NODE = 0,
+ NL802154_IFTYPE_MONITOR,
+ NL802154_IFTYPE_COORD,
+
+ /* keep last */
+ NUM_NL802154_IFTYPES,
+ NL802154_IFTYPE_MAX = NUM_NL802154_IFTYPES - 1
+};
+
+/**
+ * enum nl802154_wpan_phy_capability_attr - wpan phy capability attributes
+ *
+ * @__NL802154_CAP_ATTR_INVALID: attribute number 0 is reserved
+ * @NL802154_CAP_ATTR_CHANNELS: a nested attribute for nl802154_channel_attr
+ * @NL802154_CAP_ATTR_TX_POWERS: a nested attribute for
+ * nl802154_wpan_phy_tx_power
+ * @NL802154_CAP_ATTR_CCA_ED_LEVELS: a nested attribute for the
+ * supported cca_ed_level values
+ * @NL802154_CAP_ATTR_CCA_MODES: nl802154_cca_modes flags
+ * @NL802154_CAP_ATTR_CCA_OPTS: nl802154_cca_opts flags
+ * @NL802154_CAP_ATTR_MIN_MINBE: minimum of minbe value
+ * @NL802154_CAP_ATTR_MAX_MINBE: maximum of minbe value
+ * @NL802154_CAP_ATTR_MIN_MAXBE: minimum of maxbe value
+ * @NL802154_CAP_ATTR_MAX_MAXBE: maximum of maxbe value
+ * @NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS: minimum of csma backoff value
+ * @NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS: maximum of csma backoffs value
+ * @NL802154_CAP_ATTR_MIN_FRAME_RETRIES: minimum of frame retries value
+ * @NL802154_CAP_ATTR_MAX_FRAME_RETRIES: maximum of frame retries value
+ * @NL802154_CAP_ATTR_IFTYPES: nl802154_iftype flags
+ * @NL802154_CAP_ATTR_LBT: nl802154_supported_bool_states flags
+ * @NL802154_CAP_ATTR_MAX: highest cap attribute currently defined
+ * @__NL802154_CAP_ATTR_AFTER_LAST: internal use
+ */
+enum nl802154_wpan_phy_capability_attr {
+ __NL802154_CAP_ATTR_INVALID,
+
+ NL802154_CAP_ATTR_IFTYPES,
+
+ NL802154_CAP_ATTR_CHANNELS,
+ NL802154_CAP_ATTR_TX_POWERS,
+
+ NL802154_CAP_ATTR_CCA_ED_LEVELS,
+ NL802154_CAP_ATTR_CCA_MODES,
+ NL802154_CAP_ATTR_CCA_OPTS,
+
+ NL802154_CAP_ATTR_MIN_MINBE,
+ NL802154_CAP_ATTR_MAX_MINBE,
+
+ NL802154_CAP_ATTR_MIN_MAXBE,
+ NL802154_CAP_ATTR_MAX_MAXBE,
+
+ NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS,
+ NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS,
+
+ NL802154_CAP_ATTR_MIN_FRAME_RETRIES,
+ NL802154_CAP_ATTR_MAX_FRAME_RETRIES,
+
+ NL802154_CAP_ATTR_LBT,
+
+ /* keep last */
+ __NL802154_CAP_ATTR_AFTER_LAST,
+ NL802154_CAP_ATTR_MAX = __NL802154_CAP_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nl802154_cca_modes - cca modes
+ *
+ * @__NL802154_CCA_INVALID: cca mode number 0 is reserved
+ * @NL802154_CCA_ENERGY: Energy above threshold
+ * @NL802154_CCA_CARRIER: Carrier sense only
+ * @NL802154_CCA_ENERGY_CARRIER: Carrier sense with energy above threshold
+ * @NL802154_CCA_ALOHA: CCA shall always report an idle medium
+ * @NL802154_CCA_UWB_SHR: UWB preamble sense based on the SHR of a frame
+ * @NL802154_CCA_UWB_MULTIPLEXED: UWB preamble sense based on the packet with
+ * the multiplexed preamble
+ * @__NL802154_CCA_ATTR_AFTER_LAST: Internal
+ * @NL802154_CCA_ATTR_MAX: Maximum CCA attribute number
+ */
+enum nl802154_cca_modes {
+ __NL802154_CCA_INVALID,
+ NL802154_CCA_ENERGY,
+ NL802154_CCA_CARRIER,
+ NL802154_CCA_ENERGY_CARRIER,
+ NL802154_CCA_ALOHA,
+ NL802154_CCA_UWB_SHR,
+ NL802154_CCA_UWB_MULTIPLEXED,
+
+ /* keep last */
+ __NL802154_CCA_ATTR_AFTER_LAST,
+ NL802154_CCA_ATTR_MAX = __NL802154_CCA_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nl802154_cca_opts - additional options for cca modes
+ *
+ * @NL802154_CCA_OPT_ENERGY_CARRIER_OR: NL802154_CCA_ENERGY_CARRIER with OR
+ * @NL802154_CCA_OPT_ENERGY_CARRIER_AND: NL802154_CCA_ENERGY_CARRIER with AND
+ */
+enum nl802154_cca_opts {
+ NL802154_CCA_OPT_ENERGY_CARRIER_AND,
+ NL802154_CCA_OPT_ENERGY_CARRIER_OR,
+
+ /* keep last */
+ __NL802154_CCA_OPT_ATTR_AFTER_LAST,
+ NL802154_CCA_OPT_ATTR_MAX = __NL802154_CCA_OPT_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nl802154_supported_bool_states - bool states for bool capability entry
+ *
+ * @NL802154_SUPPORTED_BOOL_FALSE: indicates to set false
+ * @NL802154_SUPPORTED_BOOL_TRUE: indicates to set true
+ * @__NL802154_SUPPORTED_BOOL_INVALID: reserved
+ * @NL802154_SUPPORTED_BOOL_BOTH: indicates to set true and false
+ * @__NL802154_SUPPORTED_BOOL_AFTER_LAST: Internal
+ * @NL802154_SUPPORTED_BOOL_MAX: highest value for bool states
+ */
+enum nl802154_supported_bool_states {
+ NL802154_SUPPORTED_BOOL_FALSE,
+ NL802154_SUPPORTED_BOOL_TRUE,
+ /* to handle them in a mask */
+	__NL802154_SUPPORTED_BOOL_INVALID,
+ NL802154_SUPPORTED_BOOL_BOTH,
+
+ /* keep last */
+ __NL802154_SUPPORTED_BOOL_AFTER_LAST,
+ NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1
+};
+
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+
+enum nl802154_dev_addr_modes {
+ NL802154_DEV_ADDR_NONE,
+ __NL802154_DEV_ADDR_INVALID,
+ NL802154_DEV_ADDR_SHORT,
+ NL802154_DEV_ADDR_EXTENDED,
+
+ /* keep last */
+ __NL802154_DEV_ADDR_AFTER_LAST,
+ NL802154_DEV_ADDR_MAX = __NL802154_DEV_ADDR_AFTER_LAST - 1
+};
+
+enum nl802154_dev_addr_attrs {
+ NL802154_DEV_ADDR_ATTR_UNSPEC,
+
+ NL802154_DEV_ADDR_ATTR_PAN_ID,
+ NL802154_DEV_ADDR_ATTR_MODE,
+ NL802154_DEV_ADDR_ATTR_SHORT,
+ NL802154_DEV_ADDR_ATTR_EXTENDED,
+ NL802154_DEV_ADDR_ATTR_PAD,
+
+ /* keep last */
+ __NL802154_DEV_ADDR_ATTR_AFTER_LAST,
+ NL802154_DEV_ADDR_ATTR_MAX = __NL802154_DEV_ADDR_ATTR_AFTER_LAST - 1
+};
+
+enum nl802154_key_id_modes {
+ NL802154_KEY_ID_MODE_IMPLICIT,
+ NL802154_KEY_ID_MODE_INDEX,
+ NL802154_KEY_ID_MODE_INDEX_SHORT,
+ NL802154_KEY_ID_MODE_INDEX_EXTENDED,
+
+ /* keep last */
+ __NL802154_KEY_ID_MODE_AFTER_LAST,
+ NL802154_KEY_ID_MODE_MAX = __NL802154_KEY_ID_MODE_AFTER_LAST - 1
+};
+
+enum nl802154_key_id_attrs {
+ NL802154_KEY_ID_ATTR_UNSPEC,
+
+ NL802154_KEY_ID_ATTR_MODE,
+ NL802154_KEY_ID_ATTR_INDEX,
+ NL802154_KEY_ID_ATTR_IMPLICIT,
+ NL802154_KEY_ID_ATTR_SOURCE_SHORT,
+ NL802154_KEY_ID_ATTR_SOURCE_EXTENDED,
+ NL802154_KEY_ID_ATTR_PAD,
+
+ /* keep last */
+ __NL802154_KEY_ID_ATTR_AFTER_LAST,
+ NL802154_KEY_ID_ATTR_MAX = __NL802154_KEY_ID_ATTR_AFTER_LAST - 1
+};
+
+enum nl802154_seclevels {
+ NL802154_SECLEVEL_NONE,
+ NL802154_SECLEVEL_MIC32,
+ NL802154_SECLEVEL_MIC64,
+ NL802154_SECLEVEL_MIC128,
+ NL802154_SECLEVEL_ENC,
+ NL802154_SECLEVEL_ENC_MIC32,
+ NL802154_SECLEVEL_ENC_MIC64,
+ NL802154_SECLEVEL_ENC_MIC128,
+
+ /* keep last */
+ __NL802154_SECLEVEL_AFTER_LAST,
+ NL802154_SECLEVEL_MAX = __NL802154_SECLEVEL_AFTER_LAST - 1
+};
+
+enum nl802154_frames {
+ NL802154_FRAME_BEACON,
+ NL802154_FRAME_DATA,
+ NL802154_FRAME_ACK,
+ NL802154_FRAME_CMD,
+
+ /* keep last */
+ __NL802154_FRAME_AFTER_LAST,
+ NL802154_FRAME_MAX = __NL802154_FRAME_AFTER_LAST - 1
+};
+
+enum nl802154_cmd_frames {
+ __NL802154_CMD_FRAME_INVALID,
+ NL802154_CMD_FRAME_ASSOC_REQUEST,
+ NL802154_CMD_FRAME_ASSOC_RESPONSE,
+ NL802154_CMD_FRAME_DISASSOC_NOTIFY,
+ NL802154_CMD_FRAME_DATA_REQUEST,
+ NL802154_CMD_FRAME_PAN_ID_CONFLICT_NOTIFY,
+ NL802154_CMD_FRAME_ORPHAN_NOTIFY,
+ NL802154_CMD_FRAME_BEACON_REQUEST,
+ NL802154_CMD_FRAME_COORD_REALIGNMENT,
+ NL802154_CMD_FRAME_GTS_REQUEST,
+
+ /* keep last */
+ __NL802154_CMD_FRAME_AFTER_LAST,
+ NL802154_CMD_FRAME_MAX = __NL802154_CMD_FRAME_AFTER_LAST - 1
+};
+
+enum nl802154_seclevel_attrs {
+ NL802154_SECLEVEL_ATTR_UNSPEC,
+
+ NL802154_SECLEVEL_ATTR_LEVELS,
+ NL802154_SECLEVEL_ATTR_FRAME,
+ NL802154_SECLEVEL_ATTR_CMD_FRAME,
+ NL802154_SECLEVEL_ATTR_DEV_OVERRIDE,
+
+ /* keep last */
+ __NL802154_SECLEVEL_ATTR_AFTER_LAST,
+ NL802154_SECLEVEL_ATTR_MAX = __NL802154_SECLEVEL_ATTR_AFTER_LAST - 1
+};
+
+/* TODO what is this? couldn't find in mib */
+enum {
+ NL802154_DEVKEY_IGNORE,
+ NL802154_DEVKEY_RESTRICT,
+ NL802154_DEVKEY_RECORD,
+
+ /* keep last */
+ __NL802154_DEVKEY_AFTER_LAST,
+ NL802154_DEVKEY_MAX = __NL802154_DEVKEY_AFTER_LAST - 1
+};
+
+enum nl802154_dev {
+ NL802154_DEV_ATTR_UNSPEC,
+
+ NL802154_DEV_ATTR_FRAME_COUNTER,
+ NL802154_DEV_ATTR_PAN_ID,
+ NL802154_DEV_ATTR_SHORT_ADDR,
+ NL802154_DEV_ATTR_EXTENDED_ADDR,
+ NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
+ NL802154_DEV_ATTR_KEY_MODE,
+ NL802154_DEV_ATTR_PAD,
+
+ /* keep last */
+ __NL802154_DEV_ATTR_AFTER_LAST,
+ NL802154_DEV_ATTR_MAX = __NL802154_DEV_ATTR_AFTER_LAST - 1
+};
+
+enum nl802154_devkey {
+ NL802154_DEVKEY_ATTR_UNSPEC,
+
+ NL802154_DEVKEY_ATTR_FRAME_COUNTER,
+ NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
+ NL802154_DEVKEY_ATTR_ID,
+ NL802154_DEVKEY_ATTR_PAD,
+
+ /* keep last */
+ __NL802154_DEVKEY_ATTR_AFTER_LAST,
+ NL802154_DEVKEY_ATTR_MAX = __NL802154_DEVKEY_ATTR_AFTER_LAST - 1
+};
+
+enum nl802154_key {
+ NL802154_KEY_ATTR_UNSPEC,
+
+ NL802154_KEY_ATTR_ID,
+ NL802154_KEY_ATTR_USAGE_FRAMES,
+ NL802154_KEY_ATTR_USAGE_CMDS,
+ NL802154_KEY_ATTR_BYTES,
+
+ /* keep last */
+ __NL802154_KEY_ATTR_AFTER_LAST,
+ NL802154_KEY_ATTR_MAX = __NL802154_KEY_ATTR_AFTER_LAST - 1
+};
+
+#define NL802154_KEY_SIZE 16
+#define NL802154_CMD_FRAME_NR_IDS 256
+
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
+#endif /* __NL802154_H */
diff --git a/include/net/nsh.h b/include/net/nsh.h
new file mode 100644
index 000000000..350b1ad11
--- /dev/null
+++ b/include/net/nsh.h
@@ -0,0 +1,310 @@
+#ifndef __NET_NSH_H
+#define __NET_NSH_H 1
+
+#include <linux/skbuff.h>
+
+/*
+ * Network Service Header:
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Ver|O|U| TTL | Length |U|U|U|U|MD Type| Next Protocol |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Service Path Identifier (SPI) | Service Index |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * ~ Mandatory/Optional Context Headers ~
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Version: The version field is used to ensure backward compatibility
+ * going forward with future NSH specification updates. It MUST be set
+ * to 0x0 by the sender, in this first revision of NSH. Given the
+ * widespread implementation of existing hardware that uses the first
+ * nibble after an MPLS label stack for ECMP decision processing, this
+ * document reserves version 01b and this value MUST NOT be used in
+ * future versions of the protocol. Please see [RFC7325] for further
+ * discussion of MPLS-related forwarding requirements.
+ *
+ * O bit: Setting this bit indicates an Operations, Administration, and
+ * Maintenance (OAM) packet. The actual format and processing of SFC
+ * OAM packets is outside the scope of this specification (see for
+ * example [I-D.ietf-sfc-oam-framework] for one approach).
+ *
+ * The O bit MUST be set for OAM packets and MUST NOT be set for non-OAM
+ * packets. The O bit MUST NOT be modified along the SFP.
+ *
+ * SF/SFF/SFC Proxy/Classifier implementations that do not support SFC
+ * OAM procedures SHOULD discard packets with O bit set, but MAY support
+ * a configurable parameter to enable forwarding received SFC OAM
+ * packets unmodified to the next element in the chain. Forwarding OAM
+ * packets unmodified by SFC elements that do not support SFC OAM
+ * procedures may be acceptable for a subset of OAM functions, but can
+ * result in unexpected outcomes for others, thus it is recommended to
+ * analyze the impact of forwarding an OAM packet for all OAM functions
+ * prior to enabling this behavior. The configurable parameter MUST be
+ * disabled by default.
+ *
+ * TTL: Indicates the maximum SFF hops for an SFP. This field is used
+ * for service plane loop detection. The initial TTL value SHOULD be
+ * configurable via the control plane; the configured initial value can
+ * be specific to one or more SFPs. If no initial value is explicitly
+ * provided, the default initial TTL value of 63 MUST be used. Each SFF
+ * involved in forwarding an NSH packet MUST decrement the TTL value by
+ * 1 prior to NSH forwarding lookup. Decrementing by 1 from an incoming
+ * value of 0 shall result in a TTL value of 63. The packet MUST NOT be
+ * forwarded if TTL is, after decrement, 0.
+ *
+ * All other flag fields, marked U, are unassigned and available for
+ * future use, see Section 11.2.1. Unassigned bits MUST be set to zero
+ * upon origination, and MUST be ignored and preserved unmodified by
+ * other NSH supporting elements. Elements which do not understand the
+ * meaning of any of these bits MUST NOT modify their actions based on
+ * those unknown bits.
+ *
+ * Length: The total length, in 4-byte words, of NSH including the Base
+ * Header, the Service Path Header, the Fixed Length Context Header or
+ * Variable Length Context Header(s). The length MUST be 0x6 for MD
+ * Type equal to 0x1, and MUST be 0x2 or greater for MD Type equal to
+ * 0x2. The length of the NSH header MUST be an integer multiple of 4
+ * bytes, thus variable length metadata is always padded out to a
+ * multiple of 4 bytes.
+ *
+ * MD Type: Indicates the format of NSH beyond the mandatory Base Header
+ * and the Service Path Header. MD Type defines the format of the
+ * metadata being carried.
+ *
+ * 0x0 - This is a reserved value. Implementations SHOULD silently
+ * discard packets with MD Type 0x0.
+ *
+ * 0x1 - This indicates that the format of the header includes a fixed
+ * length Context Header (see Figure 4 below).
+ *
+ * 0x2 - This does not mandate any headers beyond the Base Header and
+ * Service Path Header, but may contain optional variable length Context
+ * Header(s). The semantics of the variable length Context Header(s)
+ * are not defined in this document. The format of the optional
+ * variable length Context Headers is provided in Section 2.5.1.
+ *
+ * 0xF - This value is reserved for experimentation and testing, as per
+ * [RFC3692]. Implementations not explicitly configured to be part of
+ * an experiment SHOULD silently discard packets with MD Type 0xF.
+ *
+ * Next Protocol: indicates the protocol type of the encapsulated data.
+ * NSH does not alter the inner payload, and the semantics on the inner
+ * protocol remain unchanged due to NSH service function chaining.
+ * Please see the IANA Considerations section below, Section 11.2.5.
+ *
+ * This document defines the following Next Protocol values:
+ *
+ * 0x1: IPv4
+ * 0x2: IPv6
+ * 0x3: Ethernet
+ * 0x4: NSH
+ * 0x5: MPLS
+ * 0xFE: Experiment 1
+ * 0xFF: Experiment 2
+ *
+ * Packets with Next Protocol values not supported SHOULD be silently
+ * dropped by default, although an implementation MAY provide a
+ * configuration parameter to forward them. Additionally, an
+ * implementation not explicitly configured for a specific experiment
+ * [RFC3692] SHOULD silently drop packets with Next Protocol values 0xFE
+ * and 0xFF.
+ *
+ * Service Path Identifier (SPI): Identifies a service path.
+ * Participating nodes MUST use this identifier for Service Function
+ * Path selection. The initial classifier MUST set the appropriate SPI
+ * for a given classification result.
+ *
+ * Service Index (SI): Provides location within the SFP. The initial
+ * classifier for a given SFP SHOULD set the SI to 255, however the
+ * control plane MAY configure the initial value of SI as appropriate
+ * (i.e., taking into account the length of the service function path).
+ * The Service Index MUST be decremented by a value of 1 by Service
+ * Functions or by SFC Proxy nodes after performing required services
+ * and the new decremented SI value MUST be used in the egress packet's
+ * NSH. The initial Classifier MUST send the packet to the first SFF in
+ * the identified SFP for forwarding along an SFP. If re-classification
+ * occurs, and that re-classification results in a new SPI, the
+ * (re)classifier is, in effect, the initial classifier for the
+ * resultant SPI.
+ *
+ * The SI is used in conjunction with the Service Path Identifier for
+ * Service Function Path Selection and for determining the next SFF/SF
+ * in the path. The SI is also valuable when troubleshooting or
+ * reporting service paths. Additionally, while the TTL field is the
+ * main mechanism for service plane loop detection, the SI can also be
+ * used for detecting service plane loops.
+ *
+ * When the Base Header specifies MD Type = 0x1, a Fixed Length Context
+ * Header (16-bytes) MUST be present immediately following the Service
+ * Path Header. The value of a Fixed Length Context
+ * Header that carries no metadata MUST be set to zero.
+ *
+ * When the base header specifies MD Type = 0x2, zero or more Variable
+ * Length Context Headers MAY be added, immediately following the
+ * Service Path Header (see Figure 5). Therefore, Length = 0x2,
+ * indicates that only the Base Header followed by the Service Path
+ * Header are present. The optional Variable Length Context Headers
+ * MUST be of an integer number of 4-bytes. The base header Length
+ * field MUST be used to determine the offset to locate the original
+ * packet or frame for SFC nodes that require access to that
+ * information.
+ *
+ * The format of the optional variable length Context Headers
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Metadata Class | Type |U| Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Variable Metadata |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Metadata Class (MD Class): Defines the scope of the 'Type' field to
+ * provide a hierarchical namespace. The IANA Considerations
+ * Section 11.2.4 defines how the MD Class values can be allocated to
+ * standards bodies, vendors, and others.
+ *
+ * Type: Indicates the explicit type of metadata being carried. The
+ * definition of the Type is the responsibility of the MD Class owner.
+ *
+ * Unassigned bit: One unassigned bit is available for future use. This
+ * bit MUST NOT be set, and MUST be ignored on receipt.
+ *
+ * Length: Indicates the length of the variable metadata, in bytes. In
+ * case the metadata length is not an integer number of 4-byte words,
+ * the sender MUST add pad bytes immediately following the last metadata
+ * byte to extend the metadata to an integer number of 4-byte words.
+ * The receiver MUST round up the length field to the nearest 4-byte
+ * word boundary, to locate and process the next field in the packet.
+ * The receiver MUST access only those bytes in the metadata indicated
+ * by the length field (i.e., actual number of bytes) and MUST ignore
+ * the remaining bytes up to the nearest 4-byte word boundary. The
+ * Length may be 0 or greater.
+ *
+ * A value of 0 denotes a Context Header without a Variable Metadata
+ * field.
+ *
+ * [0] https://datatracker.ietf.org/doc/draft-ietf-sfc-nsh/
+ */
+
+/**
+ * struct nsh_md1_ctx - Keeps track of NSH context data
+ * @context: NSH contexts 1-4.
+ */
+struct nsh_md1_ctx {
+ __be32 context[4];
+};
+
+struct nsh_md2_tlv {
+ __be16 md_class;
+ u8 type;
+ u8 length;
+ u8 md_value[];
+};
+
+struct nshhdr {
+ __be16 ver_flags_ttl_len;
+ u8 mdtype;
+ u8 np;
+ __be32 path_hdr;
+ union {
+ struct nsh_md1_ctx md1;
+ struct nsh_md2_tlv md2;
+ };
+};
+
+/* Masking NSH header fields. */
+#define NSH_VER_MASK 0xc000
+#define NSH_VER_SHIFT 14
+#define NSH_FLAGS_MASK 0x3000
+#define NSH_FLAGS_SHIFT 12
+#define NSH_TTL_MASK 0x0fc0
+#define NSH_TTL_SHIFT 6
+#define NSH_LEN_MASK 0x003f
+#define NSH_LEN_SHIFT 0
+
+#define NSH_MDTYPE_MASK 0x0f
+#define NSH_MDTYPE_SHIFT 0
+
+#define NSH_SPI_MASK 0xffffff00
+#define NSH_SPI_SHIFT 8
+#define NSH_SI_MASK 0x000000ff
+#define NSH_SI_SHIFT 0
+
+/* MD Type Registry. */
+#define NSH_M_TYPE1 0x01
+#define NSH_M_TYPE2 0x02
+#define NSH_M_EXP1 0xFE
+#define NSH_M_EXP2 0xFF
+
+/* NSH Base Header Length */
+#define NSH_BASE_HDR_LEN 8
+
+/* NSH MD Type 1 header Length. */
+#define NSH_M_TYPE1_LEN 24
+
+/* NSH header maximum Length. */
+#define NSH_HDR_MAX_LEN 256
+
+/* NSH context headers maximum Length. */
+#define NSH_CTX_HDRS_MAX_LEN 248
+
+static inline struct nshhdr *nsh_hdr(struct sk_buff *skb)
+{
+ return (struct nshhdr *)skb_network_header(skb);
+}
+
+static inline u16 nsh_hdr_len(const struct nshhdr *nsh)
+{
+ return ((ntohs(nsh->ver_flags_ttl_len) & NSH_LEN_MASK)
+ >> NSH_LEN_SHIFT) << 2;
+}
+
+static inline u8 nsh_get_ver(const struct nshhdr *nsh)
+{
+ return (ntohs(nsh->ver_flags_ttl_len) & NSH_VER_MASK)
+ >> NSH_VER_SHIFT;
+}
+
+static inline u8 nsh_get_flags(const struct nshhdr *nsh)
+{
+ return (ntohs(nsh->ver_flags_ttl_len) & NSH_FLAGS_MASK)
+ >> NSH_FLAGS_SHIFT;
+}
+
+static inline u8 nsh_get_ttl(const struct nshhdr *nsh)
+{
+ return (ntohs(nsh->ver_flags_ttl_len) & NSH_TTL_MASK)
+ >> NSH_TTL_SHIFT;
+}
+
+static inline void __nsh_set_xflag(struct nshhdr *nsh, u16 xflag, u16 xmask)
+{
+ nsh->ver_flags_ttl_len
+ = (nsh->ver_flags_ttl_len & ~htons(xmask)) | htons(xflag);
+}
+
+static inline void nsh_set_flags_and_ttl(struct nshhdr *nsh, u8 flags, u8 ttl)
+{
+ __nsh_set_xflag(nsh, ((flags << NSH_FLAGS_SHIFT) & NSH_FLAGS_MASK) |
+ ((ttl << NSH_TTL_SHIFT) & NSH_TTL_MASK),
+ NSH_FLAGS_MASK | NSH_TTL_MASK);
+}
+
+static inline void nsh_set_flags_ttl_len(struct nshhdr *nsh, u8 flags,
+ u8 ttl, u8 len)
+{
+ len = len >> 2;
+ __nsh_set_xflag(nsh, ((flags << NSH_FLAGS_SHIFT) & NSH_FLAGS_MASK) |
+ ((ttl << NSH_TTL_SHIFT) & NSH_TTL_MASK) |
+ ((len << NSH_LEN_SHIFT) & NSH_LEN_MASK),
+ NSH_FLAGS_MASK | NSH_TTL_MASK | NSH_LEN_MASK);
+}
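+
+/* An illustrative sketch (not part of this header): accessors for the SPI
+ * and SI packed into path_hdr could mirror the getters above, e.g.:
+ *
+ *	static inline u32 nsh_get_spi(const struct nshhdr *nsh)
+ *	{
+ *		return (ntohl(nsh->path_hdr) & NSH_SPI_MASK) >> NSH_SPI_SHIFT;
+ *	}
+ *
+ *	static inline u8 nsh_get_si(const struct nshhdr *nsh)
+ *	{
+ *		return (ntohl(nsh->path_hdr) & NSH_SI_MASK) >> NSH_SI_SHIFT;
+ *	}
+ */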
+
+int nsh_push(struct sk_buff *skb, const struct nshhdr *pushed_nh);
+int nsh_pop(struct sk_buff *skb);
+
+#endif /* __NET_NSH_H */
diff --git a/include/net/p8022.h b/include/net/p8022.h
new file mode 100644
index 000000000..b690ffcad
--- /dev/null
+++ b/include/net/p8022.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_P8022_H
+#define _NET_P8022_H
+
+struct net_device;
+struct packet_type;
+struct sk_buff;
+
+struct datalink_proto *
+register_8022_client(unsigned char type,
+ int (*func)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt,
+ struct net_device *orig_dev));
+void unregister_8022_client(struct datalink_proto *proto);
+
+struct datalink_proto *make_8023_client(void);
+void destroy_8023_client(struct datalink_proto *dl);
+#endif
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
new file mode 100644
index 000000000..ad0bafc87
--- /dev/null
+++ b/include/net/page_pool.h
@@ -0,0 +1,386 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * page_pool.h
+ * Author: Jesper Dangaard Brouer <netoptimizer@brouer.com>
+ * Copyright (C) 2016 Red Hat, Inc.
+ */
+
+/**
+ * DOC: page_pool allocator
+ *
+ * This page_pool allocator is optimized for the XDP mode that
+ * uses one-frame-per-page, but has fallbacks that act like the
+ * regular page allocator APIs.
+ *
+ * Basic use involves replacing alloc_pages() calls with the
+ * page_pool_alloc_pages() call. Drivers should likely use
+ * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
+ *
+ * The API keeps track of in-flight pages, in order to let API users
+ * know when it is safe to deallocate a page_pool object. Thus, API
+ * users must make sure to call page_pool_release_page() when a page is
+ * "leaving" the page_pool, or call page_pool_put_page() where
+ * appropriate, in order to maintain correct accounting.
+ *
+ * API user must only call page_pool_put_page() once on a page, as it
+ * will either recycle the page, or in case of elevated refcnt, it
+ * will release the DMA mapping and in-flight state accounting. We
+ * hope to lift this requirement in the future.
+ */
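+
+/* A minimal usage sketch (illustrative only; pdev, rx buffer handling and
+ * the sizes are hypothetical): create one pool per RX-queue, allocate
+ * from it, and return pages through the put API below:
+ *
+ *	struct page_pool_params pp_params = {
+ *		.flags		= PP_FLAG_DMA_MAP,
+ *		.order		= 0,
+ *		.pool_size	= 256,
+ *		.nid		= NUMA_NO_NODE,
+ *		.dev		= &pdev->dev,
+ *		.dma_dir	= DMA_FROM_DEVICE,
+ *	};
+ *	struct page_pool *pool = page_pool_create(&pp_params);
+ *
+ *	if (IS_ERR(pool))
+ *		return PTR_ERR(pool);
+ *
+ *	struct page *page = page_pool_dev_alloc_pages(pool);
+ *	// ... fill and consume the page ...
+ *	page_pool_put_full_page(pool, page, false);
+ */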
+#ifndef _NET_PAGE_POOL_H
+#define _NET_PAGE_POOL_H
+
+#include <linux/mm.h> /* Needed by ptr_ring */
+#include <linux/ptr_ring.h>
+#include <linux/dma-direction.h>
+
+#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
+ * map/unmap
+ */
+#define PP_FLAG_DMA_SYNC_DEV BIT(1) /* If set all pages that the driver gets
+ * from page_pool will be
+ * DMA-synced-for-device according to
+ * the length provided by the device
+ * driver.
+ * Please note DMA-sync-for-CPU is still
+ * device driver responsibility
+ */
+#define PP_FLAG_PAGE_FRAG BIT(2) /* for page frag feature */
+#define PP_FLAG_ALL (PP_FLAG_DMA_MAP |\
+ PP_FLAG_DMA_SYNC_DEV |\
+ PP_FLAG_PAGE_FRAG)
+
+/*
+ * Fast allocation side cache array/stack
+ *
+ * The cache size and refill watermark are related to the network
+ * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
+ * ring is usually refilled and the max consumed elements will be 64,
+ * thus a natural max size of objects needed in the cache.
+ *
+ * Keeping room for more objects is due to the XDP_DROP use-case, as
+ * XDP_DROP allows the opportunity to recycle objects directly into
+ * this array, since it shares the same softirq/NAPI protection. If
+ * the cache is already full (or partly full) then the XDP_DROP
+ * recycles would have to take a slower code path.
+ */
+#define PP_ALLOC_CACHE_SIZE 128
+#define PP_ALLOC_CACHE_REFILL 64
+struct pp_alloc_cache {
+ u32 count;
+ struct page *cache[PP_ALLOC_CACHE_SIZE];
+};
+
+struct page_pool_params {
+ unsigned int flags;
+ unsigned int order;
+ unsigned int pool_size;
+	int	nid;  /* NUMA node id to allocate pages from */
+ struct device *dev; /* device, for DMA pre-mapping purposes */
+ enum dma_data_direction dma_dir; /* DMA mapping direction */
+ unsigned int max_len; /* max DMA sync memory size */
+ unsigned int offset; /* DMA addr offset */
+ void (*init_callback)(struct page *page, void *arg);
+ void *init_arg;
+};
+
+#ifdef CONFIG_PAGE_POOL_STATS
+struct page_pool_alloc_stats {
+ u64 fast; /* fast path allocations */
+ u64 slow; /* slow-path order 0 allocations */
+ u64 slow_high_order; /* slow-path high order allocations */
+ u64 empty; /* failed refills due to empty ptr ring, forcing
+ * slow path allocation
+ */
+ u64 refill; /* allocations via successful refill */
+ u64 waive; /* failed refills due to numa zone mismatch */
+};
+
+struct page_pool_recycle_stats {
+ u64 cached; /* recycling placed page in the cache. */
+ u64 cache_full; /* cache was full */
+ u64 ring; /* recycling placed page back into ptr ring */
+ u64 ring_full; /* page was released from page-pool because
+ * PTR ring was full.
+ */
+ u64 released_refcnt; /* page released because of elevated
+ * refcnt
+ */
+};
+
+/* This struct wraps the above stats structs so users of the
+ * page_pool_get_stats API can pass a single argument when requesting the
+ * stats for the page pool.
+ */
+struct page_pool_stats {
+ struct page_pool_alloc_stats alloc_stats;
+ struct page_pool_recycle_stats recycle_stats;
+};
+
+int page_pool_ethtool_stats_get_count(void);
+u8 *page_pool_ethtool_stats_get_strings(u8 *data);
+u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
+
+/*
+ * Drivers that wish to harvest page pool stats and report them to users
+ * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
+ * struct page_pool_stats and call page_pool_get_stats() to get stats
+ * for the specified pool.
+ */
+bool page_pool_get_stats(struct page_pool *pool,
+ struct page_pool_stats *stats);
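+
+/* An illustrative sketch (pool and data are hypothetical driver state):
+ * an ethtool stats callback might combine these helpers as:
+ *
+ *	struct page_pool_stats stats = {};
+ *
+ *	if (page_pool_get_stats(pool, &stats))
+ *		data = page_pool_ethtool_stats_get(data, &stats);
+ */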
+#else
+
+static inline int page_pool_ethtool_stats_get_count(void)
+{
+ return 0;
+}
+
+static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
+{
+ return data;
+}
+
+static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
+{
+ return data;
+}
+
+#endif
+
+struct page_pool {
+ struct page_pool_params p;
+
+ struct delayed_work release_dw;
+ void (*disconnect)(void *);
+ unsigned long defer_start;
+ unsigned long defer_warn;
+
+ u32 pages_state_hold_cnt;
+ unsigned int frag_offset;
+ struct page *frag_page;
+ long frag_users;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+ /* these stats are incremented while in softirq context */
+ struct page_pool_alloc_stats alloc_stats;
+#endif
+ u32 xdp_mem_id;
+
+ /*
+ * Data structure for allocation side
+ *
+	 * The driver's allocation side usually already performs some
+	 * kind of resource protection. Piggyback on this protection,
+	 * and require the driver to protect the allocation side.
+	 *
+	 * For NIC drivers this means allocating a page_pool per
+	 * RX-queue, as the RX-queue is already protected by softirq/BH
+	 * scheduling and napi_schedule; the NAPI schedule guarantees
+	 * that a single napi_struct will only be scheduled on a single
+	 * CPU (see napi_schedule).
+ */
+ struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
+
+ /* Data structure for storing recycled pages.
+ *
+	 * Returning/freeing pages is more complicated
+	 * synchronization-wise, because frees can happen on remote
+	 * CPUs, with no association with the allocation resource.
+	 *
+	 * Use ptr_ring, as it separates consumer and producer
+	 * efficiently, in a way that doesn't bounce cache-lines.
+ *
+ * TODO: Implement bulk return pages into this structure.
+ */
+ struct ptr_ring ring;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+ /* recycle stats are per-cpu to avoid locking */
+ struct page_pool_recycle_stats __percpu *recycle_stats;
+#endif
+ atomic_t pages_state_release_cnt;
+
+ /* A page_pool is strictly tied to a single RX-queue being
+	 * protected by NAPI, due to the above pp_alloc_cache. This
+	 * refcnt serves to simplify driver error handling.
+ */
+ refcount_t user_cnt;
+
+ u64 destroy_cnt;
+};
+
+struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+
+static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
+{
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ return page_pool_alloc_pages(pool, gfp);
+}
+
+struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
+ unsigned int size, gfp_t gfp);
+
+static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int size)
+{
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ return page_pool_alloc_frag(pool, offset, size, gfp);
+}
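+
+/* An illustrative sketch (sizes and the rx_buf struct are hypothetical):
+ * with PP_FLAG_PAGE_FRAG set, a driver can carve several RX buffers out
+ * of one page instead of dedicating a page per buffer:
+ *
+ *	unsigned int offset;
+ *	struct page *page = page_pool_dev_alloc_frag(pool, &offset, 2048);
+ *
+ *	if (page)
+ *		rx_buf->addr = page_address(page) + offset;
+ */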
+
+/* Get the stored DMA direction. A driver might decide to store this
+ * locally and avoid the extra cache line from page_pool when
+ * determining the direction.
+ */
+static
+inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
+{
+ return pool->p.dma_dir;
+}
+
+bool page_pool_return_skb_page(struct page *page);
+
+struct page_pool *page_pool_create(const struct page_pool_params *params);
+
+struct xdp_mem_info;
+
+#ifdef CONFIG_PAGE_POOL
+void page_pool_destroy(struct page_pool *pool);
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
+ struct xdp_mem_info *mem);
+void page_pool_release_page(struct page_pool *pool, struct page *page);
+void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+ int count);
+#else
+static inline void page_pool_destroy(struct page_pool *pool)
+{
+}
+
+static inline void page_pool_use_xdp_mem(struct page_pool *pool,
+ void (*disconnect)(void *),
+ struct xdp_mem_info *mem)
+{
+}
+static inline void page_pool_release_page(struct page_pool *pool,
+ struct page *page)
+{
+}
+
+static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+ int count)
+{
+}
+#endif
+
+void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size,
+ bool allow_direct);
+
+static inline void page_pool_fragment_page(struct page *page, long nr)
+{
+ atomic_long_set(&page->pp_frag_count, nr);
+}
+
+static inline long page_pool_defrag_page(struct page *page, long nr)
+{
+ long ret;
+
+ /* If nr == pp_frag_count then we have cleared all remaining
+ * references to the page. No need to actually overwrite it, instead
+ * we can leave this to be overwritten by the calling function.
+ *
+ * The main advantage to doing this is that an atomic_read is
+ * generally a much cheaper operation than an atomic update,
+ * especially when dealing with a page that may be partitioned
+ * into only 2 or 3 pieces.
+ */
+ if (atomic_long_read(&page->pp_frag_count) == nr)
+ return 0;
+
+ ret = atomic_long_sub_return(nr, &page->pp_frag_count);
+ WARN_ON(ret < 0);
+ return ret;
+}
+
+static inline bool page_pool_is_last_frag(struct page_pool *pool,
+ struct page *page)
+{
+ /* If fragments aren't enabled or count is 0 we were the last user */
+ return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
+ (page_pool_defrag_page(page, 1) == 0);
+}
+
+static inline void page_pool_put_page(struct page_pool *pool,
+ struct page *page,
+ unsigned int dma_sync_size,
+ bool allow_direct)
+{
+ /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
+	 * allow registering MEM_TYPE_PAGE_POOL, but still shield the linker.
+ */
+#ifdef CONFIG_PAGE_POOL
+ if (!page_pool_is_last_frag(pool, page))
+ return;
+
+ page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
+#endif
+}
+
+/* Same as above but will try to sync the entire area pool->max_len */
+static inline void page_pool_put_full_page(struct page_pool *pool,
+ struct page *page, bool allow_direct)
+{
+ page_pool_put_page(pool, page, -1, allow_direct);
+}
+
+/* Same as above but the caller must guarantee safe context, e.g. NAPI */
+static inline void page_pool_recycle_direct(struct page_pool *pool,
+ struct page *page)
+{
+ page_pool_put_full_page(pool, page, true);
+}
+
+#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \
+ (sizeof(dma_addr_t) > sizeof(unsigned long))
+
+static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
+{
+ dma_addr_t ret = page->dma_addr;
+
+ if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
+ ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
+
+ return ret;
+}
+
+static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+ page->dma_addr = addr;
+ if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
+ page->dma_addr_upper = upper_32_bits(addr);
+}
+
+static inline bool is_page_pool_compiled_in(void)
+{
+#ifdef CONFIG_PAGE_POOL
+ return true;
+#else
+ return false;
+#endif
+}
+
+static inline bool page_pool_put(struct page_pool *pool)
+{
+ return refcount_dec_and_test(&pool->user_cnt);
+}
+
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid);
+static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
+{
+ if (unlikely(pool->p.nid != new_nid))
+ page_pool_update_nid(pool, new_nid);
+}
+
+#endif /* _NET_PAGE_POOL_H */
diff --git a/include/net/phonet/gprs.h b/include/net/phonet/gprs.h
new file mode 100644
index 000000000..6b7f57b2f
--- /dev/null
+++ b/include/net/phonet/gprs.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * File: pep_gprs.h
+ *
+ * GPRS over Phonet pipe end point socket
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * Author: Rémi Denis-Courmont
+ */
+
+#ifndef NET_PHONET_GPRS_H
+#define NET_PHONET_GPRS_H
+
+struct sock;
+struct sk_buff;
+
+int pep_writeable(struct sock *sk);
+int pep_write(struct sock *sk, struct sk_buff *skb);
+struct sk_buff *pep_read(struct sock *sk);
+
+int gprs_attach(struct sock *sk);
+void gprs_detach(struct sock *sk);
+
+#endif
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
new file mode 100644
index 000000000..645dddf5c
--- /dev/null
+++ b/include/net/phonet/pep.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * File: pep.h
+ *
+ * Phonet Pipe End Point sockets definitions
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ */
+
+#ifndef NET_PHONET_PEP_H
+#define NET_PHONET_PEP_H
+
+#include <linux/skbuff.h>
+#include <net/phonet/phonet.h>
+
+struct pep_sock {
+ struct pn_sock pn_sk;
+
+ /* XXX: union-ify listening vs connected stuff ? */
+ /* Listening socket stuff: */
+ struct hlist_head hlist;
+
+ /* Connected socket stuff: */
+ struct sock *listener;
+ struct sk_buff_head ctrlreq_queue;
+#define PNPIPE_CTRLREQ_MAX 10
+ atomic_t tx_credits;
+ int ifindex;
+ u16 peer_type; /* peer type/subtype */
+ u8 pipe_handle;
+
+ u8 rx_credits;
+ u8 rx_fc; /* RX flow control */
+ u8 tx_fc; /* TX flow control */
+ u8 init_enable; /* auto-enable at creation */
+ u8 aligned;
+};
+
+static inline struct pep_sock *pep_sk(struct sock *sk)
+{
+ return (struct pep_sock *)sk;
+}
+
+extern const struct proto_ops phonet_stream_ops;
+
+/* Pipe protocol definitions */
+struct pnpipehdr {
+ u8 utid; /* transaction ID */
+ u8 message_id;
+ u8 pipe_handle;
+ union {
+ u8 state_after_connect; /* connect request */
+ u8 state_after_reset; /* reset request */
+ u8 error_code; /* any response */
+ u8 pep_type; /* status indication */
+ u8 data0; /* anything else */
+ };
+ u8 data[];
+};
+#define other_pep_type data[0]
+
+static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb)
+{
+ return (struct pnpipehdr *)skb_transport_header(skb);
+}
+
+#define MAX_PNPIPE_HEADER (MAX_PHONET_HEADER + 4)
+
+enum {
+ PNS_PIPE_CREATE_REQ = 0x00,
+ PNS_PIPE_CREATE_RESP,
+ PNS_PIPE_REMOVE_REQ,
+ PNS_PIPE_REMOVE_RESP,
+
+ PNS_PIPE_DATA = 0x20,
+ PNS_PIPE_ALIGNED_DATA,
+
+ PNS_PEP_CONNECT_REQ = 0x40,
+ PNS_PEP_CONNECT_RESP,
+ PNS_PEP_DISCONNECT_REQ,
+ PNS_PEP_DISCONNECT_RESP,
+ PNS_PEP_RESET_REQ,
+ PNS_PEP_RESET_RESP,
+ PNS_PEP_ENABLE_REQ,
+ PNS_PEP_ENABLE_RESP,
+ PNS_PEP_CTRL_REQ,
+ PNS_PEP_CTRL_RESP,
+ PNS_PEP_DISABLE_REQ = 0x4C,
+ PNS_PEP_DISABLE_RESP,
+
+ PNS_PEP_STATUS_IND = 0x60,
+ PNS_PIPE_CREATED_IND,
+ PNS_PIPE_RESET_IND = 0x63,
+ PNS_PIPE_ENABLED_IND,
+ PNS_PIPE_REDIRECTED_IND,
+ PNS_PIPE_DISABLED_IND = 0x66,
+};
+
+#define PN_PIPE_INVALID_HANDLE 0xff
+#define PN_PEP_TYPE_COMMON 0x00
+
+/* Phonet pipe status indication */
+enum {
+ PN_PEP_IND_FLOW_CONTROL,
+ PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
+};
+
+/* Phonet pipe error codes */
+enum {
+ PN_PIPE_NO_ERROR,
+ PN_PIPE_ERR_INVALID_PARAM,
+ PN_PIPE_ERR_INVALID_HANDLE,
+ PN_PIPE_ERR_INVALID_CTRL_ID,
+ PN_PIPE_ERR_NOT_ALLOWED,
+ PN_PIPE_ERR_PEP_IN_USE,
+ PN_PIPE_ERR_OVERLOAD,
+ PN_PIPE_ERR_DEV_DISCONNECTED,
+ PN_PIPE_ERR_TIMEOUT,
+ PN_PIPE_ERR_ALL_PIPES_IN_USE,
+ PN_PIPE_ERR_GENERAL,
+ PN_PIPE_ERR_NOT_SUPPORTED,
+};
+
+/* Phonet pipe states */
+enum {
+ PN_PIPE_DISABLE,
+ PN_PIPE_ENABLE,
+};
+
+/* Phonet pipe sub-block types */
+enum {
+ PN_PIPE_SB_CREATE_REQ_PEP_SUB_TYPE,
+ PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE,
+ PN_PIPE_SB_REDIRECT_REQ_PEP_SUB_TYPE,
+ PN_PIPE_SB_NEGOTIATED_FC,
+ PN_PIPE_SB_REQUIRED_FC_TX,
+ PN_PIPE_SB_PREFERRED_FC_RX,
+ PN_PIPE_SB_ALIGNED_DATA,
+};
+
+/* Phonet pipe flow control models */
+enum {
+ PN_NO_FLOW_CONTROL,
+ PN_LEGACY_FLOW_CONTROL,
+ PN_ONE_CREDIT_FLOW_CONTROL,
+ PN_MULTI_CREDIT_FLOW_CONTROL,
+ PN_MAX_FLOW_CONTROL,
+};
+
+#define pn_flow_safe(fc) ((fc) >> 1)
+
+/* Phonet pipe flow control states */
+enum {
+ PEP_IND_EMPTY,
+ PEP_IND_BUSY,
+ PEP_IND_READY,
+};
+
+#endif
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
new file mode 100644
index 000000000..862f1719b
--- /dev/null
+++ b/include/net/phonet/phonet.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * File: af_phonet.h
+ *
+ * Phonet sockets kernel definitions
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ */
+
+#ifndef AF_PHONET_H
+#define AF_PHONET_H
+
+#include <linux/phonet.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+
+/*
+ * The lower layers may not require more space, ever. Make sure it's
+ * enough.
+ */
+#define MAX_PHONET_HEADER (8 + MAX_HEADER)
+
+/*
+ * Every Phonet* socket has this structure first in its
+ * protocol-specific structure under name c.
+ */
+struct pn_sock {
+ struct sock sk;
+ u16 sobject;
+ u16 dobject;
+ u8 resource;
+};
+
+static inline struct pn_sock *pn_sk(struct sock *sk)
+{
+ return (struct pn_sock *)sk;
+}
+
+extern const struct proto_ops phonet_dgram_ops;
+
+void pn_sock_init(void);
+struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *sa);
+void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb);
+void phonet_get_local_port_range(int *min, int *max);
+int pn_sock_hash(struct sock *sk);
+void pn_sock_unhash(struct sock *sk);
+int pn_sock_get_port(struct sock *sk, unsigned short sport);
+
+struct sock *pn_find_sock_by_res(struct net *net, u8 res);
+int pn_sock_bind_res(struct sock *sock, u8 res);
+int pn_sock_unbind_res(struct sock *sk, u8 res);
+void pn_sock_unbind_all_res(struct sock *sk);
+
+int pn_skb_send(struct sock *sk, struct sk_buff *skb,
+ const struct sockaddr_pn *target);
+
+static inline struct phonethdr *pn_hdr(struct sk_buff *skb)
+{
+ return (struct phonethdr *)skb_network_header(skb);
+}
+
+static inline struct phonetmsg *pn_msg(struct sk_buff *skb)
+{
+ return (struct phonetmsg *)skb_transport_header(skb);
+}
+
+/*
+ * Get the other party's sockaddr from received skb. The skb begins
+ * with a Phonet header.
+ */
+static inline
+void pn_skb_get_src_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa)
+{
+ struct phonethdr *ph = pn_hdr(skb);
+ u16 obj = pn_object(ph->pn_sdev, ph->pn_sobj);
+
+ sa->spn_family = AF_PHONET;
+ pn_sockaddr_set_object(sa, obj);
+ pn_sockaddr_set_resource(sa, ph->pn_res);
+ memset(sa->spn_zero, 0, sizeof(sa->spn_zero));
+}
+
+static inline
+void pn_skb_get_dst_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa)
+{
+ struct phonethdr *ph = pn_hdr(skb);
+ u16 obj = pn_object(ph->pn_rdev, ph->pn_robj);
+
+ sa->spn_family = AF_PHONET;
+ pn_sockaddr_set_object(sa, obj);
+ pn_sockaddr_set_resource(sa, ph->pn_res);
+ memset(sa->spn_zero, 0, sizeof(sa->spn_zero));
+}
+
+/* Protocols in Phonet protocol family. */
+struct phonet_protocol {
+ const struct proto_ops *ops;
+ struct proto *prot;
+ int sock_type;
+};
+
+int phonet_proto_register(unsigned int protocol,
+ const struct phonet_protocol *pp);
+void phonet_proto_unregister(unsigned int protocol,
+ const struct phonet_protocol *pp);
+
+int phonet_sysctl_init(void);
+void phonet_sysctl_exit(void);
+int isi_register(void);
+void isi_unregister(void);
+
+#endif
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
new file mode 100644
index 000000000..e9dc8dca5
--- /dev/null
+++ b/include/net/phonet/pn_dev.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * File: pn_dev.h
+ *
+ * Phonet network device
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ */
+
+#ifndef PN_DEV_H
+#define PN_DEV_H
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+struct net;
+
+struct phonet_device_list {
+ struct list_head list;
+ struct mutex lock;
+};
+
+struct phonet_device_list *phonet_device_list(struct net *net);
+
+struct phonet_device {
+ struct list_head list;
+ struct net_device *netdev;
+ DECLARE_BITMAP(addrs, 64);
+ struct rcu_head rcu;
+};
+
+int phonet_device_init(void);
+void phonet_device_exit(void);
+int phonet_netlink_register(void);
+struct net_device *phonet_device_get(struct net *net);
+
+int phonet_address_add(struct net_device *dev, u8 addr);
+int phonet_address_del(struct net_device *dev, u8 addr);
+u8 phonet_address_get(struct net_device *dev, u8 addr);
+int phonet_address_lookup(struct net *net, u8 addr);
+void phonet_address_notify(int event, struct net_device *dev, u8 addr);
+
+int phonet_route_add(struct net_device *dev, u8 daddr);
+int phonet_route_del(struct net_device *dev, u8 daddr);
+void rtm_phonet_notify(int event, struct net_device *dev, u8 dst);
+struct net_device *phonet_route_get_rcu(struct net *net, u8 daddr);
+struct net_device *phonet_route_output(struct net *net, u8 daddr);
+
+#define PN_NO_ADDR 0xff
+
+extern const struct seq_operations pn_sock_seq_ops;
+extern const struct seq_operations pn_res_seq_ops;
+
+#endif
diff --git a/include/net/pie.h b/include/net/pie.h
new file mode 100644
index 000000000..3fe2361e0
--- /dev/null
+++ b/include/net/pie.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __NET_SCHED_PIE_H
+#define __NET_SCHED_PIE_H
+
+#include <linux/ktime.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/inet_ecn.h>
+#include <net/pkt_sched.h>
+
+#define MAX_PROB (U64_MAX >> BITS_PER_BYTE)
+#define DTIME_INVALID U64_MAX
+#define QUEUE_THRESHOLD 16384
+#define DQCOUNT_INVALID -1
+#define PIE_SCALE 8
+
+/**
+ * struct pie_params - contains pie parameters
+ * @target: target delay in pschedtime
+ * @tupdate: interval at which drop probability is calculated
+ * @limit: total number of packets that can be in the queue
+ * @alpha: parameter to control drop probability
+ * @beta: parameter to control drop probability
+ * @ecn: is ECN marking of packets enabled
+ * @bytemode: is drop probability scaled based on pkt size
+ * @dq_rate_estimator: is Little's law used for qdelay calculation
+ */
+struct pie_params {
+ psched_time_t target;
+ u32 tupdate;
+ u32 limit;
+ u32 alpha;
+ u32 beta;
+ u8 ecn;
+ u8 bytemode;
+ u8 dq_rate_estimator;
+};
+
+/**
+ * struct pie_vars - contains pie variables
+ * @qdelay: current queue delay
+ * @qdelay_old: queue delay in previous qdelay calculation
+ * @burst_time: burst time allowance
+ * @dq_tstamp: timestamp at which dq rate was last calculated
+ * @prob: drop probability
+ * @accu_prob: accumulated drop probability
+ * @dq_count: number of bytes dequeued in a measurement cycle
+ * @avg_dq_rate: calculated average dq rate
+ * @backlog_old: queue backlog during previous qdelay calculation
+ */
+struct pie_vars {
+ psched_time_t qdelay;
+ psched_time_t qdelay_old;
+ psched_time_t burst_time;
+ psched_time_t dq_tstamp;
+ u64 prob;
+ u64 accu_prob;
+ u64 dq_count;
+ u32 avg_dq_rate;
+ u32 backlog_old;
+};
+
+/**
+ * struct pie_stats - contains pie stats
+ * @packets_in: total number of packets enqueued
+ * @dropped: packets dropped due to pie action
+ * @overlimit: packets dropped due to lack of space in queue
+ * @ecn_mark: packets marked with ECN
+ * @maxq: maximum queue size
+ */
+struct pie_stats {
+ u32 packets_in;
+ u32 dropped;
+ u32 overlimit;
+ u32 ecn_mark;
+ u32 maxq;
+};
+
+/**
+ * struct pie_skb_cb - contains private skb vars
+ * @enqueue_time: timestamp when the packet is enqueued
+ * @mem_usage: size of the skb during enqueue
+ */
+struct pie_skb_cb {
+ psched_time_t enqueue_time;
+ u32 mem_usage;
+};
+
+static inline void pie_params_init(struct pie_params *params)
+{
+ params->target = PSCHED_NS2TICKS(15 * NSEC_PER_MSEC); /* 15 ms */
+ params->tupdate = usecs_to_jiffies(15 * USEC_PER_MSEC); /* 15 ms */
+ params->limit = 1000;
+ params->alpha = 2;
+ params->beta = 20;
+ params->ecn = false;
+ params->bytemode = false;
+ params->dq_rate_estimator = false;
+}
+
+static inline void pie_vars_init(struct pie_vars *vars)
+{
+ vars->burst_time = PSCHED_NS2TICKS(150 * NSEC_PER_MSEC); /* 150 ms */
+ vars->dq_tstamp = DTIME_INVALID;
+ vars->accu_prob = 0;
+ vars->dq_count = DQCOUNT_INVALID;
+ vars->avg_dq_rate = 0;
+}
+
+static inline struct pie_skb_cb *get_pie_cb(const struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct pie_skb_cb));
+ return (struct pie_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static inline psched_time_t pie_get_enqueue_time(const struct sk_buff *skb)
+{
+ return get_pie_cb(skb)->enqueue_time;
+}
+
+static inline void pie_set_enqueue_time(struct sk_buff *skb)
+{
+ get_pie_cb(skb)->enqueue_time = psched_get_time();
+}
+
+bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
+ struct pie_vars *vars, u32 backlog, u32 packet_size);
+
+void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
+ struct pie_vars *vars, u32 backlog);
+
+void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
+ u32 backlog);
+
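+/* An illustrative sketch (q is a hypothetical qdisc private struct
+ * embedding pie_params and pie_vars): an enqueue path typically
+ * combines these helpers as:
+ *
+ *	if (pie_drop_early(sch, &q->params, &q->vars,
+ *			   sch->qstats.backlog, skb->len)) {
+ *		...	// drop or ECN-mark, then update q->stats
+ *	}
+ *	pie_set_enqueue_time(skb);
+ */
+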
+#endif
diff --git a/include/net/ping.h b/include/net/ping.h
new file mode 100644
index 000000000..bc7779262
--- /dev/null
+++ b/include/net/ping.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the "ping" module.
+ */
+#ifndef _PING_H
+#define _PING_H
+
+#include <net/icmp.h>
+#include <net/netns/hash.h>
+
+/* PING_HTABLE_SIZE must be power of 2 */
+#define PING_HTABLE_SIZE 64
+#define PING_HTABLE_MASK (PING_HTABLE_SIZE-1)
+
+#define GID_T_MAX (((gid_t)~0U) - 1)
+
+/* Compatibility glue so we can support IPv6 when it's compiled as a module */
+struct pingv6_ops {
+ int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len,
+ int *addr_len);
+ void (*ip6_datagram_recv_common_ctl)(struct sock *sk,
+ struct msghdr *msg,
+ struct sk_buff *skb);
+ void (*ip6_datagram_recv_specific_ctl)(struct sock *sk,
+ struct msghdr *msg,
+ struct sk_buff *skb);
+ int (*icmpv6_err_convert)(u8 type, u8 code, int *err);
+ void (*ipv6_icmp_error)(struct sock *sk, struct sk_buff *skb, int err,
+ __be16 port, u32 info, u8 *payload);
+ int (*ipv6_chk_addr)(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, int strict);
+};
+
+struct ping_iter_state {
+ struct seq_net_private p;
+ int bucket;
+ sa_family_t family;
+};
+
+extern struct proto ping_prot;
+#if IS_ENABLED(CONFIG_IPV6)
+extern struct pingv6_ops pingv6_ops;
+#endif
+
+struct pingfakehdr {
+ struct icmphdr icmph;
+ struct msghdr *msg;
+ sa_family_t family;
+ __wsum wcheck;
+};
+
+int ping_get_port(struct sock *sk, unsigned short ident);
+int ping_hash(struct sock *sk);
+void ping_unhash(struct sock *sk);
+
+int ping_init_sock(struct sock *sk);
+void ping_close(struct sock *sk, long timeout);
+int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+void ping_err(struct sk_buff *skb, int offset, u32 info);
+int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd,
+ struct sk_buff *);
+
+int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int flags, int *addr_len);
+int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
+ void *user_icmph, size_t icmph_len);
+int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+enum skb_drop_reason ping_rcv(struct sk_buff *skb);
+
+#ifdef CONFIG_PROC_FS
+void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family);
+void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos);
+void ping_seq_stop(struct seq_file *seq, void *v);
+
+int __init ping_proc_init(void);
+void ping_proc_exit(void);
+#endif
+
+void __init ping_init(void);
+int __init pingv6_init(void);
+void pingv6_exit(void);
+
+#endif /* _PING_H */
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
new file mode 100644
index 000000000..4cabb32a2
--- /dev/null
+++ b/include/net/pkt_cls.h
@@ -0,0 +1,1069 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_PKT_CLS_H
+#define __NET_PKT_CLS_H
+
+#include <linux/pkt_cls.h>
+#include <linux/workqueue.h>
+#include <net/sch_generic.h>
+#include <net/act_api.h>
+#include <net/net_namespace.h>
+
+/* TC action not accessible from user space */
+#define TC_ACT_CONSUMED (TC_ACT_VALUE_MAX + 1)
+
+/* Basic packet classifier frontend definitions. */
+
+struct tcf_walker {
+ int stop;
+ int skip;
+ int count;
+ bool nonempty;
+ unsigned long cookie;
+ int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
+};
+
+int register_tcf_proto_ops(struct tcf_proto_ops *ops);
+void unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+
+struct tcf_block_ext_info {
+ enum flow_block_binder_type binder_type;
+ tcf_chain_head_change_t *chain_head_change;
+ void *chain_head_change_priv;
+ u32 block_index;
+};
+
+struct tcf_qevent {
+ struct tcf_block *block;
+ struct tcf_block_ext_info info;
+ struct tcf_proto __rcu *filter_chain;
+};
+
+struct tcf_block_cb;
+bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
+
+#ifdef CONFIG_NET_CLS
+struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
+ u32 chain_index);
+void tcf_chain_put_by_act(struct tcf_chain *chain);
+struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
+ struct tcf_chain *chain);
+struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
+ struct tcf_proto *tp);
+void tcf_block_netif_keep_dst(struct tcf_block *block);
+int tcf_block_get(struct tcf_block **p_block,
+ struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+ struct netlink_ext_ack *extack);
+int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
+ struct tcf_block_ext_info *ei,
+ struct netlink_ext_ack *extack);
+void tcf_block_put(struct tcf_block *block);
+void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
+ struct tcf_block_ext_info *ei);
+
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+ return block->index;
+}
+
+static inline bool tcf_block_non_null_shared(struct tcf_block *block)
+{
+ return block && block->index;
+}
+
+static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
+{
+ WARN_ON(tcf_block_shared(block));
+ return block->q;
+}
+
+int tcf_classify(struct sk_buff *skb,
+ const struct tcf_block *block,
+ const struct tcf_proto *tp, struct tcf_result *res,
+ bool compat_mode);
+
+static inline bool tc_cls_stats_dump(struct tcf_proto *tp,
+ struct tcf_walker *arg,
+ void *filter)
+{
+ if (arg->count >= arg->skip && arg->fn(tp, filter, arg) < 0) {
+ arg->stop = 1;
+ return false;
+ }
+
+ arg->count++;
+ return true;
+}
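+
+/* An illustrative sketch (head/f are hypothetical classifier state): a
+ * classifier's ->walk() lets tc_cls_stats_dump() do the skip/count/stop
+ * bookkeeping:
+ *
+ *	list_for_each_entry(f, &head->filters, link)
+ *		if (!tc_cls_stats_dump(tp, arg, f))
+ *			break;
+ */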
+
+#else
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+ return false;
+}
+
+static inline bool tcf_block_non_null_shared(struct tcf_block *block)
+{
+ return false;
+}
+
+static inline
+int tcf_block_get(struct tcf_block **p_block,
+ struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static inline
+int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
+ struct tcf_block_ext_info *ei,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static inline void tcf_block_put(struct tcf_block *block)
+{
+}
+
+static inline
+void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
+ struct tcf_block_ext_info *ei)
+{
+}
+
+static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
+{
+ return NULL;
+}
+
+static inline
+int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
+ void *cb_priv)
+{
+ return 0;
+}
+
+static inline
+void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
+ void *cb_priv)
+{
+}
+
+static inline int tcf_classify(struct sk_buff *skb,
+ const struct tcf_block *block,
+ const struct tcf_proto *tp,
+ struct tcf_result *res, bool compat_mode)
+{
+ return TC_ACT_UNSPEC;
+}
+
+#endif
+
+static inline unsigned long
+__cls_set_class(unsigned long *clp, unsigned long cl)
+{
+ return xchg(clp, cl);
+}
+
+static inline void
+__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
+{
+ unsigned long cl;
+
+ cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
+ cl = __cls_set_class(&r->class, cl);
+ if (cl)
+ q->ops->cl_ops->unbind_tcf(q, cl);
+}
+
+static inline void
+tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
+{
+ struct Qdisc *q = tp->chain->block->q;
+
+ /* Check q as it is not set for shared blocks. In that case,
+ * setting class is not supported.
+ */
+ if (!q)
+ return;
+ sch_tree_lock(q);
+ __tcf_bind_filter(q, r, base);
+ sch_tree_unlock(q);
+}
+
+static inline void
+__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
+{
+ unsigned long cl;
+
+ if ((cl = __cls_set_class(&r->class, 0)) != 0)
+ q->ops->cl_ops->unbind_tcf(q, cl);
+}
+
+static inline void
+tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
+{
+ struct Qdisc *q = tp->chain->block->q;
+
+ if (!q)
+ return;
+ __tcf_unbind_filter(q, r);
+}
+
+static inline void tc_cls_bind_class(u32 classid, unsigned long cl,
+ void *q, struct tcf_result *res,
+ unsigned long base)
+{
+ if (res->classid == classid) {
+ if (cl)
+ __tcf_bind_filter(q, res, base);
+ else
+ __tcf_unbind_filter(q, res);
+ }
+}
+
+struct tcf_exts {
+#ifdef CONFIG_NET_CLS_ACT
+	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
+ int nr_actions;
+ struct tc_action **actions;
+ struct net *net;
+ netns_tracker ns_tracker;
+#endif
+ /* Map to export classifier specific extension TLV types to the
+ * generic extensions API. Unsupported extensions must be set to 0.
+ */
+ int action;
+ int police;
+};
+
+static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
+ int action, int police)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ exts->type = 0;
+ exts->nr_actions = 0;
+ /* Note: we do not own yet a reference on net.
+ * This reference might be taken later from tcf_exts_get_net().
+ */
+ exts->net = net;
+ exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
+ GFP_KERNEL);
+ if (!exts->actions)
+ return -ENOMEM;
+#endif
+ exts->action = action;
+ exts->police = police;
+ return 0;
+}
+
+/* Return false if the netns is being destroyed in cleanup_net(). Callers
+ * need to do cleanup synchronously in this case, otherwise they may race
+ * with tc_action_net_exit(). Return true for other cases.
+ */
+static inline bool tcf_exts_get_net(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ exts->net = maybe_get_net(exts->net);
+ if (exts->net)
+ netns_tracker_alloc(exts->net, &exts->ns_tracker, GFP_KERNEL);
+ return exts->net != NULL;
+#else
+ return true;
+#endif
+}
+
+static inline void tcf_exts_put_net(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (exts->net)
+ put_net_track(exts->net, &exts->ns_tracker);
+#endif
+}
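+
+/* Illustrative sketch (added; not part of the original header): classifiers
+ * typically pair tcf_exts_get_net() with deferred cleanup. The helper below
+ * is hypothetical; in-tree classifiers follow the same shape, assuming
+ * tcf_queue_work() as declared earlier in this header.
+ */
+static inline void example_destroy_filter_work(struct tcf_exts *exts,
+					       struct rcu_work *rwork,
+					       work_func_t destroy_fn)
+{
+	if (tcf_exts_get_net(exts))
+		tcf_queue_work(rwork, destroy_fn);	/* netns alive: defer */
+	else
+		destroy_fn(&rwork->work);		/* cleanup_net(): sync */
+}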
+
+#ifdef CONFIG_NET_CLS_ACT
+#define tcf_exts_for_each_action(i, a, exts) \
+ for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
+#else
+#define tcf_exts_for_each_action(i, a, exts) \
+ for (; 0; (void)(i), (void)(a), (void)(exts))
+#endif
+
+#define tcf_act_for_each_action(i, a, actions) \
+ for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++)
+
+static inline void
+tcf_exts_hw_stats_update(const struct tcf_exts *exts,
+ u64 bytes, u64 packets, u64 drops, u64 lastuse,
+ u8 used_hw_stats, bool used_hw_stats_valid)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ int i;
+
+ for (i = 0; i < exts->nr_actions; i++) {
+ struct tc_action *a = exts->actions[i];
+
+		/* update only actions that take their stats from hw; skip the rest */
+ if (tcf_action_update_hw_stats(a)) {
+ preempt_disable();
+ tcf_action_stats_update(a, bytes, packets, drops,
+ lastuse, true);
+ preempt_enable();
+
+ a->used_hw_stats = used_hw_stats;
+ a->used_hw_stats_valid = used_hw_stats_valid;
+ }
+ }
+#endif
+}
+
+/**
+ * tcf_exts_has_actions - check if at least one action is present
+ * @exts: tc filter extensions handle
+ *
+ * Returns true if at least one action is present.
+ */
+static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return exts->nr_actions;
+#else
+ return false;
+#endif
+}
+
+/**
+ * tcf_exts_exec - execute tc filter extensions
+ * @skb: socket buffer
+ * @exts: tc filter extensions handle
+ * @res: desired result
+ *
+ * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
+ * a negative number if the filter must be considered unmatched, or
+ * a positive action code (TC_ACT_*) which must be returned to the
+ * underlying layer.
+ */
+static inline int
+tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
+ struct tcf_result *res)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
+#endif
+ return TC_ACT_OK;
+}
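+
+/* Illustrative sketch (added): a minimal classify step, mirroring how
+ * in-tree classifiers use tcf_exts_exec(). The example_* name is a
+ * placeholder, not an in-tree function.
+ */
+static inline int example_classify_match(struct sk_buff *skb,
+					 struct tcf_exts *exts,
+					 const struct tcf_result *match,
+					 struct tcf_result *res)
+{
+	*res = *match;				/* report the matched class */
+	return tcf_exts_exec(skb, exts, res);	/* TC_ACT_* or < 0 */
+}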
+
+int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
+ struct nlattr **tb, struct nlattr *rate_tlv,
+ struct tcf_exts *exts, u32 flags,
+ struct netlink_ext_ack *extack);
+int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
+ struct nlattr *rate_tlv, struct tcf_exts *exts,
+ u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
+void tcf_exts_destroy(struct tcf_exts *exts);
+void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
+int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
+int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
+int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
+
+/**
+ * struct tcf_pkt_info - packet information
+ *
+ * @ptr: start of the pkt data
+ * @nexthdr: offset of the next header
+ */
+struct tcf_pkt_info {
+ unsigned char * ptr;
+ int nexthdr;
+};
+
+#ifdef CONFIG_NET_EMATCH
+
+struct tcf_ematch_ops;
+
+/**
+ * struct tcf_ematch - extended match (ematch)
+ *
+ * @matchid: identifier to allow userspace to reidentify a match
+ * @flags: flags specifying attributes and the relation to other matches
+ * @ops: the operations lookup table of the corresponding ematch module
+ * @datalen: length of the ematch specific configuration data
+ * @data: ematch specific data
+ * @net: the network namespace
+ */
+struct tcf_ematch {
+ struct tcf_ematch_ops * ops;
+ unsigned long data;
+ unsigned int datalen;
+ u16 matchid;
+ u16 flags;
+ struct net *net;
+};
+
+static inline int tcf_em_is_container(struct tcf_ematch *em)
+{
+ return !em->ops;
+}
+
+static inline int tcf_em_is_simple(struct tcf_ematch *em)
+{
+ return em->flags & TCF_EM_SIMPLE;
+}
+
+static inline int tcf_em_is_inverted(struct tcf_ematch *em)
+{
+ return em->flags & TCF_EM_INVERT;
+}
+
+static inline int tcf_em_last_match(struct tcf_ematch *em)
+{
+ return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
+}
+
+static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
+{
+ if (tcf_em_last_match(em))
+ return 1;
+
+ if (result == 0 && em->flags & TCF_EM_REL_AND)
+ return 1;
+
+ if (result != 0 && em->flags & TCF_EM_REL_OR)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * struct tcf_ematch_tree - ematch tree handle
+ *
+ * @hdr: ematch tree header supplied by userspace
+ * @matches: array of ematches
+ */
+struct tcf_ematch_tree {
+ struct tcf_ematch_tree_hdr hdr;
+ struct tcf_ematch * matches;
+
+};
+
+/**
+ * struct tcf_ematch_ops - ematch module operations
+ *
+ * @kind: identifier (kind) of this ematch module
+ * @datalen: length of expected configuration data (optional)
+ * @change: called during validation (optional)
+ * @match: called during ematch tree evaluation, must return 1/0
+ * @destroy: called during destruction (optional)
+ * @dump: called during dumping process (optional)
+ * @owner: owner, must be set to THIS_MODULE
+ * @link: link to previous/next ematch module (internal use)
+ */
+struct tcf_ematch_ops {
+ int kind;
+ int datalen;
+ int (*change)(struct net *net, void *,
+ int, struct tcf_ematch *);
+ int (*match)(struct sk_buff *, struct tcf_ematch *,
+ struct tcf_pkt_info *);
+ void (*destroy)(struct tcf_ematch *);
+ int (*dump)(struct sk_buff *, struct tcf_ematch *);
+ struct module *owner;
+ struct list_head link;
+};
+
+int tcf_em_register(struct tcf_ematch_ops *);
+void tcf_em_unregister(struct tcf_ematch_ops *);
+int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
+ struct tcf_ematch_tree *);
+void tcf_em_tree_destroy(struct tcf_ematch_tree *);
+int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
+int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
+ struct tcf_pkt_info *);
+
+/**
+ * tcf_em_tree_match - evaluate an ematch tree
+ *
+ * @skb: socket buffer of the packet in question
+ * @tree: ematch tree to be used for evaluation
+ * @info: packet information examined by classifier
+ *
+ * This function matches @skb against the ematch tree in @tree by going
+ * through all ematches, respecting their logic relations, and returning
+ * as soon as the result is obvious.
+ *
+ * Returns 1 if the ematch tree as a whole matches, if no ematches are
+ * configured, or if ematch support is not enabled in the kernel; otherwise
+ * 0 is returned.
+ */
+static inline int tcf_em_tree_match(struct sk_buff *skb,
+ struct tcf_ematch_tree *tree,
+ struct tcf_pkt_info *info)
+{
+ if (tree->hdr.nmatches)
+ return __tcf_em_tree_match(skb, tree, info);
+ else
+ return 1;
+}
+
+#define MODULE_ALIAS_TCF_EMATCH(kind) MODULE_ALIAS("ematch-kind-" __stringify(kind))
+
+#else /* CONFIG_NET_EMATCH */
+
+struct tcf_ematch_tree {
+};
+
+#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
+#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
+#define tcf_em_tree_dump(skb, t, tlv) (0)
+#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
+
+#endif /* CONFIG_NET_EMATCH */
+
+static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
+{
+ switch (layer) {
+ case TCF_LAYER_LINK:
+ return skb_mac_header(skb);
+ case TCF_LAYER_NETWORK:
+ return skb_network_header(skb);
+ case TCF_LAYER_TRANSPORT:
+ return skb_transport_header(skb);
+ }
+
+ return NULL;
+}
+
+static inline int tcf_valid_offset(const struct sk_buff *skb,
+ const unsigned char *ptr, const int len)
+{
+ return likely((ptr + len) <= skb_tail_pointer(skb) &&
+ ptr >= skb->head &&
+ (ptr <= (ptr + len)));
+}
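+
+/* Illustrative sketch (added): ematch ->match() implementations combine the
+ * two helpers above to read packet bytes safely. Hypothetical example;
+ * assumes get_unaligned_be16() from <asm/unaligned.h> is available.
+ */
+static inline int example_match_be16(struct sk_buff *skb, int offset, u16 want)
+{
+	unsigned char *ptr = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK) + offset;
+
+	if (!tcf_valid_offset(skb, ptr, sizeof(__be16)))
+		return 0;			/* out of bounds: no match */
+	return get_unaligned_be16(ptr) == want;
+}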
+
+static inline int
+tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
+ struct netlink_ext_ack *extack)
+{
+ char indev[IFNAMSIZ];
+ struct net_device *dev;
+
+ if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
+ NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
+ "Interface name too long");
+ return -EINVAL;
+ }
+ dev = __dev_get_by_name(net, indev);
+ if (!dev) {
+ NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
+ "Network device not found");
+ return -ENODEV;
+ }
+ return dev->ifindex;
+}
+
+static inline bool
+tcf_match_indev(struct sk_buff *skb, int ifindex)
+{
+ if (!ifindex)
+ return true;
+ if (!skb->skb_iif)
+ return false;
+ return ifindex == skb->skb_iif;
+}
+
+int tc_setup_offload_action(struct flow_action *flow_action,
+ const struct tcf_exts *exts,
+ struct netlink_ext_ack *extack);
+void tc_cleanup_offload_action(struct flow_action *flow_action);
+int tc_setup_action(struct flow_action *flow_action,
+ struct tc_action *actions[],
+ struct netlink_ext_ack *extack);
+
+int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
+ void *type_data, bool err_stop, bool rtnl_held);
+int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
+ enum tc_setup_type type, void *type_data, bool err_stop,
+ u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
+int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
+ enum tc_setup_type type, void *type_data, bool err_stop,
+ u32 *old_flags, unsigned int *old_in_hw_count,
+ u32 *new_flags, unsigned int *new_in_hw_count,
+ bool rtnl_held);
+int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
+ enum tc_setup_type type, void *type_data, bool err_stop,
+ u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
+int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
+ bool add, flow_setup_cb_t *cb,
+ enum tc_setup_type type, void *type_data,
+ void *cb_priv, u32 *flags, unsigned int *in_hw_count);
+unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
+
+#ifdef CONFIG_NET_CLS_ACT
+int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+ enum flow_block_binder_type binder_type,
+ struct nlattr *block_index_attr,
+ struct netlink_ext_ack *extack);
+void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
+int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+ struct netlink_ext_ack *extack);
+struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+ struct sk_buff **to_free, int *ret);
+int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
+#else
+static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+ enum flow_block_binder_type binder_type,
+ struct nlattr *block_index_attr,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
+{
+}
+
+static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static inline struct sk_buff *
+tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+ struct sk_buff **to_free, int *ret)
+{
+ return skb;
+}
+
+static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
+{
+ return 0;
+}
+#endif
+
+struct tc_cls_u32_knode {
+ struct tcf_exts *exts;
+ struct tcf_result *res;
+ struct tc_u32_sel *sel;
+ u32 handle;
+ u32 val;
+ u32 mask;
+ u32 link_handle;
+ u8 fshift;
+};
+
+struct tc_cls_u32_hnode {
+ u32 handle;
+ u32 prio;
+ unsigned int divisor;
+};
+
+enum tc_clsu32_command {
+ TC_CLSU32_NEW_KNODE,
+ TC_CLSU32_REPLACE_KNODE,
+ TC_CLSU32_DELETE_KNODE,
+ TC_CLSU32_NEW_HNODE,
+ TC_CLSU32_REPLACE_HNODE,
+ TC_CLSU32_DELETE_HNODE,
+};
+
+struct tc_cls_u32_offload {
+ struct flow_cls_common_offload common;
+ /* knode values */
+ enum tc_clsu32_command command;
+ union {
+ struct tc_cls_u32_knode knode;
+ struct tc_cls_u32_hnode hnode;
+ };
+};
+
+static inline bool tc_can_offload(const struct net_device *dev)
+{
+ return dev->features & NETIF_F_HW_TC;
+}
+
+static inline bool tc_can_offload_extack(const struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ bool can = tc_can_offload(dev);
+
+ if (!can)
+ NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
+
+ return can;
+}
+
+static inline bool
+tc_cls_can_offload_and_chain0(const struct net_device *dev,
+ struct flow_cls_common_offload *common)
+{
+ if (!tc_can_offload_extack(dev, common->extack))
+ return false;
+ if (common->chain_index) {
+ NL_SET_ERR_MSG(common->extack,
+ "Driver supports only offload of chain 0");
+ return false;
+ }
+ return true;
+}
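+
+/* Illustrative sketch (added): a hypothetical driver offload entry point
+ * gating on the helper above before touching hardware. struct
+ * flow_cls_offload comes from <net/flow_offload.h>.
+ */
+static inline int example_setup_cls_rule(struct net_device *dev,
+					 struct flow_cls_offload *f)
+{
+	if (!tc_cls_can_offload_and_chain0(dev, &f->common))
+		return -EOPNOTSUPP;
+	/* ...translate f->rule and program it into hardware... */
+	return 0;
+}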
+
+static inline bool tc_skip_hw(u32 flags)
+{
+ return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
+}
+
+static inline bool tc_skip_sw(u32 flags)
+{
+ return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
+}
+
+/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
+static inline bool tc_flags_valid(u32 flags)
+{
+ if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
+ TCA_CLS_FLAGS_VERBOSE))
+ return false;
+
+ flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
+ if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
+ return false;
+
+ return true;
+}
+
+static inline bool tc_in_hw(u32 flags)
+{
+ return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
+}
+
+static inline void
+tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
+ const struct tcf_proto *tp, u32 flags,
+ struct netlink_ext_ack *extack)
+{
+ cls_common->chain_index = tp->chain->index;
+ cls_common->protocol = tp->protocol;
+ cls_common->prio = tp->prio >> 16;
+ if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
+ cls_common->extack = extack;
+}
+
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
+{
+ struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+
+ if (tc_skb_ext)
+ memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
+ return tc_skb_ext;
+}
+#endif
+
+enum tc_matchall_command {
+ TC_CLSMATCHALL_REPLACE,
+ TC_CLSMATCHALL_DESTROY,
+ TC_CLSMATCHALL_STATS,
+};
+
+struct tc_cls_matchall_offload {
+ struct flow_cls_common_offload common;
+ enum tc_matchall_command command;
+ struct flow_rule *rule;
+ struct flow_stats stats;
+ unsigned long cookie;
+};
+
+enum tc_clsbpf_command {
+ TC_CLSBPF_OFFLOAD,
+ TC_CLSBPF_STATS,
+};
+
+struct tc_cls_bpf_offload {
+ struct flow_cls_common_offload common;
+ enum tc_clsbpf_command command;
+ struct tcf_exts *exts;
+ struct bpf_prog *prog;
+ struct bpf_prog *oldprog;
+ const char *name;
+ bool exts_integrated;
+};
+
+struct tc_mqprio_qopt_offload {
+ /* struct tc_mqprio_qopt must always be the first element */
+ struct tc_mqprio_qopt qopt;
+ u16 mode;
+ u16 shaper;
+ u32 flags;
+ u64 min_rate[TC_QOPT_MAX_QUEUE];
+ u64 max_rate[TC_QOPT_MAX_QUEUE];
+};
+
+/* This structure holds cookie structure that is passed from user
+ * to the kernel for actions and classifiers
+ */
+struct tc_cookie {
+ u8 *data;
+ u32 len;
+ struct rcu_head rcu;
+};
+
+struct tc_qopt_offload_stats {
+ struct gnet_stats_basic_sync *bstats;
+ struct gnet_stats_queue *qstats;
+};
+
+enum tc_mq_command {
+ TC_MQ_CREATE,
+ TC_MQ_DESTROY,
+ TC_MQ_STATS,
+ TC_MQ_GRAFT,
+};
+
+struct tc_mq_opt_offload_graft_params {
+ unsigned long queue;
+ u32 child_handle;
+};
+
+struct tc_mq_qopt_offload {
+ enum tc_mq_command command;
+ u32 handle;
+ union {
+ struct tc_qopt_offload_stats stats;
+ struct tc_mq_opt_offload_graft_params graft_params;
+ };
+};
+
+enum tc_htb_command {
+ /* Root */
+ TC_HTB_CREATE, /* Initialize HTB offload. */
+ TC_HTB_DESTROY, /* Destroy HTB offload. */
+
+ /* Classes */
+ /* Allocate qid and create leaf. */
+ TC_HTB_LEAF_ALLOC_QUEUE,
+ /* Convert leaf to inner, preserve and return qid, create new leaf. */
+ TC_HTB_LEAF_TO_INNER,
+ /* Delete leaf, while siblings remain. */
+ TC_HTB_LEAF_DEL,
+ /* Delete leaf, convert parent to leaf, preserving qid. */
+ TC_HTB_LEAF_DEL_LAST,
+ /* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
+ TC_HTB_LEAF_DEL_LAST_FORCE,
+ /* Modify parameters of a node. */
+ TC_HTB_NODE_MODIFY,
+
+ /* Class qdisc */
+ TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
+};
+
+struct tc_htb_qopt_offload {
+ struct netlink_ext_ack *extack;
+ enum tc_htb_command command;
+ u32 parent_classid;
+ u16 classid;
+ u16 qid;
+ u64 rate;
+ u64 ceil;
+};
+
+#define TC_HTB_CLASSID_ROOT U32_MAX
+
+enum tc_red_command {
+ TC_RED_REPLACE,
+ TC_RED_DESTROY,
+ TC_RED_STATS,
+ TC_RED_XSTATS,
+ TC_RED_GRAFT,
+};
+
+struct tc_red_qopt_offload_params {
+ u32 min;
+ u32 max;
+ u32 probability;
+ u32 limit;
+ bool is_ecn;
+ bool is_harddrop;
+ bool is_nodrop;
+ struct gnet_stats_queue *qstats;
+};
+
+struct tc_red_qopt_offload {
+ enum tc_red_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_red_qopt_offload_params set;
+ struct tc_qopt_offload_stats stats;
+ struct red_stats *xstats;
+ u32 child_handle;
+ };
+};
+
+enum tc_gred_command {
+ TC_GRED_REPLACE,
+ TC_GRED_DESTROY,
+ TC_GRED_STATS,
+};
+
+struct tc_gred_vq_qopt_offload_params {
+ bool present;
+ u32 limit;
+ u32 prio;
+ u32 min;
+ u32 max;
+ bool is_ecn;
+ bool is_harddrop;
+ u32 probability;
+ /* Only need backlog, see struct tc_prio_qopt_offload_params */
+ u32 *backlog;
+};
+
+struct tc_gred_qopt_offload_params {
+ bool grio_on;
+ bool wred_on;
+ unsigned int dp_cnt;
+ unsigned int dp_def;
+ struct gnet_stats_queue *qstats;
+ struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
+};
+
+struct tc_gred_qopt_offload_stats {
+ struct gnet_stats_basic_sync bstats[MAX_DPs];
+ struct gnet_stats_queue qstats[MAX_DPs];
+ struct red_stats *xstats[MAX_DPs];
+};
+
+struct tc_gred_qopt_offload {
+ enum tc_gred_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_gred_qopt_offload_params set;
+ struct tc_gred_qopt_offload_stats stats;
+ };
+};
+
+enum tc_prio_command {
+ TC_PRIO_REPLACE,
+ TC_PRIO_DESTROY,
+ TC_PRIO_STATS,
+ TC_PRIO_GRAFT,
+};
+
+struct tc_prio_qopt_offload_params {
+ int bands;
+ u8 priomap[TC_PRIO_MAX + 1];
+ /* At the point of un-offloading the Qdisc, the reported backlog and
+ * qlen need to be reduced by the portion that is in HW.
+ */
+ struct gnet_stats_queue *qstats;
+};
+
+struct tc_prio_qopt_offload_graft_params {
+ u8 band;
+ u32 child_handle;
+};
+
+struct tc_prio_qopt_offload {
+ enum tc_prio_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_prio_qopt_offload_params replace_params;
+ struct tc_qopt_offload_stats stats;
+ struct tc_prio_qopt_offload_graft_params graft_params;
+ };
+};
+
+enum tc_root_command {
+ TC_ROOT_GRAFT,
+};
+
+struct tc_root_qopt_offload {
+ enum tc_root_command command;
+ u32 handle;
+ bool ingress;
+};
+
+enum tc_ets_command {
+ TC_ETS_REPLACE,
+ TC_ETS_DESTROY,
+ TC_ETS_STATS,
+ TC_ETS_GRAFT,
+};
+
+struct tc_ets_qopt_offload_replace_params {
+ unsigned int bands;
+ u8 priomap[TC_PRIO_MAX + 1];
+ unsigned int quanta[TCQ_ETS_MAX_BANDS]; /* 0 for strict bands. */
+ unsigned int weights[TCQ_ETS_MAX_BANDS];
+ struct gnet_stats_queue *qstats;
+};
+
+struct tc_ets_qopt_offload_graft_params {
+ u8 band;
+ u32 child_handle;
+};
+
+struct tc_ets_qopt_offload {
+ enum tc_ets_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_ets_qopt_offload_replace_params replace_params;
+ struct tc_qopt_offload_stats stats;
+ struct tc_ets_qopt_offload_graft_params graft_params;
+ };
+};
+
+enum tc_tbf_command {
+ TC_TBF_REPLACE,
+ TC_TBF_DESTROY,
+ TC_TBF_STATS,
+ TC_TBF_GRAFT,
+};
+
+struct tc_tbf_qopt_offload_replace_params {
+ struct psched_ratecfg rate;
+ u32 max_size;
+ struct gnet_stats_queue *qstats;
+};
+
+struct tc_tbf_qopt_offload {
+ enum tc_tbf_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_tbf_qopt_offload_replace_params replace_params;
+ struct tc_qopt_offload_stats stats;
+ u32 child_handle;
+ };
+};
+
+enum tc_fifo_command {
+ TC_FIFO_REPLACE,
+ TC_FIFO_DESTROY,
+ TC_FIFO_STATS,
+};
+
+struct tc_fifo_qopt_offload {
+ enum tc_fifo_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_qopt_offload_stats stats;
+ };
+};
+
+#ifdef CONFIG_NET_CLS_ACT
+DECLARE_STATIC_KEY_FALSE(tc_skb_ext_tc);
+void tc_skb_ext_tc_enable(void);
+void tc_skb_ext_tc_disable(void);
+#define tc_skb_ext_tc_enabled() static_branch_unlikely(&tc_skb_ext_tc)
+#else /* CONFIG_NET_CLS_ACT */
+static inline void tc_skb_ext_tc_enable(void) { }
+static inline void tc_skb_ext_tc_disable(void) { }
+#define tc_skb_ext_tc_enabled() false
+#endif
+
+#endif
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
new file mode 100644
index 000000000..f99a513b4
--- /dev/null
+++ b/include/net/pkt_sched.h
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_PKT_SCHED_H
+#define __NET_PKT_SCHED_H
+
+#include <linux/jiffies.h>
+#include <linux/ktime.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <net/sch_generic.h>
+#include <net/net_namespace.h>
+#include <uapi/linux/pkt_sched.h>
+
+#define DEFAULT_TX_QUEUE_LEN 1000
+#define STAB_SIZE_LOG_MAX 30
+
+struct qdisc_walker {
+ int stop;
+ int skip;
+ int count;
+ int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
+};
+
+static inline void *qdisc_priv(struct Qdisc *q)
+{
+ return &q->privdata;
+}
+
+static inline struct Qdisc *qdisc_from_priv(void *priv)
+{
+ return container_of(priv, struct Qdisc, privdata);
+}
+
+/*
+   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth
+
+   Normal IP packet size is ~512 bytes, hence:
+
+   0.5 Kbyte / 1 Mbyte/sec = 0.5 msec, so we need a 50 usec timer for
+   10 Mbit ethernet.
+
+   10 msec resolution -> < 50 Kbit/sec.
+
+   The result: a [34]86 is not a good choice for a QoS router :-(
+
+   Things are not so bad, because we may use an artificial
+   clock evaluated by integration of the network data flow
+   in the most critical places.
+ */
+
+typedef u64 psched_time_t;
+typedef long psched_tdiff_t;
+
+/* Avoid doing 64 bit divide */
+#define PSCHED_SHIFT 6
+#define PSCHED_TICKS2NS(x) ((s64)(x) << PSCHED_SHIFT)
+#define PSCHED_NS2TICKS(x) ((x) >> PSCHED_SHIFT)
+
+#define PSCHED_TICKS_PER_SEC PSCHED_NS2TICKS(NSEC_PER_SEC)
+#define PSCHED_PASTPERFECT 0
+
+static inline psched_time_t psched_get_time(void)
+{
+ return PSCHED_NS2TICKS(ktime_get_ns());
+}
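+
+/* Illustrative note (added): with PSCHED_SHIFT = 6, one psched tick is
+ * 64 ns, so PSCHED_TICKS_PER_SEC is NSEC_PER_SEC >> 6 = 15625000 and
+ * tick/ns conversions reduce to shifts, never a 64-bit divide.
+ */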
+
+struct qdisc_watchdog {
+ u64 last_expires;
+ struct hrtimer timer;
+ struct Qdisc *qdisc;
+};
+
+void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
+ clockid_t clockid);
+void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
+
+void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
+ u64 delta_ns);
+
+static inline void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd,
+ u64 expires)
+{
+ return qdisc_watchdog_schedule_range_ns(wd, expires, 0ULL);
+}
+
+static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
+ psched_time_t expires)
+{
+ qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
+}
+
+void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
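+
+/* Illustrative sketch (added): rate-limiting qdiscs (e.g. tbf) use the
+ * watchdog to retry dequeue once the next packet becomes eligible. The
+ * example_* helper is hypothetical.
+ */
+static inline struct sk_buff *example_shaped_dequeue(struct qdisc_watchdog *wd,
+						     struct sk_buff *skb,
+						     u64 eligible_ns)
+{
+	if (ktime_get_ns() >= eligible_ns)
+		return skb;			/* may transmit now */
+	qdisc_watchdog_schedule_ns(wd, eligible_ns);
+	return NULL;				/* timer will retry dequeue */
+}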
+
+extern struct Qdisc_ops pfifo_qdisc_ops;
+extern struct Qdisc_ops bfifo_qdisc_ops;
+extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;
+
+int fifo_set_limit(struct Qdisc *q, unsigned int limit);
+struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
+ unsigned int limit,
+ struct netlink_ext_ack *extack);
+
+int register_qdisc(struct Qdisc_ops *qops);
+void unregister_qdisc(struct Qdisc_ops *qops);
+void qdisc_get_default(char *id, size_t len);
+int qdisc_set_default(const char *id);
+
+void qdisc_hash_add(struct Qdisc *q, bool invisible);
+void qdisc_hash_del(struct Qdisc *q);
+struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
+struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
+struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
+ struct nlattr *tab,
+ struct netlink_ext_ack *extack);
+void qdisc_put_rtab(struct qdisc_rate_table *tab);
+void qdisc_put_stab(struct qdisc_size_table *tab);
+void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
+bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+ struct net_device *dev, struct netdev_queue *txq,
+ spinlock_t *root_lock, bool validate);
+
+void __qdisc_run(struct Qdisc *q);
+
+static inline void qdisc_run(struct Qdisc *q)
+{
+ if (qdisc_run_begin(q)) {
+ __qdisc_run(q);
+ qdisc_run_end(q);
+ }
+}
+
+extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+
+/* Calculate the maximal size of a packet seen by the hard_start_xmit
+   routine of this device.
+ */
+static inline unsigned int psched_mtu(const struct net_device *dev)
+{
+ return READ_ONCE(dev->mtu) + dev->hard_header_len;
+}
+
+static inline struct net *qdisc_net(struct Qdisc *q)
+{
+ return dev_net(q->dev_queue->dev);
+}
+
+struct tc_query_caps_base {
+ enum tc_setup_type type;
+ void *caps;
+};
+
+struct tc_cbs_qopt_offload {
+ u8 enable;
+ s32 queue;
+ s32 hicredit;
+ s32 locredit;
+ s32 idleslope;
+ s32 sendslope;
+};
+
+struct tc_etf_qopt_offload {
+ u8 enable;
+ s32 queue;
+};
+
+struct tc_taprio_caps {
+ bool supports_queue_max_sdu:1;
+};
+
+struct tc_taprio_sched_entry {
+ u8 command; /* TC_TAPRIO_CMD_* */
+
+ /* The gate_mask in the offloading side refers to traffic classes */
+ u32 gate_mask;
+ u32 interval;
+};
+
+struct tc_taprio_qopt_offload {
+ u8 enable;
+ ktime_t base_time;
+ u64 cycle_time;
+ u64 cycle_time_extension;
+ u32 max_sdu[TC_MAX_QUEUE];
+
+ size_t num_entries;
+ struct tc_taprio_sched_entry entries[];
+};
+
+#if IS_ENABLED(CONFIG_NET_SCH_TAPRIO)
+
+/* Reference counting */
+struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
+ *offload);
+void taprio_offload_free(struct tc_taprio_qopt_offload *offload);
+
+#else
+
+/* Reference counting */
+static inline struct tc_taprio_qopt_offload *
+taprio_offload_get(struct tc_taprio_qopt_offload *offload)
+{
+ return NULL;
+}
+
+static inline void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
+{
+}
+
+#endif
+
+/* Ensure skb_mstamp_ns, which might have been populated with the txtime, is
+ * not mistaken for a software timestamp, because this will otherwise prevent
+ * the dispatch of hardware timestamps to the socket.
+ */
+static inline void skb_txtime_consumed(struct sk_buff *skb)
+{
+ skb->tstamp = ktime_set(0, 0);
+}
+
+struct tc_skb_cb {
+ struct qdisc_skb_cb qdisc_cb;
+
+ u16 mru;
+ u8 post_ct:1;
+ u8 post_ct_snat:1;
+ u8 post_ct_dnat:1;
+ u16 zone; /* Only valid if post_ct = true */
+};
+
+static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
+{
+ struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;
+
+ BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
+ return cb;
+}
+
+static inline bool tc_qdisc_stats_dump(struct Qdisc *sch,
+ unsigned long cl,
+ struct qdisc_walker *arg)
+{
+ if (arg->count >= arg->skip && arg->fn(sch, cl, arg) < 0) {
+ arg->stop = 1;
+ return false;
+ }
+
+ arg->count++;
+ return true;
+}
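+
+/* Illustrative sketch (added): a classful qdisc's ->walk() typically
+ * iterates its class space with the helper above, stopping when the
+ * walker callback asks to. Hypothetical example over a linear range.
+ */
+static inline void example_walk_classes(struct Qdisc *sch,
+					struct qdisc_walker *arg,
+					unsigned long n_classes)
+{
+	unsigned long cl;
+
+	for (cl = 1; cl <= n_classes; cl++)
+		if (!tc_qdisc_stats_dump(sch, cl, arg))
+			return;
+}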
+
+#endif
diff --git a/include/net/pptp.h b/include/net/pptp.h
new file mode 100644
index 000000000..e63176bdd
--- /dev/null
+++ b/include/net/pptp.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_PPTP_H
+#define _NET_PPTP_H
+
+#include <linux/types.h>
+#include <net/gre.h>
+
+#define PPP_LCP_ECHOREQ 0x09
+#define PPP_LCP_ECHOREP 0x0A
+#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
+
+#define MISSING_WINDOW 20
+#define WRAPPED(curseq, lastseq)\
+ ((((curseq) & 0xffffff00) == 0) &&\
+ (((lastseq) & 0xffffff00) == 0xffffff00))
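+
+/* Illustrative note (added): WRAPPED() detects 32-bit sequence-number
+ * wraparound; it is true only when curseq has just wrapped into the
+ * lowest 256 values while lastseq is still within the highest 256.
+ */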
+
+#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header))
+struct pptp_gre_header {
+ struct gre_base_hdr gre_hd;
+ __be16 payload_len;
+ __be16 call_id;
+ __be32 seq;
+ __be32 ack;
+} __packed;
+
+
+#endif
diff --git a/include/net/protocol.h b/include/net/protocol.h
new file mode 100644
index 000000000..6aef8cb11
--- /dev/null
+++ b/include/net/protocol.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the protocol dispatcher.
+ *
+ * Version: @(#)protocol.h 1.0.2 05/07/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Changes:
+ * Alan Cox : Added a name field and a frag handler
+ * field for later.
+ * Alan Cox : Cleaned up, and sorted types.
+ * Pedro Roque : inet6 protocols
+ */
+
+#ifndef _PROTOCOL_H
+#define _PROTOCOL_H
+
+#include <linux/in6.h>
+#include <linux/skbuff.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/ipv6.h>
+#endif
+#include <linux/netdevice.h>
+
+/* This is one larger than the largest protocol value that can be
+ * found in an ipv4 or ipv6 header. Since in both cases the protocol
+ * value is presented in a __u8, this is defined to be 256.
+ */
+#define MAX_INET_PROTOS 256
+
+/* This is used to register protocols. */
+struct net_protocol {
+ int (*handler)(struct sk_buff *skb);
+
+ /* This returns an error if we weren't able to handle the error. */
+ int (*err_handler)(struct sk_buff *skb, u32 info);
+
+ unsigned int no_policy:1,
+ /* does the protocol do more stringent
+ * icmp tag validation than simple
+ * socket lookup?
+ */
+ icmp_strict_tag_validation:1;
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+struct inet6_protocol {
+ int (*handler)(struct sk_buff *skb);
+
+ /* This returns an error if we weren't able to handle the error. */
+ int (*err_handler)(struct sk_buff *skb,
+ struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset,
+ __be32 info);
+
+ unsigned int flags; /* INET6_PROTO_xxx */
+};
+
+#define INET6_PROTO_NOPOLICY 0x1
+#define INET6_PROTO_FINAL 0x2
+#endif
+
+struct net_offload {
+ struct offload_callbacks callbacks;
+ unsigned int flags; /* Flags used by IPv6 for now */
+};
+/* This should be set for any extension header which is compatible with GSO. */
+#define INET6_PROTO_GSO_EXTHDR 0x1
+
+/* This is used to register socket interfaces for IP protocols. */
+struct inet_protosw {
+ struct list_head list;
+
+ /* These two fields form the lookup key. */
+ unsigned short type; /* This is the 2nd argument to socket(2). */
+ unsigned short protocol; /* This is the L4 protocol number. */
+
+ struct proto *prot;
+ const struct proto_ops *ops;
+
+ unsigned char flags; /* See INET_PROTOSW_* below. */
+};
+#define INET_PROTOSW_REUSE 0x01 /* Are ports automatically reusable? */
+#define INET_PROTOSW_PERMANENT 0x02 /* Permanent protocols are unremovable. */
+#define INET_PROTOSW_ICSK 0x04 /* Is this an inet_connection_sock? */
+
+extern struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
+extern const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS];
+extern const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS];
+
+#if IS_ENABLED(CONFIG_IPV6)
+extern struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
+#endif
+
+int inet_add_protocol(const struct net_protocol *prot, unsigned char num);
+int inet_del_protocol(const struct net_protocol *prot, unsigned char num);
+int inet_add_offload(const struct net_offload *prot, unsigned char num);
+int inet_del_offload(const struct net_offload *prot, unsigned char num);
+void inet_register_protosw(struct inet_protosw *p);
+void inet_unregister_protosw(struct inet_protosw *p);
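+
+/* Illustrative sketch (added): an L4 protocol registers its receive
+ * handler against its IP protocol number at module init. The handler,
+ * struct and protocol number below are hypothetical placeholders:
+ *
+ *	static const struct net_protocol example_protocol = {
+ *		.handler	= example_rcv,
+ *		.no_policy	= 1,
+ *	};
+ *
+ *	err = inet_add_protocol(&example_protocol, IPPROTO_EXAMPLE);
+ *	// on module exit: inet_del_protocol(&example_protocol, IPPROTO_EXAMPLE);
+ */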
+
+#if IS_ENABLED(CONFIG_IPV6)
+int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
+int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
+int inet6_register_protosw(struct inet_protosw *p);
+void inet6_unregister_protosw(struct inet_protosw *p);
+#endif
+int inet6_add_offload(const struct net_offload *prot, unsigned char num);
+int inet6_del_offload(const struct net_offload *prot, unsigned char num);
+
+#endif /* _PROTOCOL_H */
diff --git a/include/net/psample.h b/include/net/psample.h
new file mode 100644
index 000000000..0509d2d6b
--- /dev/null
+++ b/include/net/psample.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_PSAMPLE_H
+#define __NET_PSAMPLE_H
+
+#include <uapi/linux/psample.h>
+#include <linux/list.h>
+
+struct psample_group {
+ struct list_head list;
+ struct net *net;
+ u32 group_num;
+ u32 refcount;
+ u32 seq;
+ struct rcu_head rcu;
+};
+
+struct psample_metadata {
+ u32 trunc_size;
+ int in_ifindex;
+ int out_ifindex;
+ u16 out_tc;
+ u64 out_tc_occ; /* bytes */
+ u64 latency; /* nanoseconds */
+ u8 out_tc_valid:1,
+ out_tc_occ_valid:1,
+ latency_valid:1,
+ unused:5;
+};
+
+struct psample_group *psample_group_get(struct net *net, u32 group_num);
+void psample_group_take(struct psample_group *group);
+void psample_group_put(struct psample_group *group);
+
+struct sk_buff;
+
+#if IS_ENABLED(CONFIG_PSAMPLE)
+
+void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
+ u32 sample_rate, const struct psample_metadata *md);
+
+#else
+
+static inline void psample_sample_packet(struct psample_group *group,
+ struct sk_buff *skb, u32 sample_rate,
+ const struct psample_metadata *md)
+{
+}
+
+#endif
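+
+/* Illustrative sketch (added): a sampler fills in what it knows and hands
+ * the packet to psample; userspace consumers receive it via the psample
+ * generic netlink channel. Hypothetical helper; length and ifindex are
+ * passed in because this header treats sk_buff as opaque.
+ */
+static inline void example_sample_rx(struct psample_group *group,
+				     struct sk_buff *skb, u32 sample_rate,
+				     int in_ifindex, u32 len)
+{
+	struct psample_metadata md = {};
+
+	md.trunc_size = len;		/* no truncation */
+	md.in_ifindex = in_ifindex;
+	psample_sample_packet(group, skb, sample_rate, &md);
+}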
+
+#endif /* __NET_PSAMPLE_H */
diff --git a/include/net/psnap.h b/include/net/psnap.h
new file mode 100644
index 000000000..88802b075
--- /dev/null
+++ b/include/net/psnap.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_PSNAP_H
+#define _NET_PSNAP_H
+
+struct datalink_proto;
+struct sk_buff;
+struct packet_type;
+struct net_device;
+
+struct datalink_proto *
+register_snap_client(const unsigned char *desc,
+ int (*rcvfunc)(struct sk_buff *, struct net_device *,
+ struct packet_type *,
+ struct net_device *orig_dev));
+void unregister_snap_client(struct datalink_proto *proto);
+
+#endif
diff --git a/include/net/raw.h b/include/net/raw.h
new file mode 100644
index 000000000..3af5289fd
--- /dev/null
+++ b/include/net/raw.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the RAW-IP module.
+ *
+ * Version: @(#)raw.h 1.0.2 05/07/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ */
+#ifndef _RAW_H
+#define _RAW_H
+
+#include <net/inet_sock.h>
+#include <net/protocol.h>
+#include <net/netns/hash.h>
+#include <linux/hash.h>
+#include <linux/icmp.h>
+
+extern struct proto raw_prot;
+
+extern struct raw_hashinfo raw_v4_hashinfo;
+bool raw_v4_match(struct net *net, struct sock *sk, unsigned short num,
+ __be32 raddr, __be32 laddr, int dif, int sdif);
+
+int raw_abort(struct sock *sk, int err);
+void raw_icmp_error(struct sk_buff *, int, u32);
+int raw_local_deliver(struct sk_buff *, int);
+
+int raw_rcv(struct sock *, struct sk_buff *);
+
+#define RAW_HTABLE_LOG 8
+#define RAW_HTABLE_SIZE (1U << RAW_HTABLE_LOG)
+
+struct raw_hashinfo {
+ spinlock_t lock;
+
+ struct hlist_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
+};
+
+static inline u32 raw_hashfunc(const struct net *net, u32 proto)
+{
+ return hash_32(net_hash_mix(net) ^ proto, RAW_HTABLE_LOG);
+}
+
+static inline void raw_hashinfo_init(struct raw_hashinfo *hashinfo)
+{
+ int i;
+
+ spin_lock_init(&hashinfo->lock);
+ for (i = 0; i < RAW_HTABLE_SIZE; i++)
+ INIT_HLIST_HEAD(&hashinfo->ht[i]);
+}
+
+#ifdef CONFIG_PROC_FS
+int raw_proc_init(void);
+void raw_proc_exit(void);
+
+struct raw_iter_state {
+ struct seq_net_private p;
+ int bucket;
+};
+
+static inline struct raw_iter_state *raw_seq_private(struct seq_file *seq)
+{
+ return seq->private;
+}
+void *raw_seq_start(struct seq_file *seq, loff_t *pos);
+void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos);
+void raw_seq_stop(struct seq_file *seq, void *v);
+#endif
+
+int raw_hash_sk(struct sock *sk);
+void raw_unhash_sk(struct sock *sk);
+void raw_init(void);
+
+struct raw_sock {
+ /* inet_sock has to be the first member */
+ struct inet_sock inet;
+ struct icmp_filter filter;
+ u32 ipmr_table;
+};
+
+static inline struct raw_sock *raw_sk(const struct sock *sk)
+{
+ return (struct raw_sock *)sk;
+}
+
+static inline bool raw_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+ int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ return inet_bound_dev_eq(READ_ONCE(net->ipv4.sysctl_raw_l3mdev_accept),
+ bound_dev_if, dif, sdif);
+#else
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
+#endif /* _RAW_H */
diff --git a/include/net/rawv6.h b/include/net/rawv6.h
new file mode 100644
index 000000000..bc7090962
--- /dev/null
+++ b/include/net/rawv6.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_RAWV6_H
+#define _NET_RAWV6_H
+
+#include <net/protocol.h>
+#include <net/raw.h>
+
+extern struct raw_hashinfo raw_v6_hashinfo;
+bool raw_v6_match(struct net *net, struct sock *sk, unsigned short num,
+ const struct in6_addr *loc_addr,
+ const struct in6_addr *rmt_addr, int dif, int sdif);
+
+int raw_abort(struct sock *sk, int err);
+
+void raw6_icmp_error(struct sk_buff *, int nexthdr,
+ u8 type, u8 code, int inner_offset, __be32);
+bool raw6_local_deliver(struct sk_buff *, int);
+
+int rawv6_rcv(struct sock *sk, struct sk_buff *skb);
+
+#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
+ struct sk_buff *skb));
+int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock,
+ struct sk_buff *skb));
+#endif
+
+#endif
diff --git a/include/net/red.h b/include/net/red.h
new file mode 100644
index 000000000..425364de0
--- /dev/null
+++ b/include/net/red.h
@@ -0,0 +1,465 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_SCHED_RED_H
+#define __NET_SCHED_RED_H
+
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+#include <net/dsfield.h>
+#include <linux/reciprocal_div.h>
+
+/* Random Early Detection (RED) algorithm.
+ =======================================
+
+ Source: Sally Floyd and Van Jacobson, "Random Early Detection Gateways
+ for Congestion Avoidance", 1993, IEEE/ACM Transactions on Networking.
+
+   This file implements a "divisionless" version of the RED algorithm,
+   as written down in Fig.17 of the paper.
+
+ Short description.
+ ------------------
+
+ When a new packet arrives we calculate the average queue length:
+
+ avg = (1-W)*avg + W*current_queue_len,
+
+   W is the filter time constant (chosen as 2^(-Wlog)); it controls
+   the inertia of the algorithm. To allow larger bursts, W should be
+   decreased.
+
+ if (avg > th_max) -> packet marked (dropped).
+ if (avg < th_min) -> packet passes.
+ if (th_min < avg < th_max) we calculate probability:
+
+ Pb = max_P * (avg - th_min)/(th_max-th_min)
+
+ and mark (drop) packet with this probability.
+ Pb changes from 0 (at avg==th_min) to max_P (avg==th_max).
+   max_P should be small (not 1); usually 0.01..0.02 is a good value.
+
+   max_P is chosen so that max_P/(th_max-th_min)
+   is a negative power of two, so that the arithmetic involves
+   only shifts.
+
+
+ Parameters, settable by user:
+ -----------------------------
+
+ qth_min - bytes (should be < qth_max/2)
+   qth_max - bytes (should be at least 2*qth_min and less than limit)
+ Wlog - bits (<32) log(1/W).
+ Plog - bits (<32)
+
+ Plog is related to max_P by formula:
+
+ max_P = (qth_max-qth_min)/2^Plog;
+
+   E.g. if qth_max=128K and qth_min=32K, then Plog=22
+ corresponds to max_P=0.02
+
+ Scell_log
+ Stab
+
+   Lookup table for log((1-W)^(t/t_ave)).
+
+
+ NOTES:
+
+ Upper bound on W.
+ -----------------
+
+ If you want to allow bursts of L packets of size S,
+ you should choose W:
+
+ L + 1 - th_min/S < (1-(1-W)^L)/W
+
+ th_min/S = 32 th_min/S = 4
+
+ log(W) L
+ -1 33
+ -2 35
+ -3 39
+ -4 46
+ -5 57
+ -6 75
+ -7 101
+ -8 135
+ -9 190
+ etc.
+ */
+
+/*
+ * Adaptive RED: An Algorithm for Increasing the Robustness of RED's AQM
+ * (Sally Floyd, Ramakrishna Gummadi, and Scott Shenker), August 2001
+ *
+ * Every 500 ms:
+ * if (avg > target and max_p <= 0.5)
+ * increase max_p : max_p += alpha;
+ * else if (avg < target and max_p >= 0.01)
+ * decrease max_p : max_p *= beta;
+ *
+ * target : [qth_min + 0.4*(qth_max - qth_min),
+ *	      qth_min + 0.6*(qth_max - qth_min)].
+ * alpha : min(0.01, max_p / 4)
+ * beta : 0.9
+ * max_P is a Q0.32 fixed point number (with a 32-bit mantissa),
+ * between 0.01 and 0.5 (1% - 50%) [ it's no longer a negative power of two ]
+ */
+#define RED_ONE_PERCENT ((u32)DIV_ROUND_CLOSEST(1ULL<<32, 100))
+
+#define MAX_P_MIN (1 * RED_ONE_PERCENT)
+#define MAX_P_MAX (50 * RED_ONE_PERCENT)
+#define MAX_P_ALPHA(val) min(MAX_P_MIN, val / 4)
+
+#define RED_STAB_SIZE 256
+#define RED_STAB_MASK (RED_STAB_SIZE - 1)
+
+struct red_stats {
+ u32 prob_drop; /* Early probability drops */
+ u32 prob_mark; /* Early probability marks */
+ u32 forced_drop; /* Forced drops, qavg > max_thresh */
+ u32 forced_mark; /* Forced marks, qavg > max_thresh */
+ u32 pdrop; /* Drops due to queue limits */
+};
+
+struct red_parms {
+ /* Parameters */
+ u32 qth_min; /* Min avg length threshold: Wlog scaled */
+ u32 qth_max; /* Max avg length threshold: Wlog scaled */
+ u32 Scell_max;
+ u32 max_P; /* probability, [0 .. 1.0] 32 scaled */
+ /* reciprocal_value(max_P / qth_delta) */
+ struct reciprocal_value max_P_reciprocal;
+ u32 qth_delta; /* max_th - min_th */
+ u32 target_min; /* min_th + 0.4*(max_th - min_th) */
+ u32 target_max; /* min_th + 0.6*(max_th - min_th) */
+ u8 Scell_log;
+ u8 Wlog; /* log(W) */
+ u8 Plog; /* random number bits */
+ u8 Stab[RED_STAB_SIZE];
+};
+
+struct red_vars {
+ /* Variables */
+ int qcount; /* Number of packets since last random
+ number generation */
+ u32 qR; /* Cached random number */
+
+ unsigned long qavg; /* Average queue length: Wlog scaled */
+ ktime_t qidlestart; /* Start of current idle period */
+};
+
+static inline u32 red_maxp(u8 Plog)
+{
+ return Plog < 32 ? (~0U >> Plog) : ~0U;
+}
+
+static inline void red_set_vars(struct red_vars *v)
+{
+	/* Reset the average queue length. The value is strictly bound
+	 * to the parameters below; resetting hurts a bit, but leaving
+	 * it might result in an unreasonable qavg for a while. --TGR
+	 */
+ v->qavg = 0;
+
+ v->qcount = -1;
+}
+
+static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
+ u8 Scell_log, u8 *stab)
+{
+ if (fls(qth_min) + Wlog >= 32)
+ return false;
+ if (fls(qth_max) + Wlog >= 32)
+ return false;
+ if (Scell_log >= 32)
+ return false;
+ if (qth_max < qth_min)
+ return false;
+ if (stab) {
+ int i;
+
+ for (i = 0; i < RED_STAB_SIZE; i++)
+ if (stab[i] >= 32)
+ return false;
+ }
+ return true;
+}
+
+static inline int red_get_flags(unsigned char qopt_flags,
+ unsigned char historic_mask,
+ struct nlattr *flags_attr,
+ unsigned char supported_mask,
+ struct nla_bitfield32 *p_flags,
+ unsigned char *p_userbits,
+ struct netlink_ext_ack *extack)
+{
+ struct nla_bitfield32 flags;
+
+ if (qopt_flags && flags_attr) {
+ NL_SET_ERR_MSG_MOD(extack, "flags should be passed either through qopt, or through a dedicated attribute");
+ return -EINVAL;
+ }
+
+ if (flags_attr) {
+ flags = nla_get_bitfield32(flags_attr);
+ } else {
+ flags.selector = historic_mask;
+ flags.value = qopt_flags & historic_mask;
+ }
+
+ *p_flags = flags;
+ *p_userbits = qopt_flags & ~historic_mask;
+ return 0;
+}
+
+static inline int red_validate_flags(unsigned char flags,
+ struct netlink_ext_ack *extack)
+{
+ if ((flags & TC_RED_NODROP) && !(flags & TC_RED_ECN)) {
+ NL_SET_ERR_MSG_MOD(extack, "nodrop mode is only meaningful with ECN");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline void red_set_parms(struct red_parms *p,
+ u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
+ u8 Scell_log, u8 *stab, u32 max_P)
+{
+ int delta = qth_max - qth_min;
+ u32 max_p_delta;
+
+ p->qth_min = qth_min << Wlog;
+ p->qth_max = qth_max << Wlog;
+ p->Wlog = Wlog;
+ p->Plog = Plog;
+ if (delta <= 0)
+ delta = 1;
+ p->qth_delta = delta;
+ if (!max_P) {
+ max_P = red_maxp(Plog);
+ max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */
+ }
+ p->max_P = max_P;
+ max_p_delta = max_P / delta;
+ max_p_delta = max(max_p_delta, 1U);
+ p->max_P_reciprocal = reciprocal_value(max_p_delta);
+
+	/* RED Adaptive target :
+	 * [min_th + 0.4*(max_th - min_th),
+	 *  min_th + 0.6*(max_th - min_th)].
+	 */
+ delta /= 5;
+ p->target_min = qth_min + 2*delta;
+ p->target_max = qth_min + 3*delta;
+
+ p->Scell_log = Scell_log;
+ p->Scell_max = (255 << Scell_log);
+
+ if (stab)
+ memcpy(p->Stab, stab, sizeof(p->Stab));
+}
+
+static inline int red_is_idling(const struct red_vars *v)
+{
+ return v->qidlestart != 0;
+}
+
+static inline void red_start_of_idle_period(struct red_vars *v)
+{
+ v->qidlestart = ktime_get();
+}
+
+static inline void red_end_of_idle_period(struct red_vars *v)
+{
+ v->qidlestart = 0;
+}
+
+static inline void red_restart(struct red_vars *v)
+{
+ red_end_of_idle_period(v);
+ v->qavg = 0;
+ v->qcount = -1;
+}
+
+static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p,
+ const struct red_vars *v)
+{
+ s64 delta = ktime_us_delta(ktime_get(), v->qidlestart);
+ long us_idle = min_t(s64, delta, p->Scell_max);
+ int shift;
+
+	/*
+	 * The problem: ideally, the average queue length should be
+	 * recalculated over constant clock intervals. This is too
+	 * expensive, so the calculation is driven by outgoing packets.
+	 * When the queue is idle we have to model this clock by hand.
+	 *
+	 * SF+VJ proposed to "generate":
+	 *
+	 *	m = idletime / (average_pkt_size / bandwidth)
+	 *
+	 * dummy packets as a burst after idle time, i.e.
+	 *
+	 *	v->qavg *= (1-W)^m
+	 *
+	 * This is an apparently overcomplicated solution (e.g. we have to
+	 * precompute a table to make this calculation in reasonable time).
+	 * I believe that a simpler model may be used here,
+	 * but it is a field for experiments.
+	 */
+
+ shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK];
+
+ if (shift)
+ return v->qavg >> shift;
+ else {
+ /* Approximate initial part of exponent with linear function:
+ *
+ * (1-W)^m ~= 1-mW + ...
+ *
+		 * This seems to be the best solution to the
+		 * problem of too-coarse exponent tabulation.
+ */
+ us_idle = (v->qavg * (u64)us_idle) >> p->Scell_log;
+
+ if (us_idle < (v->qavg >> 1))
+ return v->qavg - us_idle;
+ else
+ return v->qavg >> 1;
+ }
+}
+
+static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p,
+ const struct red_vars *v,
+ unsigned int backlog)
+{
+ /*
+	 * NOTE: v->qavg is a fixed-point number with the point at Wlog.
+	 * The formula below is equivalent to the floating-point
+	 * version:
+ *
+ * qavg = qavg*(1-W) + backlog*W;
+ *
+ * --ANK (980924)
+ */
+ return v->qavg + (backlog - (v->qavg >> p->Wlog));
+}
+
+static inline unsigned long red_calc_qavg(const struct red_parms *p,
+ const struct red_vars *v,
+ unsigned int backlog)
+{
+ if (!red_is_idling(v))
+ return red_calc_qavg_no_idle_time(p, v, backlog);
+ else
+ return red_calc_qavg_from_idle_time(p, v);
+}
+
+
+static inline u32 red_random(const struct red_parms *p)
+{
+ return reciprocal_divide(get_random_u32(), p->max_P_reciprocal);
+}
+
+static inline int red_mark_probability(const struct red_parms *p,
+ const struct red_vars *v,
+ unsigned long qavg)
+{
+	/* The formula used below raises questions.
+
+	   OK. qR is a random number in the interval
+		(0..1/max_P)*(qth_max-qth_min)
+	   i.e. 0..(2^Plog). If we used floating-point
+	   arithmetic, it would be: (2^Plog)*rnd_num,
+	   where rnd_num is less than 1.
+
+	   Taking into account that qavg has its fixed
+	   point at Wlog, the two lines
+	   below have the following floating-point equivalent:
+
+ max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount
+
+ Any questions? --ANK (980924)
+ */
+ return !(((qavg - p->qth_min) >> p->Wlog) * v->qcount < v->qR);
+}
+
+enum {
+ RED_BELOW_MIN_THRESH,
+ RED_BETWEEN_TRESH,
+ RED_ABOVE_MAX_TRESH,
+};
+
+static inline int red_cmp_thresh(const struct red_parms *p, unsigned long qavg)
+{
+ if (qavg < p->qth_min)
+ return RED_BELOW_MIN_THRESH;
+ else if (qavg >= p->qth_max)
+ return RED_ABOVE_MAX_TRESH;
+ else
+ return RED_BETWEEN_TRESH;
+}
+
+enum {
+ RED_DONT_MARK,
+ RED_PROB_MARK,
+ RED_HARD_MARK,
+};
+
+static inline int red_action(const struct red_parms *p,
+ struct red_vars *v,
+ unsigned long qavg)
+{
+ switch (red_cmp_thresh(p, qavg)) {
+ case RED_BELOW_MIN_THRESH:
+ v->qcount = -1;
+ return RED_DONT_MARK;
+
+ case RED_BETWEEN_TRESH:
+ if (++v->qcount) {
+ if (red_mark_probability(p, v, qavg)) {
+ v->qcount = 0;
+ v->qR = red_random(p);
+ return RED_PROB_MARK;
+ }
+ } else
+ v->qR = red_random(p);
+
+ return RED_DONT_MARK;
+
+ case RED_ABOVE_MAX_TRESH:
+ v->qcount = -1;
+ return RED_HARD_MARK;
+ }
+
+ BUG();
+ return RED_DONT_MARK;
+}
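+
+/* Illustrative sketch (added): the enqueue-time decision used by RED-style
+ * qdiscs (cf. sch_red). Hypothetical helper combining the pieces above.
+ */
+static inline int example_red_enqueue_decision(const struct red_parms *p,
+					       struct red_vars *v,
+					       unsigned int backlog)
+{
+	v->qavg = red_calc_qavg(p, v, backlog);
+	if (red_is_idling(v))
+		red_end_of_idle_period(v);
+	return red_action(p, v, v->qavg);	/* RED_{DONT,PROB,HARD}_MARK */
+}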
+
+static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v)
+{
+ unsigned long qavg;
+ u32 max_p_delta;
+
+ qavg = v->qavg;
+ if (red_is_idling(v))
+ qavg = red_calc_qavg_from_idle_time(p, v);
+
+ /* v->qavg is fixed point number with point at Wlog */
+ qavg >>= p->Wlog;
+
+ if (qavg > p->target_max && p->max_P <= MAX_P_MAX)
+ p->max_P += MAX_P_ALPHA(p->max_P); /* maxp = maxp + alpha */
+ else if (qavg < p->target_min && p->max_P >= MAX_P_MIN)
+ p->max_P = (p->max_P/10)*9; /* maxp = maxp * Beta */
+
+ max_p_delta = DIV_ROUND_CLOSEST(p->max_P, p->qth_delta);
+ max_p_delta = max(max_p_delta, 1U);
+ p->max_P_reciprocal = reciprocal_value(max_p_delta);
+}
+#endif
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
new file mode 100644
index 000000000..b2cb4a9eb
--- /dev/null
+++ b/include/net/regulatory.h
@@ -0,0 +1,240 @@
+
+#ifndef __NET_REGULATORY_H
+#define __NET_REGULATORY_H
+/*
+ * regulatory support structures
+ *
+ * Copyright 2008-2009 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/ieee80211.h>
+#include <linux/nl80211.h>
+#include <linux/rcupdate.h>
+
+/**
+ * enum environment_cap - Environment parsed from country IE
+ * @ENVIRON_ANY: indicates country IE applies to both indoor and
+ * outdoor operation.
+ * @ENVIRON_INDOOR: indicates country IE applies only to indoor operation
+ * @ENVIRON_OUTDOOR: indicates country IE applies only to outdoor operation
+ */
+enum environment_cap {
+ ENVIRON_ANY,
+ ENVIRON_INDOOR,
+ ENVIRON_OUTDOOR,
+};
+
+/**
+ * struct regulatory_request - used to keep track of regulatory requests
+ *
+ * @rcu_head: RCU head struct used to free the request
+ * @wiphy_idx: this is set if this request's initiator is
+ * %REGDOM_SET_BY_COUNTRY_IE or %REGDOM_SET_BY_DRIVER. This
+ * can be used by the wireless core to deal with conflicts
+ * and potentially inform users of which devices specifically
+ *	caused the conflicts.
+ * @initiator: indicates who sent this request, could be any of
+ * those set in nl80211_reg_initiator (%NL80211_REGDOM_SET_BY_*)
+ * @alpha2: the ISO / IEC 3166 alpha2 country code of the requested
+ * regulatory domain. We have a few special codes:
+ * 00 - World regulatory domain
+ * 99 - built by driver but a specific alpha2 cannot be determined
+ * 98 - result of an intersection between two regulatory domains
+ * 97 - regulatory domain has not yet been configured
+ * @dfs_region: If CRDA responded with a regulatory domain that requires
+ * DFS master operation on a known DFS region (NL80211_DFS_*),
+ * dfs_region represents that region. Drivers can use this and the
+ * @alpha2 to adjust their device's DFS parameters as required.
+ * @user_reg_hint_type: if the @initiator was of type
+ * %NL80211_REGDOM_SET_BY_USER, this classifies the type
+ * of hint passed. This could be any of the %NL80211_USER_REG_HINT_*
+ * types.
+ * @intersect: indicates whether the wireless core should intersect
+ * the requested regulatory domain with the presently set regulatory
+ * domain.
+ * @processed: indicates whether or not this request has already been
+ *	processed. When the last request is processed it means that the
+ *	regulatory domain currently set on cfg80211 is updated from
+ *	CRDA and can be used by other regulatory requests. When
+ *	the last request is not yet processed we must yield until it
+ *	is processed before processing any new requests.
+ * @country_ie_checksum: checksum of the last processed and accepted
+ * country IE
+ * @country_ie_env: lets us know if the AP is telling us we are outdoor,
+ * indoor, or if it doesn't matter
+ * @list: used to insert into the reg_requests_list linked list
+ */
+struct regulatory_request {
+ struct rcu_head rcu_head;
+ int wiphy_idx;
+ enum nl80211_reg_initiator initiator;
+ enum nl80211_user_reg_hint_type user_reg_hint_type;
+ char alpha2[3];
+ enum nl80211_dfs_regions dfs_region;
+ bool intersect;
+ bool processed;
+ enum environment_cap country_ie_env;
+ struct list_head list;
+};
+
+/**
+ * enum ieee80211_regulatory_flags - device regulatory flags
+ *
+ * @REGULATORY_CUSTOM_REG: tells us the driver for this device
+ * has its own custom regulatory domain and cannot identify the
+ * ISO / IEC 3166 alpha2 it belongs to. When this is enabled
+ * we will disregard the first regulatory hint (when the
+ * initiator is %REGDOM_SET_BY_CORE). Drivers that use
+ * wiphy_apply_custom_regulatory() should have this flag set
+ * or the regulatory core will set it for the wiphy.
+ * If you use regulatory_hint() *after* using
+ * wiphy_apply_custom_regulatory() the wireless core will
+ * clear the REGULATORY_CUSTOM_REG for your wiphy as it would be
+ * implied that the device somehow gained knowledge of its region.
+ * @REGULATORY_STRICT_REG: tells us that the wiphy for this device
+ *	has a regulatory domain that it wishes to be considered as the
+ * superset for regulatory rules. After this device gets its regulatory
+ * domain programmed further regulatory hints shall only be considered
+ * for this device to enhance regulatory compliance, forcing the
+ * device to only possibly use subsets of the original regulatory
+ *	rules. For example, if channels 13 and 14 are disabled by this
+ *	device's regulatory domain, no user-specified regulatory hint which
+ *	has these channels enabled would enable them for this wiphy;
+ * the device's original regulatory domain will be trusted as the
+ * base. You can program the superset of regulatory rules for this
+ * wiphy with regulatory_hint() for cards programmed with an
+ * ISO3166-alpha2 country code. wiphys that use regulatory_hint()
+ * will have their wiphy->regd programmed once the regulatory
+ * domain is set, and all other regulatory hints will be ignored
+ * until their own regulatory domain gets programmed.
+ * @REGULATORY_DISABLE_BEACON_HINTS: enable this if your driver needs to
+ * ensure that passive scan flags and beaconing flags may not be lifted by
+ * cfg80211 due to regulatory beacon hints. For more information on beacon
+ *	hints, read the documentation for regulatory_hint_found_beacon()
+ * @REGULATORY_COUNTRY_IE_FOLLOW_POWER: for devices that have a preference
+ * that even though they may have programmed their own custom power
+ * setting prior to wiphy registration, they want to ensure their channel
+ * power settings are updated for this connection with the power settings
+ * derived from the regulatory domain. The regulatory domain used will be
+ * based on the ISO3166-alpha2 from country IE provided through
+ * regulatory_hint_country_ie()
+ * @REGULATORY_COUNTRY_IE_IGNORE: for devices that have a preference to ignore
+ * all country IE information processed by the regulatory core. This will
+ * override %REGULATORY_COUNTRY_IE_FOLLOW_POWER as all country IEs will
+ * be ignored.
+ * @REGULATORY_ENABLE_RELAX_NO_IR: for devices that wish to allow the
+ * NO_IR relaxation, which enables transmissions on channels on which
+ * otherwise initiating radiation is not allowed. This will enable the
+ * relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration
+ * option
+ * @REGULATORY_WIPHY_SELF_MANAGED: for devices that employ wiphy-specific
+ * regdom management. These devices will ignore all regdom changes not
+ * originating from their own wiphy.
+ *	A self-managed wiphy employs only regulatory information obtained from
+ * the FW and driver and does not use other cfg80211 sources like
+ * beacon-hints, country-code IEs and hints from other devices on the same
+ * system. Conversely, a self-managed wiphy does not share its regulatory
+ * hints with other devices in the system. If a system contains several
+ * devices, one or more of which are self-managed, there might be
+ * contradictory regulatory settings between them. Usage of this flag is
+ * generally discouraged. Only use it if the FW/driver is incompatible
+ * with non-locally originated hints.
+ * This flag is incompatible with the flags: %REGULATORY_CUSTOM_REG,
+ * %REGULATORY_STRICT_REG, %REGULATORY_COUNTRY_IE_FOLLOW_POWER,
+ * %REGULATORY_COUNTRY_IE_IGNORE and %REGULATORY_DISABLE_BEACON_HINTS.
+ * Mixing any of the above flags with this flag will result in a failure
+ * to register the wiphy. This flag implies
+ * %REGULATORY_DISABLE_BEACON_HINTS and %REGULATORY_COUNTRY_IE_IGNORE.
+ */
+enum ieee80211_regulatory_flags {
+ REGULATORY_CUSTOM_REG = BIT(0),
+ REGULATORY_STRICT_REG = BIT(1),
+ REGULATORY_DISABLE_BEACON_HINTS = BIT(2),
+ REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(3),
+ REGULATORY_COUNTRY_IE_IGNORE = BIT(4),
+ REGULATORY_ENABLE_RELAX_NO_IR = BIT(5),
+ /* reuse bit 6 next time */
+ REGULATORY_WIPHY_SELF_MANAGED = BIT(7),
+};
+
+struct ieee80211_freq_range {
+ u32 start_freq_khz;
+ u32 end_freq_khz;
+ u32 max_bandwidth_khz;
+};
+
+struct ieee80211_power_rule {
+ u32 max_antenna_gain;
+ u32 max_eirp;
+};
+
+/**
+ * struct ieee80211_wmm_ac - used to store per-AC WMM regulatory limitations
+ *
+ * The information provided in this structure is required for QoS
+ * transmit queue configuration. Cf. IEEE 802.11 7.3.2.29.
+ *
+ * @cw_min: minimum contention window [a value of the form
+ * 2^n-1 in the range 1..32767]
+ * @cw_max: maximum contention window [like @cw_min]
+ * @cot: maximum burst time in units of 32 usecs, 0 meaning disabled
+ * @aifsn: arbitration interframe space [0..255]
+ *
+ */
+struct ieee80211_wmm_ac {
+ u16 cw_min;
+ u16 cw_max;
+ u16 cot;
+ u8 aifsn;
+};
+
+struct ieee80211_wmm_rule {
+ struct ieee80211_wmm_ac client[IEEE80211_NUM_ACS];
+ struct ieee80211_wmm_ac ap[IEEE80211_NUM_ACS];
+};
+
+struct ieee80211_reg_rule {
+ struct ieee80211_freq_range freq_range;
+ struct ieee80211_power_rule power_rule;
+ struct ieee80211_wmm_rule wmm_rule;
+ u32 flags;
+ u32 dfs_cac_ms;
+ bool has_wmm;
+};
+
+struct ieee80211_regdomain {
+ struct rcu_head rcu_head;
+ u32 n_reg_rules;
+ char alpha2[3];
+ enum nl80211_dfs_regions dfs_region;
+ struct ieee80211_reg_rule reg_rules[];
+};
+
+#define REG_RULE_EXT(start, end, bw, gain, eirp, dfs_cac, reg_flags) \
+{ \
+ .freq_range.start_freq_khz = MHZ_TO_KHZ(start), \
+ .freq_range.end_freq_khz = MHZ_TO_KHZ(end), \
+ .freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw), \
+ .power_rule.max_antenna_gain = DBI_TO_MBI(gain), \
+ .power_rule.max_eirp = DBM_TO_MBM(eirp), \
+ .flags = reg_flags, \
+ .dfs_cac_ms = dfs_cac, \
+}
+
+#define REG_RULE(start, end, bw, gain, eirp, reg_flags) \
+ REG_RULE_EXT(start, end, bw, gain, eirp, 0, reg_flags)
+
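+/*
+ * Example (editorial sketch, identifiers and rule values illustrative):
+ * a driver carrying its own regulatory data could describe it with
+ * REG_RULE() and apply it before wiphy registration, as described for
+ * %REGULATORY_CUSTOM_REG above:
+ *
+ *	static const struct ieee80211_regdomain my_regdom = {
+ *		.n_reg_rules = 1,
+ *		.alpha2 = "99",
+ *		.reg_rules = {
+ *			REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
+ *		},
+ *	};
+ *
+ *	wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+ *	wiphy_apply_custom_regulatory(wiphy, &my_regdom);
+ */
+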
+#endif
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
new file mode 100644
index 000000000..144c39db9
--- /dev/null
+++ b/include/net/request_sock.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * NET Generic infrastructure for Network protocols.
+ *
+ * Definitions for request_sock
+ *
+ * Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * From code originally in include/net/tcp.h
+ */
+#ifndef _REQUEST_SOCK_H
+#define _REQUEST_SOCK_H
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/refcount.h>
+
+#include <net/sock.h>
+
+struct request_sock;
+struct sk_buff;
+struct dst_entry;
+struct proto;
+
+struct request_sock_ops {
+ int family;
+ unsigned int obj_size;
+ struct kmem_cache *slab;
+ char *slab_name;
+ int (*rtx_syn_ack)(const struct sock *sk,
+ struct request_sock *req);
+ void (*send_ack)(const struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req);
+ void (*send_reset)(const struct sock *sk,
+ struct sk_buff *skb);
+ void (*destructor)(struct request_sock *req);
+ void (*syn_ack_timeout)(const struct request_sock *req);
+};
+
+int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
+
+struct saved_syn {
+ u32 mac_hdrlen;
+ u32 network_hdrlen;
+ u32 tcp_hdrlen;
+ u8 data[];
+};
+
+/* struct request_sock - mini sock to represent a connection request
+ */
+struct request_sock {
+ struct sock_common __req_common;
+#define rsk_refcnt __req_common.skc_refcnt
+#define rsk_hash __req_common.skc_hash
+#define rsk_listener __req_common.skc_listener
+#define rsk_window_clamp __req_common.skc_window_clamp
+#define rsk_rcv_wnd __req_common.skc_rcv_wnd
+
+ struct request_sock *dl_next;
+ u16 mss;
+ u8 num_retrans; /* number of retransmits */
+ u8 syncookie:1; /* syncookie: encode tcpopts in timestamp */
+ u8 num_timeout:7; /* number of timeouts */
+ u32 ts_recent;
+ struct timer_list rsk_timer;
+ const struct request_sock_ops *rsk_ops;
+ struct sock *sk;
+ struct saved_syn *saved_syn;
+ u32 secid;
+ u32 peer_secid;
+ u32 timeout;
+};
+
+static inline struct request_sock *inet_reqsk(const struct sock *sk)
+{
+ return (struct request_sock *)sk;
+}
+
+static inline struct sock *req_to_sk(struct request_sock *req)
+{
+ return (struct sock *)req;
+}
+
+static inline struct request_sock *
+reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
+ bool attach_listener)
+{
+ struct request_sock *req;
+
+ req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
+ if (!req)
+ return NULL;
+ req->rsk_listener = NULL;
+ if (attach_listener) {
+ if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
+ kmem_cache_free(ops->slab, req);
+ return NULL;
+ }
+ req->rsk_listener = sk_listener;
+ }
+ req->rsk_ops = ops;
+ req_to_sk(req)->sk_prot = sk_listener->sk_prot;
+ sk_node_init(&req_to_sk(req)->sk_node);
+ sk_tx_queue_clear(req_to_sk(req));
+ req->saved_syn = NULL;
+ req->timeout = 0;
+ req->num_timeout = 0;
+ req->num_retrans = 0;
+ req->sk = NULL;
+ refcount_set(&req->rsk_refcnt, 0);
+
+ return req;
+}
+
+static inline void __reqsk_free(struct request_sock *req)
+{
+ req->rsk_ops->destructor(req);
+ if (req->rsk_listener)
+ sock_put(req->rsk_listener);
+ kfree(req->saved_syn);
+ kmem_cache_free(req->rsk_ops->slab, req);
+}
+
+static inline void reqsk_free(struct request_sock *req)
+{
+ WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
+ __reqsk_free(req);
+}
+
+static inline void reqsk_put(struct request_sock *req)
+{
+ if (refcount_dec_and_test(&req->rsk_refcnt))
+ reqsk_free(req);
+}
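+
+/*
+ * Editorial sketch of the typical request-sock lifecycle (simplified;
+ * "ops" is whichever request_sock_ops the protocol provides): allocate
+ * on SYN reception, publish with one reference, drop it with reqsk_put():
+ *
+ *	req = reqsk_alloc(ops, sk_listener, true);
+ *	if (!req)
+ *		return -ENOBUFS;
+ *	refcount_set(&req->rsk_refcnt, 1);
+ *	...
+ *	reqsk_put(req);
+ *
+ * reqsk_put() frees the request via __reqsk_free() once the refcount
+ * drops to zero.
+ */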
+
+/*
+ * For a TCP Fast Open listener -
+ * lock - protects the access to all the reqsk, which is co-owned by
+ * the listener and the child socket.
+ * qlen - pending TFO requests (still in TCP_SYN_RECV).
+ * max_qlen - max TFO reqs allowed before TFO is disabled.
+ *
+ * XXX (TFO) - ideally these fields could be made part of the "listen_sock"
+ * structure above, but that is awkward: listen_sock is part of
+ * request_sock_queue and is therefore freed when a listener is stopped,
+ * while TFO-related fields may continue to be accessed even after a
+ * listener is closed, until its sk_refcnt drops to 0, implying no more
+ * outstanding TFO reqs. One solution is to keep listen_opt around until
+ * sk_refcnt drops to 0, but there is further complexity to resolve,
+ * e.g. a listener can be disabled temporarily through
+ * shutdown()->tcp_disconnect(), and re-enabled later.
+ */
+struct fastopen_queue {
+ struct request_sock *rskq_rst_head; /* Keep track of past TFO */
+ struct request_sock *rskq_rst_tail; /* requests that caused RST.
+ * This is part of the defense
+ * against spoofing attacks.
+ */
+ spinlock_t lock;
+ int qlen; /* # of pending (TCP_SYN_RECV) reqs */
+ int max_qlen; /* != 0 iff TFO is currently enabled */
+
+ struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
+};
+
+/**
+ * struct request_sock_queue - queue of request_socks
+ *
+ * @rskq_accept_head: FIFO head of established children
+ * @rskq_accept_tail: FIFO tail of established children
+ * @rskq_defer_accept: User waits for some data after accept()
+ */
+struct request_sock_queue {
+ spinlock_t rskq_lock;
+ u8 rskq_defer_accept;
+
+ u32 synflood_warned;
+ atomic_t qlen;
+ atomic_t young;
+
+ struct request_sock *rskq_accept_head;
+ struct request_sock *rskq_accept_tail;
+ struct fastopen_queue fastopenq; /* Check max_qlen != 0 to determine
+ * if TFO is enabled.
+ */
+};
+
+void reqsk_queue_alloc(struct request_sock_queue *queue);
+
+void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
+ bool reset);
+
+static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
+{
+ return READ_ONCE(queue->rskq_accept_head) == NULL;
+}
+
+static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
+ struct sock *parent)
+{
+ struct request_sock *req;
+
+ spin_lock_bh(&queue->rskq_lock);
+ req = queue->rskq_accept_head;
+ if (req) {
+ sk_acceptq_removed(parent);
+ WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
+ if (queue->rskq_accept_head == NULL)
+ queue->rskq_accept_tail = NULL;
+ }
+ spin_unlock_bh(&queue->rskq_lock);
+ return req;
+}
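+
+/*
+ * Editorial sketch of an accept()-style consumer (simplified from the
+ * inet_csk_accept() pattern):
+ *
+ *	if (reqsk_queue_empty(queue))
+ *		return ERR_PTR(-EAGAIN);
+ *	req = reqsk_queue_remove(queue, parent_sk);
+ *	if (req) {
+ *		child = req->sk;
+ *		reqsk_put(req);
+ *	}
+ */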
+
+static inline void reqsk_queue_removed(struct request_sock_queue *queue,
+ const struct request_sock *req)
+{
+ if (req->num_timeout == 0)
+ atomic_dec(&queue->young);
+ atomic_dec(&queue->qlen);
+}
+
+static inline void reqsk_queue_added(struct request_sock_queue *queue)
+{
+ atomic_inc(&queue->young);
+ atomic_inc(&queue->qlen);
+}
+
+static inline int reqsk_queue_len(const struct request_sock_queue *queue)
+{
+ return atomic_read(&queue->qlen);
+}
+
+static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
+{
+ return atomic_read(&queue->young);
+}
+
+#endif /* _REQUEST_SOCK_H */
diff --git a/include/net/rose.h b/include/net/rose.h
new file mode 100644
index 000000000..23267b4ef
--- /dev/null
+++ b/include/net/rose.h
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Declarations of Rose type objects.
+ *
+ * Jonathan Naylor G4KLX 25/8/96
+ */
+
+#ifndef _ROSE_H
+#define _ROSE_H
+
+#include <linux/rose.h>
+#include <net/ax25.h>
+#include <net/sock.h>
+
+#define ROSE_ADDR_LEN 5
+
+#define ROSE_MIN_LEN 3
+
+#define ROSE_CALL_REQ_ADDR_LEN_OFF 3
+#define ROSE_CALL_REQ_ADDR_LEN_VAL 0xAA /* each address is 10 digits */
+#define ROSE_CALL_REQ_DEST_ADDR_OFF 4
+#define ROSE_CALL_REQ_SRC_ADDR_OFF 9
+#define ROSE_CALL_REQ_FACILITIES_OFF 14
+
+#define ROSE_GFI 0x10
+#define ROSE_Q_BIT 0x80
+#define ROSE_D_BIT 0x40
+#define ROSE_M_BIT 0x10
+
+#define ROSE_CALL_REQUEST 0x0B
+#define ROSE_CALL_ACCEPTED 0x0F
+#define ROSE_CLEAR_REQUEST 0x13
+#define ROSE_CLEAR_CONFIRMATION 0x17
+#define ROSE_DATA 0x00
+#define ROSE_INTERRUPT 0x23
+#define ROSE_INTERRUPT_CONFIRMATION 0x27
+#define ROSE_RR 0x01
+#define ROSE_RNR 0x05
+#define ROSE_REJ 0x09
+#define ROSE_RESET_REQUEST 0x1B
+#define ROSE_RESET_CONFIRMATION 0x1F
+#define ROSE_REGISTRATION_REQUEST 0xF3
+#define ROSE_REGISTRATION_CONFIRMATION 0xF7
+#define ROSE_RESTART_REQUEST 0xFB
+#define ROSE_RESTART_CONFIRMATION 0xFF
+#define ROSE_DIAGNOSTIC 0xF1
+#define ROSE_ILLEGAL 0xFD
+
+/* Define Link State constants. */
+
+enum {
+ ROSE_STATE_0, /* Ready */
+ ROSE_STATE_1, /* Awaiting Call Accepted */
+ ROSE_STATE_2, /* Awaiting Clear Confirmation */
+ ROSE_STATE_3, /* Data Transfer */
+ ROSE_STATE_4, /* Awaiting Reset Confirmation */
+ ROSE_STATE_5 /* Deferred Call Acceptance */
+};
+
+#define ROSE_DEFAULT_T0 180000 /* Default T10 T20 value */
+#define ROSE_DEFAULT_T1 200000 /* Default T11 T21 value */
+#define ROSE_DEFAULT_T2 180000 /* Default T12 T22 value */
+#define ROSE_DEFAULT_T3 180000 /* Default T13 T23 value */
+#define ROSE_DEFAULT_HB 5000 /* Default Holdback value */
+#define ROSE_DEFAULT_IDLE 0 /* No Activity Timeout - none */
+#define ROSE_DEFAULT_ROUTING 1 /* Default routing flag */
+#define ROSE_DEFAULT_FAIL_TIMEOUT 120000 /* Time until link considered usable */
+#define ROSE_DEFAULT_MAXVC 50 /* Maximum number of VCs per neighbour */
+#define ROSE_DEFAULT_WINDOW_SIZE 7 /* Default window size */
+
+#define ROSE_MODULUS 8
+#define ROSE_MAX_PACKET_SIZE 251 /* Maximum packet size */
+
+#define ROSE_COND_ACK_PENDING 0x01
+#define ROSE_COND_PEER_RX_BUSY 0x02
+#define ROSE_COND_OWN_RX_BUSY 0x04
+
+#define FAC_NATIONAL 0x00
+#define FAC_CCITT 0x0F
+
+#define FAC_NATIONAL_RAND 0x7F
+#define FAC_NATIONAL_FLAGS 0x3F
+#define FAC_NATIONAL_DEST_DIGI 0xE9
+#define FAC_NATIONAL_SRC_DIGI 0xEB
+#define FAC_NATIONAL_FAIL_CALL 0xED
+#define FAC_NATIONAL_FAIL_ADD 0xEE
+#define FAC_NATIONAL_DIGIS 0xEF
+
+#define FAC_CCITT_DEST_NSAP 0xC9
+#define FAC_CCITT_SRC_NSAP 0xCB
+
+struct rose_neigh {
+ struct rose_neigh *next;
+ ax25_address callsign;
+ ax25_digi *digipeat;
+ ax25_cb *ax25;
+ struct net_device *dev;
+ unsigned short count;
+ unsigned short use;
+ unsigned int number;
+ char restarted;
+ char dce_mode;
+ char loopback;
+ struct sk_buff_head queue;
+ struct timer_list t0timer;
+ struct timer_list ftimer;
+};
+
+struct rose_node {
+ struct rose_node *next;
+ rose_address address;
+ unsigned short mask;
+ unsigned char count;
+ char loopback;
+ struct rose_neigh *neighbour[3];
+};
+
+struct rose_route {
+ struct rose_route *next;
+ unsigned int lci1, lci2;
+ rose_address src_addr, dest_addr;
+ ax25_address src_call, dest_call;
+ struct rose_neigh *neigh1, *neigh2;
+ unsigned int rand;
+};
+
+struct rose_sock {
+ struct sock sock;
+ rose_address source_addr, dest_addr;
+ ax25_address source_call, dest_call;
+ unsigned char source_ndigis, dest_ndigis;
+ ax25_address source_digis[ROSE_MAX_DIGIS];
+ ax25_address dest_digis[ROSE_MAX_DIGIS];
+ struct rose_neigh *neighbour;
+ struct net_device *device;
+ netdevice_tracker dev_tracker;
+ unsigned int lci, rand;
+ unsigned char state, condition, qbitincl, defer;
+ unsigned char cause, diagnostic;
+ unsigned short vs, vr, va, vl;
+ unsigned long t1, t2, t3, hb, idle;
+#ifdef M_BIT
+ unsigned short fraglen;
+ struct sk_buff_head frag_queue;
+#endif
+ struct sk_buff_head ack_queue;
+ struct rose_facilities_struct facilities;
+ struct timer_list timer;
+ struct timer_list idletimer;
+};
+
+#define rose_sk(sk) ((struct rose_sock *)(sk))
+
+/* af_rose.c */
+extern ax25_address rose_callsign;
+extern int sysctl_rose_restart_request_timeout;
+extern int sysctl_rose_call_request_timeout;
+extern int sysctl_rose_reset_request_timeout;
+extern int sysctl_rose_clear_request_timeout;
+extern int sysctl_rose_no_activity_timeout;
+extern int sysctl_rose_ack_hold_back_timeout;
+extern int sysctl_rose_routing_control;
+extern int sysctl_rose_link_fail_timeout;
+extern int sysctl_rose_maximum_vcs;
+extern int sysctl_rose_window_size;
+
+int rosecmp(const rose_address *, const rose_address *);
+int rosecmpm(const rose_address *, const rose_address *, unsigned short);
+char *rose2asc(char *buf, const rose_address *);
+struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
+void rose_kill_by_neigh(struct rose_neigh *);
+unsigned int rose_new_lci(struct rose_neigh *);
+int rose_rx_call_request(struct sk_buff *, struct net_device *,
+ struct rose_neigh *, unsigned int);
+void rose_destroy_socket(struct sock *);
+
+/* rose_dev.c */
+void rose_setup(struct net_device *);
+
+/* rose_in.c */
+int rose_process_rx_frame(struct sock *, struct sk_buff *);
+
+/* rose_link.c */
+void rose_start_ftimer(struct rose_neigh *);
+void rose_stop_ftimer(struct rose_neigh *);
+void rose_stop_t0timer(struct rose_neigh *);
+int rose_ftimer_running(struct rose_neigh *);
+void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *,
+ unsigned short);
+void rose_transmit_clear_request(struct rose_neigh *, unsigned int,
+ unsigned char, unsigned char);
+void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
+
+/* rose_loopback.c */
+void rose_loopback_init(void);
+void rose_loopback_clear(void);
+int rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
+
+/* rose_out.c */
+void rose_kick(struct sock *);
+void rose_enquiry_response(struct sock *);
+
+/* rose_route.c */
+extern struct rose_neigh *rose_loopback_neigh;
+extern const struct seq_operations rose_neigh_seqops;
+extern const struct seq_operations rose_node_seqops;
+extern struct seq_operations rose_route_seqops;
+
+void rose_add_loopback_neigh(void);
+int __must_check rose_add_loopback_node(const rose_address *);
+void rose_del_loopback_node(const rose_address *);
+void rose_rt_device_down(struct net_device *);
+void rose_link_device_down(struct net_device *);
+struct net_device *rose_dev_first(void);
+struct net_device *rose_dev_get(rose_address *);
+struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
+struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *,
+ unsigned char *, int);
+int rose_rt_ioctl(unsigned int, void __user *);
+void rose_link_failed(ax25_cb *, int);
+int rose_route_frame(struct sk_buff *, ax25_cb *);
+void rose_rt_free(void);
+
+/* rose_subr.c */
+void rose_clear_queues(struct sock *);
+void rose_frames_acked(struct sock *, unsigned short);
+void rose_requeue_frames(struct sock *);
+int rose_validate_nr(struct sock *, unsigned short);
+void rose_write_internal(struct sock *, int);
+int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
+int rose_parse_facilities(unsigned char *, unsigned int,
+ struct rose_facilities_struct *);
+void rose_disconnect(struct sock *, int, int, int);
+
+/* rose_timer.c */
+void rose_start_heartbeat(struct sock *);
+void rose_start_t1timer(struct sock *);
+void rose_start_t2timer(struct sock *);
+void rose_start_t3timer(struct sock *);
+void rose_start_hbtimer(struct sock *);
+void rose_start_idletimer(struct sock *);
+void rose_stop_heartbeat(struct sock *);
+void rose_stop_timer(struct sock *);
+void rose_stop_idletimer(struct sock *);
+
+/* sysctl_net_rose.c */
+void rose_register_sysctl(void);
+void rose_unregister_sysctl(void);
+
+#endif
diff --git a/include/net/route.h b/include/net/route.h
new file mode 100644
index 000000000..af8431b25
--- /dev/null
+++ b/include/net/route.h
@@ -0,0 +1,402 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP router.
+ *
+ * Version: @(#)route.h 1.0.4 05/27/93
+ *
+ * Authors: Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Fixes:
+ * Alan Cox : Reformatted. Added ip_rt_local()
+ * Alan Cox : Support for TCP parameters.
+ * Alexey Kuznetsov: Major changes for new routing code.
+ * Mike McLagan : Routing by source
+ * Robert Olsson : Added rt_cache statistics
+ */
+#ifndef _ROUTE_H
+#define _ROUTE_H
+
+#include <net/dst.h>
+#include <net/inetpeer.h>
+#include <net/flow.h>
+#include <net/inet_sock.h>
+#include <net/ip_fib.h>
+#include <net/arp.h>
+#include <net/ndisc.h>
+#include <linux/in_route.h>
+#include <linux/rtnetlink.h>
+#include <linux/rcupdate.h>
+#include <linux/route.h>
+#include <linux/ip.h>
+#include <linux/cache.h>
+#include <linux/security.h>
+
+#define RTO_ONLINK 0x01
+
+#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
+#define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
+
+static inline __u8 ip_sock_rt_scope(const struct sock *sk)
+{
+ if (sock_flag(sk, SOCK_LOCALROUTE))
+ return RT_SCOPE_LINK;
+
+ return RT_SCOPE_UNIVERSE;
+}
+
+static inline __u8 ip_sock_rt_tos(const struct sock *sk)
+{
+ return RT_TOS(inet_sk(sk)->tos);
+}
+
+struct ip_tunnel_info;
+struct fib_nh;
+struct fib_info;
+struct uncached_list;
+struct rtable {
+ struct dst_entry dst;
+
+ int rt_genid;
+ unsigned int rt_flags;
+ __u16 rt_type;
+ __u8 rt_is_input;
+ __u8 rt_uses_gateway;
+
+ int rt_iif;
+
+ u8 rt_gw_family;
+ /* Info on neighbour */
+ union {
+ __be32 rt_gw4;
+ struct in6_addr rt_gw6;
+ };
+
+ /* Miscellaneous cached information */
+ u32 rt_mtu_locked:1,
+ rt_pmtu:31;
+
+ struct list_head rt_uncached;
+ struct uncached_list *rt_uncached_list;
+};
+
+static inline bool rt_is_input_route(const struct rtable *rt)
+{
+ return rt->rt_is_input != 0;
+}
+
+static inline bool rt_is_output_route(const struct rtable *rt)
+{
+ return rt->rt_is_input == 0;
+}
+
+static inline __be32 rt_nexthop(const struct rtable *rt, __be32 daddr)
+{
+ if (rt->rt_gw_family == AF_INET)
+ return rt->rt_gw4;
+ return daddr;
+}
+
+struct ip_rt_acct {
+ __u32 o_bytes;
+ __u32 o_packets;
+ __u32 i_bytes;
+ __u32 i_packets;
+};
+
+struct rt_cache_stat {
+ unsigned int in_slow_tot;
+ unsigned int in_slow_mc;
+ unsigned int in_no_route;
+ unsigned int in_brd;
+ unsigned int in_martian_dst;
+ unsigned int in_martian_src;
+ unsigned int out_slow_tot;
+ unsigned int out_slow_mc;
+};
+
+extern struct ip_rt_acct __percpu *ip_rt_acct;
+
+struct in_device;
+
+int ip_rt_init(void);
+void rt_cache_flush(struct net *net);
+void rt_flush_dev(struct net_device *dev);
+struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *flp,
+ const struct sk_buff *skb);
+struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *flp,
+ struct fib_result *res,
+ const struct sk_buff *skb);
+
+static inline struct rtable *__ip_route_output_key(struct net *net,
+ struct flowi4 *flp)
+{
+ return ip_route_output_key_hash(net, flp, NULL);
+}
+
+struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
+ const struct sock *sk);
+struct rtable *ip_route_output_tunnel(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net *net, __be32 *saddr,
+ const struct ip_tunnel_info *info,
+ u8 protocol, bool use_cache);
+
+struct dst_entry *ipv4_blackhole_route(struct net *net,
+ struct dst_entry *dst_orig);
+
+static inline struct rtable *ip_route_output_key(struct net *net, struct flowi4 *flp)
+{
+ return ip_route_output_flow(net, flp, NULL);
+}
+
+static inline struct rtable *ip_route_output(struct net *net, __be32 daddr,
+ __be32 saddr, u8 tos, int oif)
+{
+ struct flowi4 fl4 = {
+ .flowi4_oif = oif,
+ .flowi4_tos = tos,
+ .daddr = daddr,
+ .saddr = saddr,
+ };
+ return ip_route_output_key(net, &fl4);
+}
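+
+/*
+ * Editorial sketch: resolving and releasing an output route (addresses,
+ * tos and oif illustrative):
+ *
+ *	struct rtable *rt;
+ *
+ *	rt = ip_route_output(net, daddr, 0, tos, 0);
+ *	if (IS_ERR(rt))
+ *		return PTR_ERR(rt);
+ *	... use rt->dst ...
+ *	ip_rt_put(rt);
+ */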
+
+static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi4 *fl4,
+ struct sock *sk,
+ __be32 daddr, __be32 saddr,
+ __be16 dport, __be16 sport,
+ __u8 proto, __u8 tos, int oif)
+{
+ flowi4_init_output(fl4, oif, sk ? READ_ONCE(sk->sk_mark) : 0, tos,
+ RT_SCOPE_UNIVERSE, proto,
+ sk ? inet_sk_flowi_flags(sk) : 0,
+ daddr, saddr, dport, sport, sock_net_uid(net, sk));
+ if (sk)
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
+ return ip_route_output_flow(net, fl4, sk);
+}
+
+static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 *fl4,
+ __be32 daddr, __be32 saddr,
+ __be32 gre_key, __u8 tos, int oif)
+{
+ memset(fl4, 0, sizeof(*fl4));
+ fl4->flowi4_oif = oif;
+ fl4->daddr = daddr;
+ fl4->saddr = saddr;
+ fl4->flowi4_tos = tos;
+ fl4->flowi4_proto = IPPROTO_GRE;
+ fl4->fl4_gre_key = gre_key;
+ return ip_route_output_key(net, fl4);
+}
+int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev,
+ struct in_device *in_dev, u32 *itag);
+int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
+ u8 tos, struct net_device *devin);
+int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src,
+ u8 tos, struct net_device *devin,
+ const struct sk_buff *hint);
+
+static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
+ u8 tos, struct net_device *devin)
+{
+ int err;
+
+ rcu_read_lock();
+ err = ip_route_input_noref(skb, dst, src, tos, devin);
+ if (!err) {
+ skb_dst_force(skb);
+ if (!skb_dst(skb))
+ err = -EINVAL;
+ }
+ rcu_read_unlock();
+
+ return err;
+}
+
+void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif,
+ u8 protocol);
+void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
+void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u8 protocol);
+void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
+void ip_rt_send_redirect(struct sk_buff *skb);
+
+unsigned int inet_addr_type(struct net *net, __be32 addr);
+unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id);
+unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
+ __be32 addr);
+unsigned int inet_addr_type_dev_table(struct net *net,
+ const struct net_device *dev,
+ __be32 addr);
+void ip_rt_multicast_event(struct in_device *);
+int ip_rt_ioctl(struct net *, unsigned int cmd, struct rtentry *rt);
+void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
+struct rtable *rt_dst_alloc(struct net_device *dev,
+ unsigned int flags, u16 type, bool noxfrm);
+struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt);
+
+struct in_ifaddr;
+void fib_add_ifaddr(struct in_ifaddr *);
+void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
+void fib_modify_prefix_metric(struct in_ifaddr *ifa, u32 new_metric);
+
+void rt_add_uncached_list(struct rtable *rt);
+void rt_del_uncached_list(struct rtable *rt);
+
+int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
+ u32 table_id, struct fib_info *fi,
+ int *fa_index, int fa_start, unsigned int flags);
+
+static inline void ip_rt_put(struct rtable *rt)
+{
+ /* dst_release() accepts a NULL parameter.
+ * We rely on dst being the first member of struct rtable.
+ */
+ BUILD_BUG_ON(offsetof(struct rtable, dst) != 0);
+ dst_release(&rt->dst);
+}
+
+#define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3)
+
+extern const __u8 ip_tos2prio[16];
+
+static inline char rt_tos2priority(u8 tos)
+{
+ return ip_tos2prio[IPTOS_TOS(tos)>>1];
+}
+
+/* ip_route_connect() and ip_route_newports() work in tandem whilst
+ * binding a socket for a new outgoing connection.
+ *
+ * In order to use IPSEC properly, we must, in the end, have a
+ * route that was looked up using all available keys including source
+ * and destination ports.
+ *
+ * However, if a source port needs to be allocated (the user specified
+ * a wildcard source port) we need to obtain addressing information
+ * in order to perform that allocation.
+ *
+ * So ip_route_connect() looks up a route using wildcarded source and
+ * destination ports in the key, simply so that we can get a pair of
+ * addresses to use for port allocation.
+ *
+ * Later, once the ports are allocated, ip_route_newports() will make
+ * another route lookup if needed to make sure we catch any IPSEC
+ * rules keyed on the port information.
+ *
+ * The callers allocate the flow key on their stack, and must pass in
+ * the same flowi4 object to both the ip_route_connect() and the
+ * ip_route_newports() calls.
+ */
+
+static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst,
+ __be32 src, int oif, u8 protocol,
+ __be16 sport, __be16 dport,
+ const struct sock *sk)
+{
+ __u8 flow_flags = 0;
+
+ if (inet_sk(sk)->transparent)
+ flow_flags |= FLOWI_FLAG_ANYSRC;
+
+ flowi4_init_output(fl4, oif, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk),
+ ip_sock_rt_scope(sk), protocol, flow_flags, dst,
+ src, dport, sport, sk->sk_uid);
+}
+
+static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst,
+ __be32 src, int oif, u8 protocol,
+ __be16 sport, __be16 dport,
+ struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+ struct rtable *rt;
+
+ ip_route_connect_init(fl4, dst, src, oif, protocol, sport, dport, sk);
+
+ if (!dst || !src) {
+ rt = __ip_route_output_key(net, fl4);
+ if (IS_ERR(rt))
+ return rt;
+ ip_rt_put(rt);
+ flowi4_update_output(fl4, oif, fl4->flowi4_tos, fl4->daddr,
+ fl4->saddr);
+ }
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
+ return ip_route_output_flow(net, fl4, sk);
+}
+
+static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable *rt,
+ __be16 orig_sport, __be16 orig_dport,
+ __be16 sport, __be16 dport,
+ struct sock *sk)
+{
+ if (sport != orig_sport || dport != orig_dport) {
+ fl4->fl4_dport = dport;
+ fl4->fl4_sport = sport;
+ ip_rt_put(rt);
+ flowi4_update_output(fl4, sk->sk_bound_dev_if,
+ RT_CONN_FLAGS(sk), fl4->daddr,
+ fl4->saddr);
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
+ return ip_route_output_flow(sock_net(sk), fl4, sk);
+ }
+ return rt;
+}
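+
+/*
+ * Editorial sketch of the connect/newports tandem described above
+ * (error handling abbreviated; identifiers illustrative):
+ *
+ *	struct flowi4 fl4;
+ *	struct rtable *rt;
+ *
+ *	rt = ip_route_connect(&fl4, daddr, 0, oif, IPPROTO_TCP,
+ *			      0, dport, sk);
+ *	if (IS_ERR(rt))
+ *		return PTR_ERR(rt);
+ *	... allocate a local port, yielding sport ...
+ *	rt = ip_route_newports(&fl4, rt, 0, dport, sport, dport, sk);
+ *	if (IS_ERR(rt))
+ *		return PTR_ERR(rt);
+ */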
+
+static inline int inet_iif(const struct sk_buff *skb)
+{
+ struct rtable *rt = skb_rtable(skb);
+
+ if (rt && rt->rt_iif)
+ return rt->rt_iif;
+
+ return skb->skb_iif;
+}
+
+static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
+{
+ int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
+ struct net *net = dev_net(dst->dev);
+
+ if (hoplimit == 0)
+ hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
+ return hoplimit;
+}
+
+static inline struct neighbour *ip_neigh_gw4(struct net_device *dev,
+ __be32 daddr)
+{
+ struct neighbour *neigh;
+
+ neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)daddr);
+ if (unlikely(!neigh))
+ neigh = __neigh_create(&arp_tbl, &daddr, dev, false);
+
+ return neigh;
+}
+
+static inline struct neighbour *ip_neigh_for_gw(struct rtable *rt,
+ struct sk_buff *skb,
+ bool *is_v6gw)
+{
+ struct net_device *dev = rt->dst.dev;
+ struct neighbour *neigh;
+
+ if (likely(rt->rt_gw_family == AF_INET)) {
+ neigh = ip_neigh_gw4(dev, rt->rt_gw4);
+ } else if (rt->rt_gw_family == AF_INET6) {
+ neigh = ip_neigh_gw6(dev, &rt->rt_gw6);
+ *is_v6gw = true;
+ } else {
+ neigh = ip_neigh_gw4(dev, ip_hdr(skb)->daddr);
+ }
+ return neigh;
+}
+
+#endif /* _ROUTE_H */
diff --git a/include/net/rpl.h b/include/net/rpl.h
new file mode 100644
index 000000000..30fe780d1
--- /dev/null
+++ b/include/net/rpl.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RPL implementation
+ *
+ * Author:
+ * (C) 2020 Alexander Aring <alex.aring@gmail.com>
+ */
+
+#ifndef _NET_RPL_H
+#define _NET_RPL_H
+
+#include <linux/rpl.h>
+
+#if IS_ENABLED(CONFIG_IPV6_RPL_LWTUNNEL)
+extern int rpl_init(void);
+extern void rpl_exit(void);
+#else
+static inline int rpl_init(void)
+{
+ return 0;
+}
+
+static inline void rpl_exit(void) {}
+#endif
+
+size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
+ unsigned char cmpre);
+
+void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr,
+ const struct ipv6_rpl_sr_hdr *inhdr,
+ const struct in6_addr *daddr, unsigned char n);
+
+void ipv6_rpl_srh_compress(struct ipv6_rpl_sr_hdr *outhdr,
+ const struct ipv6_rpl_sr_hdr *inhdr,
+ const struct in6_addr *daddr, unsigned char n);
+
+#endif /* _NET_RPL_H */
diff --git a/include/net/rsi_91x.h b/include/net/rsi_91x.h
new file mode 100644
index 000000000..040f07b47
--- /dev/null
+++ b/include/net/rsi_91x.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_HEADER_H__
+#define __RSI_HEADER_H__
+
+#include <linux/skbuff.h>
+
+/* HAL queue information */
+#define RSI_COEX_Q 0x0
+#define RSI_BT_Q 0x2
+#define RSI_WLAN_Q 0x3
+#define RSI_WIFI_MGMT_Q 0x4
+#define RSI_WIFI_DATA_Q 0x5
+#define RSI_BT_MGMT_Q 0x6
+#define RSI_BT_DATA_Q 0x7
+
+enum rsi_coex_queues {
+ RSI_COEX_Q_INVALID = -1,
+ RSI_COEX_Q_COMMON = 0,
+ RSI_COEX_Q_BT,
+ RSI_COEX_Q_WLAN
+};
+
+enum rsi_host_intf {
+ RSI_HOST_INTF_SDIO = 0,
+ RSI_HOST_INTF_USB
+};
+
+struct rsi_proto_ops {
+ int (*coex_send_pkt)(void *priv, struct sk_buff *skb, u8 hal_queue);
+ enum rsi_host_intf (*get_host_intf)(void *priv);
+ void (*set_bt_context)(void *priv, void *context);
+};
+
+struct rsi_mod_ops {
+ int (*attach)(void *priv, struct rsi_proto_ops *ops);
+ void (*detach)(void *priv);
+ int (*recv_pkt)(void *priv, const u8 *msg);
+};
+
+extern const struct rsi_mod_ops rsi_bt_ops;
+#endif
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
new file mode 100644
index 000000000..9f881b74f
--- /dev/null
+++ b/include/net/rtnetlink.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_RTNETLINK_H
+#define __NET_RTNETLINK_H
+
+#include <linux/rtnetlink.h>
+#include <net/netlink.h>
+
+typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *,
+ struct netlink_ext_ack *);
+typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
+
+enum rtnl_link_flags {
+ RTNL_FLAG_DOIT_UNLOCKED = BIT(0),
+ RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1),
+};
+
+enum rtnl_kinds {
+ RTNL_KIND_NEW,
+ RTNL_KIND_DEL,
+ RTNL_KIND_GET,
+ RTNL_KIND_SET
+};
+#define RTNL_KIND_MASK 0x3
+
+static inline enum rtnl_kinds rtnl_msgtype_kind(int msgtype)
+{
+ return msgtype & RTNL_KIND_MASK;
+}
+
+void rtnl_register(int protocol, int msgtype,
+ rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
+int rtnl_register_module(struct module *owner, int protocol, int msgtype,
+ rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
+int rtnl_unregister(int protocol, int msgtype);
+void rtnl_unregister_all(int protocol);
+
+static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
+{
+ if (nlmsg_len(nlh) >= sizeof(struct rtgenmsg))
+ return ((struct rtgenmsg *) nlmsg_data(nlh))->rtgen_family;
+ else
+ return AF_UNSPEC;
+}
+
+/**
+ * struct rtnl_link_ops - rtnetlink link operations
+ *
+ * @list: Used internally
+ * @kind: Identifier
+ * @netns_refund: Physical device, move to init_net on netns exit
+ * @maxtype: Highest device specific netlink attribute number
+ * @policy: Netlink policy for device specific attribute validation
+ * @validate: Optional validation function for netlink/changelink parameters
+ * @alloc: netdev allocation function, may be %NULL; if set, it is used
+ * in place of alloc_netdev_mqs(), in which case @priv_size
+ * and @setup are unused. Returns a netdev or ERR_PTR().
+ * @priv_size: sizeof net_device private space
+ * @setup: net_device setup function
+ * @newlink: Function for configuring and registering a new device
+ * @changelink: Function for changing parameters of an existing device
+ * @dellink: Function to remove a device
+ * @get_size: Function to calculate required room for dumping device
+ * specific netlink attributes
+ * @fill_info: Function to dump device specific netlink attributes
+ * @get_xstats_size: Function to calculate required room for dumping device
+ * specific statistics
+ * @fill_xstats: Function to dump device specific statistics
+ * @get_num_tx_queues: Function to determine number of transmit queues
+ * to create when creating a new device.
+ * @get_num_rx_queues: Function to determine number of receive queues
+ * to create when creating a new device.
+ * @get_link_net: Function to get the i/o netns of the device
+ * @get_linkxstats_size: Function to calculate the required room for
+ * dumping device-specific extended link stats
+ * @fill_linkxstats: Function to dump device-specific extended link stats
+ */
+struct rtnl_link_ops {
+ struct list_head list;
+
+ const char *kind;
+
+ size_t priv_size;
+ struct net_device *(*alloc)(struct nlattr *tb[],
+ const char *ifname,
+ unsigned char name_assign_type,
+ unsigned int num_tx_queues,
+ unsigned int num_rx_queues);
+ void (*setup)(struct net_device *dev);
+
+ bool netns_refund;
+ unsigned int maxtype;
+ const struct nla_policy *policy;
+ int (*validate)(struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack);
+
+ int (*newlink)(struct net *src_net,
+ struct net_device *dev,
+ struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack);
+ int (*changelink)(struct net_device *dev,
+ struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack);
+ void (*dellink)(struct net_device *dev,
+ struct list_head *head);
+
+ size_t (*get_size)(const struct net_device *dev);
+ int (*fill_info)(struct sk_buff *skb,
+ const struct net_device *dev);
+
+ size_t (*get_xstats_size)(const struct net_device *dev);
+ int (*fill_xstats)(struct sk_buff *skb,
+ const struct net_device *dev);
+ unsigned int (*get_num_tx_queues)(void);
+ unsigned int (*get_num_rx_queues)(void);
+
+ unsigned int slave_maxtype;
+ const struct nla_policy *slave_policy;
+ int (*slave_changelink)(struct net_device *dev,
+ struct net_device *slave_dev,
+ struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack);
+ size_t (*get_slave_size)(const struct net_device *dev,
+ const struct net_device *slave_dev);
+ int (*fill_slave_info)(struct sk_buff *skb,
+ const struct net_device *dev,
+ const struct net_device *slave_dev);
+ struct net *(*get_link_net)(const struct net_device *dev);
+ size_t (*get_linkxstats_size)(const struct net_device *dev,
+ int attr);
+ int (*fill_linkxstats)(struct sk_buff *skb,
+ const struct net_device *dev,
+ int *prividx, int attr);
+};
+
+int __rtnl_link_register(struct rtnl_link_ops *ops);
+void __rtnl_link_unregister(struct rtnl_link_ops *ops);
+
+int rtnl_link_register(struct rtnl_link_ops *ops);
+void rtnl_link_unregister(struct rtnl_link_ops *ops);
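+
+/*
+ * Editorial sketch of a minimal link-type registration ("mydev",
+ * struct my_priv and my_setup() are illustrative; see the field
+ * documentation above):
+ *
+ *	static struct rtnl_link_ops my_link_ops __read_mostly = {
+ *		.kind		= "mydev",
+ *		.priv_size	= sizeof(struct my_priv),
+ *		.setup		= my_setup,
+ *	};
+ *
+ *	err = rtnl_link_register(&my_link_ops);
+ *	...
+ *	rtnl_link_unregister(&my_link_ops);
+ */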
+
+/**
+ * struct rtnl_af_ops - rtnetlink address family operations
+ *
+ * @list: Used internally
+ * @family: Address family
+ * @fill_link_af: Function to fill IFLA_AF_SPEC with address family
+ * specific netlink attributes.
+ * @get_link_af_size: Function to calculate size of address family specific
+ * netlink attributes.
+ * @validate_link_af: Validate an IFLA_AF_SPEC attribute, must check attr
+ * for invalid configuration settings.
+ * @set_link_af: Function to parse an IFLA_AF_SPEC attribute and modify
+ * net_device accordingly.
+ */
+struct rtnl_af_ops {
+ struct list_head list;
+ int family;
+
+ int (*fill_link_af)(struct sk_buff *skb,
+ const struct net_device *dev,
+ u32 ext_filter_mask);
+ size_t (*get_link_af_size)(const struct net_device *dev,
+ u32 ext_filter_mask);
+
+ int (*validate_link_af)(const struct net_device *dev,
+ const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
+ int (*set_link_af)(struct net_device *dev,
+ const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
+ int (*fill_stats_af)(struct sk_buff *skb,
+ const struct net_device *dev);
+ size_t (*get_stats_af_size)(const struct net_device *dev);
+};
+
+void rtnl_af_register(struct rtnl_af_ops *ops);
+void rtnl_af_unregister(struct rtnl_af_ops *ops);
+
+struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
+struct net_device *rtnl_create_link(struct net *net, const char *ifname,
+ unsigned char name_assign_type,
+ const struct rtnl_link_ops *ops,
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack);
+int rtnl_delete_link(struct net_device *dev);
+int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
+
+int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
+ struct netlink_ext_ack *exterr);
+struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid);
+
+#define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
+
+#endif
diff --git a/include/net/rtnh.h b/include/net/rtnh.h
new file mode 100644
index 000000000..aa2cfc508
--- /dev/null
+++ b/include/net/rtnh.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_RTNH_H
+#define __NET_RTNH_H
+
+#include <linux/rtnetlink.h>
+#include <net/netlink.h>
+
+static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
+{
+ return remaining >= (int)sizeof(*rtnh) &&
+ rtnh->rtnh_len >= sizeof(*rtnh) &&
+ rtnh->rtnh_len <= remaining;
+}
+
+static inline struct rtnexthop *rtnh_next(const struct rtnexthop *rtnh,
+ int *remaining)
+{
+ int totlen = NLA_ALIGN(rtnh->rtnh_len);
+
+ *remaining -= totlen;
+ return (struct rtnexthop *) ((char *) rtnh + totlen);
+}
+
+static inline struct nlattr *rtnh_attrs(const struct rtnexthop *rtnh)
+{
+ return (struct nlattr *) ((char *) rtnh + NLA_ALIGN(sizeof(*rtnh)));
+}
+
+static inline int rtnh_attrlen(const struct rtnexthop *rtnh)
+{
+ return rtnh->rtnh_len - NLA_ALIGN(sizeof(*rtnh));
+}
+
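+/*
+ * Editorial sketch: walking the rtnexthop records of an RTA_MULTIPATH
+ * attribute with the helpers above ("attr" is illustrative):
+ *
+ *	struct rtnexthop *rtnh = nla_data(attr);
+ *	int remaining = nla_len(attr);
+ *
+ *	while (rtnh_ok(rtnh, remaining)) {
+ *		... inspect rtnh->rtnh_ifindex and rtnh_attrs(rtnh) ...
+ *		rtnh = rtnh_next(rtnh, &remaining);
+ *	}
+ */
+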
+#endif
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
new file mode 100644
index 000000000..b3e312840
--- /dev/null
+++ b/include/net/sch_generic.h
@@ -0,0 +1,1308 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_SCHED_GENERIC_H
+#define __NET_SCHED_GENERIC_H
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/pkt_sched.h>
+#include <linux/pkt_cls.h>
+#include <linux/percpu.h>
+#include <linux/dynamic_queue_limits.h>
+#include <linux/list.h>
+#include <linux/refcount.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/atomic.h>
+#include <linux/hashtable.h>
+#include <net/gen_stats.h>
+#include <net/rtnetlink.h>
+#include <net/flow_offload.h>
+
+struct Qdisc_ops;
+struct qdisc_walker;
+struct tcf_walker;
+struct module;
+struct bpf_flow_keys;
+
+struct qdisc_rate_table {
+ struct tc_ratespec rate;
+ u32 data[256];
+ struct qdisc_rate_table *next;
+ int refcnt;
+};
+
+enum qdisc_state_t {
+ __QDISC_STATE_SCHED,
+ __QDISC_STATE_DEACTIVATED,
+ __QDISC_STATE_MISSED,
+ __QDISC_STATE_DRAINING,
+};
+
+enum qdisc_state2_t {
+ /* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
+ * Use qdisc_run_begin/end() or qdisc_is_running() instead.
+ */
+ __QDISC_STATE2_RUNNING,
+};
+
+#define QDISC_STATE_MISSED BIT(__QDISC_STATE_MISSED)
+#define QDISC_STATE_DRAINING BIT(__QDISC_STATE_DRAINING)
+
+#define QDISC_STATE_NON_EMPTY (QDISC_STATE_MISSED | \
+ QDISC_STATE_DRAINING)
+
+struct qdisc_size_table {
+ struct rcu_head rcu;
+ struct list_head list;
+ struct tc_sizespec szopts;
+ int refcnt;
+ u16 data[];
+};
+
+/* similar to sk_buff_head, but skb->prev pointer is undefined. */
+struct qdisc_skb_head {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ __u32 qlen;
+ spinlock_t lock;
+};
+
+struct Qdisc {
+ int (*enqueue)(struct sk_buff *skb,
+ struct Qdisc *sch,
+ struct sk_buff **to_free);
+ struct sk_buff * (*dequeue)(struct Qdisc *sch);
+ unsigned int flags;
+#define TCQ_F_BUILTIN 1
+#define TCQ_F_INGRESS 2
+#define TCQ_F_CAN_BYPASS 4
+#define TCQ_F_MQROOT 8
+#define TCQ_F_ONETXQUEUE 0x10 /* dequeue_skb() can assume all skbs are for
+ * q->dev_queue: it can test
+ * netif_xmit_frozen_or_stopped() before
+ * dequeuing the next packet.
+ * This is true for MQ/MQPRIO slaves, or a
+ * non-multiqueue device.
+ */
+#define TCQ_F_WARN_NONWC (1 << 16)
+#define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */
+#define TCQ_F_NOPARENT 0x40 /* root of its hierarchy:
+ * qdisc_tree_decrease_qlen() should stop.
+ */
+#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
+#define TCQ_F_NOLOCK 0x100 /* qdisc does not require locking */
+#define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */
+ u32 limit;
+ const struct Qdisc_ops *ops;
+ struct qdisc_size_table __rcu *stab;
+ struct hlist_node hash;
+ u32 handle;
+ u32 parent;
+
+ struct netdev_queue *dev_queue;
+
+ struct net_rate_estimator __rcu *rate_est;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
+ struct gnet_stats_queue __percpu *cpu_qstats;
+ int pad;
+ refcount_t refcnt;
+
+ /*
+ * For performance's sake on SMP, we put highly modified fields at the end.
+ */
+ struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
+ struct qdisc_skb_head q;
+ struct gnet_stats_basic_sync bstats;
+ struct gnet_stats_queue qstats;
+ unsigned long state;
+ unsigned long state2; /* must be written under qdisc spinlock */
+ struct Qdisc *next_sched;
+ struct sk_buff_head skb_bad_txq;
+
+ spinlock_t busylock ____cacheline_aligned_in_smp;
+ spinlock_t seqlock;
+
+ struct rcu_head rcu;
+ netdevice_tracker dev_tracker;
+ /* private data */
+ long privdata[] ____cacheline_aligned;
+};
+
+static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
+{
+ if (qdisc->flags & TCQ_F_BUILTIN)
+ return;
+ refcount_inc(&qdisc->refcnt);
+}
+
+static inline bool qdisc_refcount_dec_if_one(struct Qdisc *qdisc)
+{
+ if (qdisc->flags & TCQ_F_BUILTIN)
+ return true;
+ return refcount_dec_if_one(&qdisc->refcnt);
+}
+
+/* Intended to be used by unlocked users, when concurrent qdisc release is
+ * possible.
+ */
+
+static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
+{
+ if (qdisc->flags & TCQ_F_BUILTIN)
+ return qdisc;
+ if (refcount_inc_not_zero(&qdisc->refcnt))
+ return qdisc;
+ return NULL;
+}
+
+/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
+ * root_lock section, or provide their own memory barriers -- ordering
+ * against qdisc_run_begin/end() atomic bit operations.
+ */
+static inline bool qdisc_is_running(struct Qdisc *qdisc)
+{
+ if (qdisc->flags & TCQ_F_NOLOCK)
+ return spin_is_locked(&qdisc->seqlock);
+ return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
+}
+
+static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
+{
+ return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
+}
+
+static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
+{
+ return q->flags & TCQ_F_CPUSTATS;
+}
+
+static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
+{
+ if (qdisc_is_percpu_stats(qdisc))
+ return nolock_qdisc_is_empty(qdisc);
+ return !READ_ONCE(qdisc->q.qlen);
+}
+
+/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
+ * the qdisc root lock acquired.
+ */
+static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+{
+ if (qdisc->flags & TCQ_F_NOLOCK) {
+ if (spin_trylock(&qdisc->seqlock))
+ return true;
+
+ /* No need to insist if the MISSED flag was already set.
+ * Note that test_and_set_bit() also gives us memory ordering
+ * guarantees wrt potential earlier enqueue() and below
+ * spin_trylock(), both of which are necessary to prevent races
+ */
+ if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
+ return false;
+
+ /* Try to take the lock again to make sure that we will either
+ * grab it or the CPU that still has it will see MISSED set
+ * when testing it in qdisc_run_end()
+ */
+ return spin_trylock(&qdisc->seqlock);
+ }
+ return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
+}
+
+static inline void qdisc_run_end(struct Qdisc *qdisc)
+{
+ if (qdisc->flags & TCQ_F_NOLOCK) {
+ spin_unlock(&qdisc->seqlock);
+
+ /* spin_unlock() only has store-release semantic. The unlock
+ * and test_bit() ordering is a store-load ordering, so a full
+ * memory barrier is needed here.
+ */
+ smp_mb();
+
+ if (unlikely(test_bit(__QDISC_STATE_MISSED,
+ &qdisc->state)))
+ __netif_schedule(qdisc);
+ } else {
+ __clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
+ }
+}
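+
+/*
+ * Editorial sketch of the canonical usage (cf. qdisc_run()): claim the
+ * running state, drain packets, then release it:
+ *
+ *	if (qdisc_run_begin(q)) {
+ *		... dequeue and transmit packets ...
+ *		qdisc_run_end(q);
+ *	}
+ */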
+
+static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
+{
+ return qdisc->flags & TCQ_F_ONETXQUEUE;
+}
+
+static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
+{
+#ifdef CONFIG_BQL
+ /* Non-BQL migrated drivers will return 0, too. */
+ return dql_avail(&txq->dql);
+#else
+ return 0;
+#endif
+}
+
+struct Qdisc_class_ops {
+ unsigned int flags;
+ /* Child qdisc manipulation */
+ struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
+ int (*graft)(struct Qdisc *, unsigned long cl,
+ struct Qdisc *, struct Qdisc **,
+ struct netlink_ext_ack *extack);
+ struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl);
+ void (*qlen_notify)(struct Qdisc *, unsigned long);
+
+ /* Class manipulation routines */
+ unsigned long (*find)(struct Qdisc *, u32 classid);
+ int (*change)(struct Qdisc *, u32, u32,
+ struct nlattr **, unsigned long *,
+ struct netlink_ext_ack *);
+ int (*delete)(struct Qdisc *, unsigned long,
+ struct netlink_ext_ack *);
+ void (*walk)(struct Qdisc *, struct qdisc_walker * arg);
+
+ /* Filter manipulation */
+ struct tcf_block * (*tcf_block)(struct Qdisc *sch,
+ unsigned long arg,
+ struct netlink_ext_ack *extack);
+ unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
+ u32 classid);
+ void (*unbind_tcf)(struct Qdisc *, unsigned long);
+
+ /* rtnetlink specific */
+ int (*dump)(struct Qdisc *, unsigned long,
+ struct sk_buff *skb, struct tcmsg*);
+ int (*dump_stats)(struct Qdisc *, unsigned long,
+ struct gnet_dump *);
+};
+
+/* Qdisc_class_ops flag values */
+
+/* Implements API that doesn't require rtnl lock */
+enum qdisc_class_ops_flags {
+ QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
+};
+
+struct Qdisc_ops {
+ struct Qdisc_ops *next;
+ const struct Qdisc_class_ops *cl_ops;
+ char id[IFNAMSIZ];
+ int priv_size;
+ unsigned int static_flags;
+
+ int (*enqueue)(struct sk_buff *skb,
+ struct Qdisc *sch,
+ struct sk_buff **to_free);
+ struct sk_buff * (*dequeue)(struct Qdisc *);
+ struct sk_buff * (*peek)(struct Qdisc *);
+
+ int (*init)(struct Qdisc *sch, struct nlattr *arg,
+ struct netlink_ext_ack *extack);
+ void (*reset)(struct Qdisc *);
+ void (*destroy)(struct Qdisc *);
+ int (*change)(struct Qdisc *sch,
+ struct nlattr *arg,
+ struct netlink_ext_ack *extack);
+ void (*attach)(struct Qdisc *sch);
+ int (*change_tx_queue_len)(struct Qdisc *, unsigned int);
+ void (*change_real_num_tx)(struct Qdisc *sch,
+ unsigned int new_real_tx);
+
+ int (*dump)(struct Qdisc *, struct sk_buff *);
+ int (*dump_stats)(struct Qdisc *, struct gnet_dump *);
+
+ void (*ingress_block_set)(struct Qdisc *sch,
+ u32 block_index);
+ void (*egress_block_set)(struct Qdisc *sch,
+ u32 block_index);
+ u32 (*ingress_block_get)(struct Qdisc *sch);
+ u32 (*egress_block_get)(struct Qdisc *sch);
+
+ struct module *owner;
+};
+
+struct tcf_result {
+ union {
+ struct {
+ unsigned long class;
+ u32 classid;
+ };
+ const struct tcf_proto *goto_tp;
+ };
+};
+
+struct tcf_chain;
+
+struct tcf_proto_ops {
+ struct list_head head;
+ char kind[IFNAMSIZ];
+
+ int (*classify)(struct sk_buff *,
+ const struct tcf_proto *,
+ struct tcf_result *);
+ int (*init)(struct tcf_proto*);
+ void (*destroy)(struct tcf_proto *tp, bool rtnl_held,
+ struct netlink_ext_ack *extack);
+
+ void* (*get)(struct tcf_proto*, u32 handle);
+ void (*put)(struct tcf_proto *tp, void *f);
+ int (*change)(struct net *net, struct sk_buff *,
+ struct tcf_proto*, unsigned long,
+ u32 handle, struct nlattr **,
+ void **, u32,
+ struct netlink_ext_ack *);
+ int (*delete)(struct tcf_proto *tp, void *arg,
+ bool *last, bool rtnl_held,
+ struct netlink_ext_ack *);
+ bool (*delete_empty)(struct tcf_proto *tp);
+ void (*walk)(struct tcf_proto *tp,
+ struct tcf_walker *arg, bool rtnl_held);
+ int (*reoffload)(struct tcf_proto *tp, bool add,
+ flow_setup_cb_t *cb, void *cb_priv,
+ struct netlink_ext_ack *extack);
+ void (*hw_add)(struct tcf_proto *tp,
+ void *type_data);
+ void (*hw_del)(struct tcf_proto *tp,
+ void *type_data);
+ void (*bind_class)(void *, u32, unsigned long,
+ void *, unsigned long);
+ void * (*tmplt_create)(struct net *net,
+ struct tcf_chain *chain,
+ struct nlattr **tca,
+ struct netlink_ext_ack *extack);
+ void (*tmplt_destroy)(void *tmplt_priv);
+
+ /* rtnetlink specific */
+ int (*dump)(struct net*, struct tcf_proto*, void *,
+ struct sk_buff *skb, struct tcmsg*,
+ bool);
+ int (*terse_dump)(struct net *net,
+ struct tcf_proto *tp, void *fh,
+ struct sk_buff *skb,
+ struct tcmsg *t, bool rtnl_held);
+ int (*tmplt_dump)(struct sk_buff *skb,
+ struct net *net,
+ void *tmplt_priv);
+
+ struct module *owner;
+ int flags;
+};
+
+/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
+ * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
+ * conditions can occur when filters are inserted/deleted simultaneously.
+ */
+enum tcf_proto_ops_flags {
+ TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
+};
+
+struct tcf_proto {
+ /* Fast access part */
+ struct tcf_proto __rcu *next;
+ void __rcu *root;
+
+ /* called under RCU BH lock */
+ int (*classify)(struct sk_buff *,
+ const struct tcf_proto *,
+ struct tcf_result *);
+ __be16 protocol;
+
+ /* All the rest */
+ u32 prio;
+ void *data;
+ const struct tcf_proto_ops *ops;
+ struct tcf_chain *chain;
+ /* Lock protects tcf_proto shared state and can be used by unlocked
+ * classifiers to protect their private data.
+ */
+ spinlock_t lock;
+ bool deleting;
+ refcount_t refcnt;
+ struct rcu_head rcu;
+ struct hlist_node destroy_ht_node;
+};
+
+struct qdisc_skb_cb {
+ struct {
+ unsigned int pkt_len;
+ u16 slave_dev_queue_mapping;
+ u16 tc_classid;
+ };
+#define QDISC_CB_PRIV_LEN 20
+ unsigned char data[QDISC_CB_PRIV_LEN];
+};
+
+typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
+
+struct tcf_chain {
+ /* Protects filter_chain. */
+ struct mutex filter_chain_lock;
+ struct tcf_proto __rcu *filter_chain;
+ struct list_head list;
+ struct tcf_block *block;
+ u32 index; /* chain index */
+ unsigned int refcnt;
+ unsigned int action_refcnt;
+ bool explicitly_created;
+ bool flushing;
+ const struct tcf_proto_ops *tmplt_ops;
+ void *tmplt_priv;
+ struct rcu_head rcu;
+};
+
+struct tcf_block {
+ /* Lock protects tcf_block and lifetime-management data of chains
+ * attached to the block (refcnt, action_refcnt, explicitly_created).
+ */
+ struct mutex lock;
+ struct list_head chain_list;
+ u32 index; /* block index for shared blocks */
+ u32 classid; /* which class this block belongs to */
+ refcount_t refcnt;
+ struct net *net;
+ struct Qdisc *q;
+ struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
+ struct flow_block flow_block;
+ struct list_head owner_list;
+ bool keep_dst;
+ atomic_t offloadcnt; /* Number of offloaded filters */
+ unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
+ unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
+ struct {
+ struct tcf_chain *chain;
+ struct list_head filter_chain_list;
+ } chain0;
+ struct rcu_head rcu;
+ DECLARE_HASHTABLE(proto_destroy_ht, 7);
+ struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
+};
+
+static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
+{
+ return lockdep_is_held(&chain->filter_chain_lock);
+}
+
+static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
+{
+ return lockdep_is_held(&tp->lock);
+}
+
+#define tcf_chain_dereference(p, chain) \
+ rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))
+
+#define tcf_proto_dereference(p, tp) \
+ rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
+
+static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
+{
+ struct qdisc_skb_cb *qcb;
+
+ BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
+ BUILD_BUG_ON(sizeof(qcb->data) < sz);
+}
+
+static inline int qdisc_qlen(const struct Qdisc *q)
+{
+ return q->q.qlen;
+}
+
+static inline int qdisc_qlen_sum(const struct Qdisc *q)
+{
+ __u32 qlen = q->qstats.qlen;
+ int i;
+
+ if (qdisc_is_percpu_stats(q)) {
+ for_each_possible_cpu(i)
+ qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
+ } else {
+ qlen += q->q.qlen;
+ }
+
+ return qlen;
+}
+
+static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
+{
+ return (struct qdisc_skb_cb *)skb->cb;
+}
+
+static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
+{
+ return &qdisc->q.lock;
+}
+
+static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
+{
+ struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
+
+ return q;
+}
+
+static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
+{
+ return rcu_dereference_bh(qdisc->dev_queue->qdisc);
+}
+
+static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
+{
+ return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping);
+}
+
+static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
+{
+ struct Qdisc *root = qdisc_root_sleeping(qdisc);
+
+ ASSERT_RTNL();
+ return qdisc_lock(root);
+}
+
+static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
+{
+ return qdisc->dev_queue->dev;
+}
+
+static inline void sch_tree_lock(struct Qdisc *q)
+{
+ if (q->flags & TCQ_F_MQROOT)
+ spin_lock_bh(qdisc_lock(q));
+ else
+ spin_lock_bh(qdisc_root_sleeping_lock(q));
+}
+
+static inline void sch_tree_unlock(struct Qdisc *q)
+{
+ if (q->flags & TCQ_F_MQROOT)
+ spin_unlock_bh(qdisc_lock(q));
+ else
+ spin_unlock_bh(qdisc_root_sleeping_lock(q));
+}
+
+extern struct Qdisc noop_qdisc;
+extern struct Qdisc_ops noop_qdisc_ops;
+extern struct Qdisc_ops pfifo_fast_ops;
+extern struct Qdisc_ops mq_qdisc_ops;
+extern struct Qdisc_ops noqueue_qdisc_ops;
+extern const struct Qdisc_ops *default_qdisc_ops;
+static inline const struct Qdisc_ops *
+get_default_qdisc_ops(const struct net_device *dev, int ntx)
+{
+ return ntx < dev->real_num_tx_queues ?
+ default_qdisc_ops : &pfifo_fast_ops;
+}
+
+struct Qdisc_class_common {
+ u32 classid;
+ struct hlist_node hnode;
+};
+
+struct Qdisc_class_hash {
+ struct hlist_head *hash;
+ unsigned int hashsize;
+ unsigned int hashmask;
+ unsigned int hashelems;
+};
+
+static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
+{
+ id ^= id >> 8;
+ id ^= id >> 4;
+ return id & mask;
+}
+
+static inline struct Qdisc_class_common *
+qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
+{
+ struct Qdisc_class_common *cl;
+ unsigned int h;
+
+ if (!id)
+ return NULL;
+
+ h = qdisc_class_hash(id, hash->hashmask);
+ hlist_for_each_entry(cl, &hash->hash[h], hnode) {
+ if (cl->classid == id)
+ return cl;
+ }
+ return NULL;
+}
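+
+/* Hypothetical lookup wrapper (not part of this header): classful qdiscs
+ * embed struct Qdisc_class_common in their private class type and recover
+ * it with container_of() after the hash lookup.
+ *
+ *	struct foo_class {
+ *		struct Qdisc_class_common common;
+ *	};
+ *
+ *	static struct foo_class *foo_find(struct Qdisc_class_hash *hash, u32 id)
+ *	{
+ *		struct Qdisc_class_common *cl = qdisc_class_find(hash, id);
+ *
+ *		return cl ? container_of(cl, struct foo_class, common) : NULL;
+ *	}
+ */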
+
+static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
+{
+ u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;
+
+ return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
+}
+
+int qdisc_class_hash_init(struct Qdisc_class_hash *);
+void qdisc_class_hash_insert(struct Qdisc_class_hash *,
+ struct Qdisc_class_common *);
+void qdisc_class_hash_remove(struct Qdisc_class_hash *,
+ struct Qdisc_class_common *);
+void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
+void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
+
+int dev_qdisc_change_tx_queue_len(struct net_device *dev);
+void dev_qdisc_change_real_num_tx(struct net_device *dev,
+ unsigned int new_real_tx);
+void dev_init_scheduler(struct net_device *dev);
+void dev_shutdown(struct net_device *dev);
+void dev_activate(struct net_device *dev);
+void dev_deactivate(struct net_device *dev);
+void dev_deactivate_many(struct list_head *head);
+struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+ struct Qdisc *qdisc);
+void qdisc_reset(struct Qdisc *qdisc);
+void qdisc_destroy(struct Qdisc *qdisc);
+void qdisc_put(struct Qdisc *qdisc);
+void qdisc_put_unlocked(struct Qdisc *qdisc);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
+#ifdef CONFIG_NET_SCHED
+int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+ void *type_data);
+void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+ struct Qdisc *new, struct Qdisc *old,
+ enum tc_setup_type type, void *type_data,
+ struct netlink_ext_ack *extack);
+#else
+static inline int
+qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+ void *type_data)
+{
+ q->flags &= ~TCQ_F_OFFLOADED;
+ return 0;
+}
+
+static inline void
+qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+ struct Qdisc *new, struct Qdisc *old,
+ enum tc_setup_type type, void *type_data,
+ struct netlink_ext_ack *extack)
+{
+}
+#endif
+void qdisc_offload_query_caps(struct net_device *dev,
+ enum tc_setup_type type,
+ void *caps, size_t caps_len);
+struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ const struct Qdisc_ops *ops,
+ struct netlink_ext_ack *extack);
+void qdisc_free(struct Qdisc *qdisc);
+struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
+ const struct Qdisc_ops *ops, u32 parentid,
+ struct netlink_ext_ack *extack);
+void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+ const struct qdisc_size_table *stab);
+int skb_do_redirect(struct sk_buff *);
+
+static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return skb->tc_at_ingress;
+#else
+ return false;
+#endif
+}
+
+static inline bool skb_skip_tc_classify(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (skb->tc_skip_classify) {
+ skb->tc_skip_classify = 0;
+ return true;
+ }
+#endif
+ return false;
+}
+
+/* Reset all TX qdiscs at queue indices greater than or equal to i. */
+static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
+{
+ struct Qdisc *qdisc;
+
+ for (; i < dev->num_tx_queues; i++) {
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
+ if (qdisc) {
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
+ }
+ }
+}
+
+/* Are all TX queues of the device empty? */
+static inline bool qdisc_all_tx_empty(const struct net_device *dev)
+{
+ unsigned int i;
+
+ rcu_read_lock();
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ const struct Qdisc *q = rcu_dereference(txq->qdisc);
+
+ if (!qdisc_is_empty(q)) {
+ rcu_read_unlock();
+ return false;
+ }
+ }
+ rcu_read_unlock();
+ return true;
+}
+
+/* Are any of the TX qdiscs changing? */
+static inline bool qdisc_tx_changing(const struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+ if (rcu_access_pointer(txq->qdisc) !=
+ rcu_access_pointer(txq->qdisc_sleeping))
+ return true;
+ }
+ return false;
+}
+
+/* Is the device using the noop qdisc on all queues? */
+static inline bool qdisc_tx_is_noop(const struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
+ return false;
+ }
+ return true;
+}
+
+static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
+{
+ return qdisc_skb_cb(skb)->pkt_len;
+}
+
+/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
+enum net_xmit_qdisc_t {
+ __NET_XMIT_STOLEN = 0x00010000,
+ __NET_XMIT_BYPASS = 0x00020000,
+};
+
+#ifdef CONFIG_NET_CLS_ACT
+#define net_xmit_drop_count(e) ((e) & __NET_XMIT_STOLEN ? 0 : 1)
+#else
+#define net_xmit_drop_count(e) (1)
+#endif
+
+static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
+ const struct Qdisc *sch)
+{
+#ifdef CONFIG_NET_SCHED
+ struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);
+
+ if (stab)
+ __qdisc_calculate_pkt_len(skb, stab);
+#endif
+}
+
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ qdisc_calculate_pkt_len(skb, sch);
+ return sch->enqueue(skb, sch, to_free);
+}
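+
+/* Illustrative child enqueue (hypothetical classful qdisc): capture pkt_len
+ * before enqueue since a drop may free the skb, and use net_xmit_drop_count()
+ * so stolen packets (__NET_XMIT_STOLEN) are not counted as drops.
+ *
+ *	len = qdisc_pkt_len(skb);
+ *	ret = qdisc_enqueue(skb, q->child, to_free);
+ *	if (ret != NET_XMIT_SUCCESS) {
+ *		if (net_xmit_drop_count(ret))
+ *			qdisc_qstats_drop(sch);
+ *		return ret;
+ *	}
+ *	sch->qstats.backlog += len;
+ *	sch->q.qlen++;
+ */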
+
+static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
+ __u64 bytes, __u32 packets)
+{
+ u64_stats_update_begin(&bstats->syncp);
+ u64_stats_add(&bstats->bytes, bytes);
+ u64_stats_add(&bstats->packets, packets);
+ u64_stats_update_end(&bstats->syncp);
+}
+
+static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
+ const struct sk_buff *skb)
+{
+ _bstats_update(bstats,
+ qdisc_pkt_len(skb),
+ skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
+}
+
+static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
+}
+
+static inline void qdisc_bstats_update(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ bstats_update(&sch->bstats, skb);
+}
+
+static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+}
+
+static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
+}
+
+static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+}
+
+static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
+}
+
+static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
+{
+ this_cpu_inc(sch->cpu_qstats->qlen);
+}
+
+static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
+{
+ this_cpu_dec(sch->cpu_qstats->qlen);
+}
+
+static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
+{
+ this_cpu_inc(sch->cpu_qstats->requeues);
+}
+
+static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
+{
+ sch->qstats.drops += count;
+}
+
+static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
+{
+ qstats->drops++;
+}
+
+static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
+{
+ qstats->overlimits++;
+}
+
+static inline void qdisc_qstats_drop(struct Qdisc *sch)
+{
+ qstats_drop_inc(&sch->qstats);
+}
+
+static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
+{
+ this_cpu_inc(sch->cpu_qstats->drops);
+}
+
+static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
+{
+ sch->qstats.overlimits++;
+}
+
+static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+{
+ __u32 qlen = qdisc_qlen_sum(sch);
+
+ return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+}
+
+static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
+ __u32 *backlog)
+{
+ struct gnet_stats_queue qstats = { 0 };
+
+ gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
+ *qlen = qstats.qlen + qdisc_qlen(sch);
+ *backlog = qstats.backlog;
+}
+
+static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
+{
+ __u32 qlen, backlog;
+
+ qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+ qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
+static inline void qdisc_purge_queue(struct Qdisc *sch)
+{
+ __u32 qlen, backlog;
+
+ qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+ qdisc_reset(sch);
+ qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
+static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
+ struct qdisc_skb_head *qh)
+{
+ struct sk_buff *last = qh->tail;
+
+ if (last) {
+ skb->next = NULL;
+ last->next = skb;
+ qh->tail = skb;
+ } else {
+ qh->tail = skb;
+ qh->head = skb;
+ }
+ qh->qlen++;
+}
+
+static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
+{
+ __qdisc_enqueue_tail(skb, &sch->q);
+ qdisc_qstats_backlog_inc(sch, skb);
+ return NET_XMIT_SUCCESS;
+}
+
+static inline void __qdisc_enqueue_head(struct sk_buff *skb,
+ struct qdisc_skb_head *qh)
+{
+ skb->next = qh->head;
+
+ if (!qh->head)
+ qh->tail = skb;
+ qh->head = skb;
+ qh->qlen++;
+}
+
+static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
+{
+ struct sk_buff *skb = qh->head;
+
+ if (likely(skb != NULL)) {
+ qh->head = skb->next;
+ qh->qlen--;
+ if (qh->head == NULL)
+ qh->tail = NULL;
+ skb->next = NULL;
+ }
+
+ return skb;
+}
+
+static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
+{
+ struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+
+ if (likely(skb != NULL)) {
+ qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_bstats_update(sch, skb);
+ }
+
+ return skb;
+}
+
+/* Instead of calling kfree_skb() while root qdisc lock is held,
+ * queue the skb for future freeing at end of __dev_xmit_skb()
+ */
+static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
+{
+ skb->next = *to_free;
+ *to_free = skb;
+}
+
+static inline void __qdisc_drop_all(struct sk_buff *skb,
+ struct sk_buff **to_free)
+{
+ if (skb->prev)
+ skb->prev->next = *to_free;
+ else
+ skb->next = *to_free;
+ *to_free = skb;
+}
+
+static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
+ struct qdisc_skb_head *qh,
+ struct sk_buff **to_free)
+{
+ struct sk_buff *skb = __qdisc_dequeue_head(qh);
+
+ if (likely(skb != NULL)) {
+ unsigned int len = qdisc_pkt_len(skb);
+
+ qdisc_qstats_backlog_dec(sch, skb);
+ __qdisc_drop(skb, to_free);
+ return len;
+ }
+
+ return 0;
+}
+
+static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
+{
+ const struct qdisc_skb_head *qh = &sch->q;
+
+ return qh->head;
+}
+
+/* generic pseudo peek method for non-work-conserving qdisc */
+static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
+{
+ struct sk_buff *skb = skb_peek(&sch->gso_skb);
+
+ /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
+ if (!skb) {
+ skb = sch->dequeue(sch);
+
+ if (skb) {
+ __skb_queue_head(&sch->gso_skb, skb);
+ /* it's still part of the queue */
+ qdisc_qstats_backlog_inc(sch, skb);
+ sch->q.qlen++;
+ }
+ }
+
+ return skb;
+}
+
+static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
+ struct sk_buff *skb)
+{
+ if (qdisc_is_percpu_stats(sch)) {
+ qdisc_qstats_cpu_backlog_dec(sch, skb);
+ qdisc_bstats_cpu_update(sch, skb);
+ qdisc_qstats_cpu_qlen_dec(sch);
+ } else {
+ qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_bstats_update(sch, skb);
+ sch->q.qlen--;
+ }
+}
+
+static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
+ unsigned int pkt_len)
+{
+ if (qdisc_is_percpu_stats(sch)) {
+ qdisc_qstats_cpu_qlen_inc(sch);
+ this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
+ } else {
+ sch->qstats.backlog += pkt_len;
+ sch->q.qlen++;
+ }
+}
+
+/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
+static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
+{
+ struct sk_buff *skb = skb_peek(&sch->gso_skb);
+
+ if (skb) {
+ skb = __skb_dequeue(&sch->gso_skb);
+ if (qdisc_is_percpu_stats(sch)) {
+ qdisc_qstats_cpu_backlog_dec(sch, skb);
+ qdisc_qstats_cpu_qlen_dec(sch);
+ } else {
+ qdisc_qstats_backlog_dec(sch, skb);
+ sch->q.qlen--;
+ }
+ } else {
+ skb = sch->dequeue(sch);
+ }
+
+ return skb;
+}
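+
+/* Illustrative pairing (hypothetical shaping qdisc): ->peek() uses
+ * qdisc_peek_dequeued() to examine the next packet without losing it, and
+ * ->dequeue() must then use qdisc_dequeue_peeked() so the skb parked in
+ * gso_skb is consumed rather than a fresh one being pulled.
+ *
+ *	skb = qdisc_peek_dequeued(sch);
+ *	if (skb && now >= q->next_tx_time)
+ *		return qdisc_dequeue_peeked(sch);
+ *	return NULL;
+ */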
+
+static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
+{
+ /*
+ * We do not know the backlog in bytes of this list, it
+ * is up to the caller to correct it
+ */
+ ASSERT_RTNL();
+ if (qh->qlen) {
+ rtnl_kfree_skbs(qh->head, qh->tail);
+
+ qh->head = NULL;
+ qh->tail = NULL;
+ qh->qlen = 0;
+ }
+}
+
+static inline void qdisc_reset_queue(struct Qdisc *sch)
+{
+ __qdisc_reset_queue(&sch->q);
+}
+
+static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
+ struct Qdisc **pold)
+{
+ struct Qdisc *old;
+
+ sch_tree_lock(sch);
+ old = *pold;
+ *pold = new;
+ if (old != NULL)
+ qdisc_purge_queue(old);
+ sch_tree_unlock(sch);
+
+ return old;
+}
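+
+/* Illustrative ->graft() (hypothetical classful qdisc): swap in the new
+ * child under the tree lock; qdisc_replace() purges the old child's queue
+ * so the parent's qlen/backlog accounting stays consistent.
+ *
+ *	if (!new)
+ *		new = &noop_qdisc;
+ *	*old = qdisc_replace(sch, new, &q->child);
+ */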
+
+static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
+{
+ rtnl_kfree_skbs(skb, skb);
+ qdisc_qstats_drop(sch);
+}
+
+static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ __qdisc_drop(skb, to_free);
+ qdisc_qstats_cpu_drop(sch);
+
+ return NET_XMIT_DROP;
+}
+
+static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ __qdisc_drop(skb, to_free);
+ qdisc_qstats_drop(sch);
+
+ return NET_XMIT_DROP;
+}
+
+static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ __qdisc_drop_all(skb, to_free);
+ qdisc_qstats_drop(sch);
+
+ return NET_XMIT_DROP;
+}
+
+/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
+ * long it will take to send a packet given its size.
+ */
+static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
+{
+ int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
+
+ if (slot < 0)
+ slot = 0;
+ slot >>= rtab->rate.cell_log;
+ if (slot > 255)
+ return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
+ return rtab->data[slot];
+}
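+
+/* Hypothetical use: with a rate table built for the configured link speed,
+ * the time charged for transmitting a packet is a single table lookup.
+ *
+ *	delay = qdisc_l2t(q->rtab, qdisc_pkt_len(skb));
+ */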
+
+struct psched_ratecfg {
+ u64 rate_bytes_ps; /* bytes per second */
+ u32 mult;
+ u16 overhead;
+ u16 mpu;
+ u8 linklayer;
+ u8 shift;
+};
+
+static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
+ unsigned int len)
+{
+ len += r->overhead;
+
+ if (len < r->mpu)
+ len = r->mpu;
+
+ if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
+ return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;
+
+ return ((u64)len * r->mult) >> r->shift;
+}
+
+void psched_ratecfg_precompute(struct psched_ratecfg *r,
+ const struct tc_ratespec *conf,
+ u64 rate64);
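+
+/* Illustrative flow (hypothetical shaper): precompute mult/shift once at
+ * configuration time, then convert packet lengths to nanoseconds on the
+ * fast path without divisions.
+ *
+ *	psched_ratecfg_precompute(&q->rate, &qopt->rate, rate64);
+ *	...
+ *	next_tx = now + psched_l2t_ns(&q->rate, qdisc_pkt_len(skb));
+ */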
+
+static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
+ const struct psched_ratecfg *r)
+{
+ memset(res, 0, sizeof(*res));
+
+ /* legacy struct tc_ratespec has a 32bit @rate field
+ * Qdisc using 64bit rate should add new attributes
+ * in order to maintain compatibility.
+ */
+ res->rate = min_t(u64, r->rate_bytes_ps, ~0U);
+
+ res->overhead = r->overhead;
+ res->mpu = r->mpu;
+ res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
+}
+
+struct psched_pktrate {
+ u64 rate_pkts_ps; /* packets per second */
+ u32 mult;
+ u8 shift;
+};
+
+static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
+ unsigned int pkt_num)
+{
+ return ((u64)pkt_num * r->mult) >> r->shift;
+}
+
+void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);
+
+/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
+ * The fast path only needs to access filter list and to update stats
+ */
+struct mini_Qdisc {
+ struct tcf_proto *filter_list;
+ struct tcf_block *block;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
+ struct gnet_stats_queue __percpu *cpu_qstats;
+ unsigned long rcu_state;
+};
+
+static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
+ const struct sk_buff *skb)
+{
+ bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
+}
+
+static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
+{
+ this_cpu_inc(miniq->cpu_qstats->drops);
+}
+
+struct mini_Qdisc_pair {
+ struct mini_Qdisc miniq1;
+ struct mini_Qdisc miniq2;
+ struct mini_Qdisc __rcu **p_miniq;
+};
+
+void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
+ struct tcf_proto *tp_head);
+void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
+ struct mini_Qdisc __rcu **p_miniq);
+void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
+ struct tcf_block *block);
+
+void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);
+
+int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
+
+/* Make sure qdisc is no longer in SCHED state. */
+static inline void qdisc_synchronize(const struct Qdisc *q)
+{
+ while (test_bit(__QDISC_STATE_SCHED, &q->state))
+ msleep(1);
+}
+
+#endif
diff --git a/include/net/scm.h b/include/net/scm.h
new file mode 100644
index 000000000..585adc134
--- /dev/null
+++ b/include/net/scm.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_NET_SCM_H
+#define __LINUX_NET_SCM_H
+
+#include <linux/limits.h>
+#include <linux/net.h>
+#include <linux/cred.h>
+#include <linux/security.h>
+#include <linux/pid.h>
+#include <linux/nsproxy.h>
+#include <linux/sched/signal.h>
+
+/* Well, we should have at least one descriptor open
+ * to accept passed FDs 8)
+ */
+#define SCM_MAX_FD 253
+
+struct scm_creds {
+ u32 pid;
+ kuid_t uid;
+ kgid_t gid;
+};
+
+struct scm_fp_list {
+ short count;
+ short max;
+ struct user_struct *user;
+ struct file *fp[SCM_MAX_FD];
+};
+
+struct scm_cookie {
+ struct pid *pid; /* Skb credentials */
+ struct scm_fp_list *fp; /* Passed files */
+ struct scm_creds creds; /* Skb credentials */
+#ifdef CONFIG_SECURITY_NETWORK
+ u32 secid; /* Passed security ID */
+#endif
+};
+
+void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm);
+void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm);
+int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
+void __scm_destroy(struct scm_cookie *scm);
+struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl);
+
+#ifdef CONFIG_SECURITY_NETWORK
+static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm)
+{
+ security_socket_getpeersec_dgram(sock, NULL, &scm->secid);
+}
+#else
+static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm)
+{ }
+#endif /* CONFIG_SECURITY_NETWORK */
+
+static __inline__ void scm_set_cred(struct scm_cookie *scm,
+ struct pid *pid, kuid_t uid, kgid_t gid)
+{
+ scm->pid = get_pid(pid);
+ scm->creds.pid = pid_vnr(pid);
+ scm->creds.uid = uid;
+ scm->creds.gid = gid;
+}
+
+static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
+{
+ put_pid(scm->pid);
+ scm->pid = NULL;
+}
+
+static __inline__ void scm_destroy(struct scm_cookie *scm)
+{
+ scm_destroy_cred(scm);
+ if (scm->fp)
+ __scm_destroy(scm);
+}
+
+static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
+ struct scm_cookie *scm, bool forcecreds)
+{
+ memset(scm, 0, sizeof(*scm));
+ scm->creds.uid = INVALID_UID;
+ scm->creds.gid = INVALID_GID;
+ if (forcecreds)
+ scm_set_cred(scm, task_tgid(current), current_uid(), current_gid());
+ unix_get_peersec_dgram(sock, scm);
+ if (msg->msg_controllen <= 0)
+ return 0;
+ return __scm_send(sock, msg, scm);
+}
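+
+/* Illustrative sendmsg() prologue (hypothetical protocol): parse SCM control
+ * messages up front and release any attached resources on every exit path.
+ *
+ *	struct scm_cookie scm;
+ *	int err = scm_send(sock, msg, &scm, false);
+ *
+ *	if (err < 0)
+ *		return err;
+ *	...
+ *	scm_destroy(&scm);
+ */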
+
+#ifdef CONFIG_SECURITY_NETWORK
+static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
+{
+ char *secdata;
+ u32 seclen;
+ int err;
+
+ if (test_bit(SOCK_PASSSEC, &sock->flags)) {
+ err = security_secid_to_secctx(scm->secid, &secdata, &seclen);
+
+ if (!err) {
+ put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, seclen, secdata);
+ security_release_secctx(secdata, seclen);
+ }
+ }
+}
+
+static inline bool scm_has_secdata(struct socket *sock)
+{
+ return test_bit(SOCK_PASSSEC, &sock->flags);
+}
+#else
+static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
+{ }
+
+static inline bool scm_has_secdata(struct socket *sock)
+{
+ return false;
+}
+#endif /* CONFIG_SECURITY_NETWORK */
+
+static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
+ struct scm_cookie *scm, int flags)
+{
+ if (!msg->msg_control) {
+ if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp ||
+ scm_has_secdata(sock))
+ msg->msg_flags |= MSG_CTRUNC;
+ scm_destroy(scm);
+ return;
+ }
+
+ if (test_bit(SOCK_PASSCRED, &sock->flags)) {
+ struct user_namespace *current_ns = current_user_ns();
+ struct ucred ucreds = {
+ .pid = scm->creds.pid,
+ .uid = from_kuid_munged(current_ns, scm->creds.uid),
+ .gid = from_kgid_munged(current_ns, scm->creds.gid),
+ };
+ put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds);
+ }
+
+ scm_destroy_cred(scm);
+
+ scm_passec(sock, msg, scm);
+
+ if (!scm->fp)
+ return;
+
+ scm_detach_fds(msg, scm);
+}
+
+
+#endif /* __LINUX_NET_SCM_H */
+
diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h
new file mode 100644
index 000000000..d4b3b2dcd
--- /dev/null
+++ b/include/net/sctp/auth.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright 2007 Hewlett-Packard Development Company, L.P.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Vlad Yasevich <vladislav.yasevich@hp.com>
+ */
+
+#ifndef __sctp_auth_h__
+#define __sctp_auth_h__
+
+#include <linux/list.h>
+#include <linux/refcount.h>
+
+struct sctp_endpoint;
+struct sctp_association;
+struct sctp_authkey;
+struct sctp_hmacalgo;
+struct crypto_shash;
+
+/*
+ * Define a generic struct that will hold all the info
+ * necessary for an HMAC transform
+ */
+struct sctp_hmac {
+ __u16 hmac_id; /* one of the SCTP_AUTH_HMAC_ID_* values */
+ char *hmac_name; /* name for loading */
+ __u16 hmac_len; /* length of the signature */
+};
+
+/* This is a generic structure that contains authentication bytes used
+ * as keying material. It is what is referred to as a byte-vector all
+ * over SCTP-AUTH.
+ */
+struct sctp_auth_bytes {
+ refcount_t refcnt;
+ __u32 len;
+ __u8 data[];
+};
+
+/* Definition for a shared key, whether endpoint or association */
+struct sctp_shared_key {
+ struct list_head key_list;
+ struct sctp_auth_bytes *key;
+ refcount_t refcnt;
+ __u16 key_id;
+ __u8 deactivated;
+};
+
+#define key_for_each(__key, __list_head) \
+ list_for_each_entry(__key, __list_head, key_list)
+
+#define key_for_each_safe(__key, __tmp, __list_head) \
+ list_for_each_entry_safe(__key, __tmp, __list_head, key_list)
+
+static inline void sctp_auth_key_hold(struct sctp_auth_bytes *key)
+{
+ if (!key)
+ return;
+
+ refcount_inc(&key->refcnt);
+}
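+
+/* Hypothetical hold/put pairing: a caller keeping key material beyond the
+ * lifetime of the structure it was found in takes its own reference.
+ *
+ *	sctp_auth_key_hold(key);
+ *	...
+ *	sctp_auth_key_put(key);
+ */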
+
+void sctp_auth_key_put(struct sctp_auth_bytes *key);
+struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp);
+void sctp_auth_destroy_keys(struct list_head *keys);
+int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp);
+struct sctp_shared_key *sctp_auth_get_shkey(
+ const struct sctp_association *asoc,
+ __u16 key_id);
+int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep,
+ struct sctp_association *asoc,
+ gfp_t gfp);
+int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp);
+void sctp_auth_destroy_hmacs(struct crypto_shash *auth_hmacs[]);
+struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id);
+struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc);
+void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc,
+ struct sctp_hmac_algo_param *hmacs);
+int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc,
+ __be16 hmac_id);
+int sctp_auth_send_cid(enum sctp_cid chunk,
+ const struct sctp_association *asoc);
+int sctp_auth_recv_cid(enum sctp_cid chunk,
+ const struct sctp_association *asoc);
+void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
+ struct sk_buff *skb, struct sctp_auth_chunk *auth,
+ struct sctp_shared_key *ep_key, gfp_t gfp);
+void sctp_auth_shkey_release(struct sctp_shared_key *sh_key);
+void sctp_auth_shkey_hold(struct sctp_shared_key *sh_key);
+
+/* API Helpers */
+int sctp_auth_ep_add_chunkid(struct sctp_endpoint *ep, __u8 chunk_id);
+int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
+ struct sctp_hmacalgo *hmacs);
+int sctp_auth_set_key(struct sctp_endpoint *ep, struct sctp_association *asoc,
+ struct sctp_authkey *auth_key);
+int sctp_auth_set_active_key(struct sctp_endpoint *ep,
+ struct sctp_association *asoc, __u16 key_id);
+int sctp_auth_del_key_id(struct sctp_endpoint *ep,
+ struct sctp_association *asoc, __u16 key_id);
+int sctp_auth_deact_key_id(struct sctp_endpoint *ep,
+ struct sctp_association *asoc, __u16 key_id);
+int sctp_auth_init(struct sctp_endpoint *ep, gfp_t gfp);
+void sctp_auth_free(struct sctp_endpoint *ep);
+
+#endif
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
new file mode 100644
index 000000000..5a9bb09f3
--- /dev/null
+++ b/include/net/sctp/checksum.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel reference Implementation
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001-2003 International Business Machines, Corp.
+ *
+ * This file is part of the SCTP kernel reference Implementation
+ *
+ * SCTP Checksum functions
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Dinakaran Joseph
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ *
+ * Rewritten to use libcrc32c by:
+ * Vlad Yasevich <vladislav.yasevich@hp.com>
+ */
+
+#ifndef __sctp_checksum_h__
+#define __sctp_checksum_h__
+
+#include <linux/types.h>
+#include <net/sctp/sctp.h>
+#include <linux/crc32c.h>
+#include <linux/crc32.h>
+
+static inline __wsum sctp_csum_update(const void *buff, int len, __wsum sum)
+{
+ /* This uses the crypto implementation of crc32c, which is either
+ * implemented w/ hardware support or resolves to __crc32c_le().
+ */
+ return (__force __wsum)crc32c((__force __u32)sum, buff, len);
+}
+
+static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
+ int offset, int len)
+{
+ return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
+ (__force __u32)csum2, len);
+}
+
+static const struct skb_checksum_ops sctp_csum_ops = {
+ .update = sctp_csum_update,
+ .combine = sctp_csum_combine,
+};
+
+static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
+ unsigned int offset)
+{
+ struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
+ __le32 old = sh->checksum;
+ __wsum new;
+
+ sh->checksum = 0;
+ new = ~__skb_checksum(skb, offset, skb->len - offset, ~(__wsum)0,
+ &sctp_csum_ops);
+ sh->checksum = old;
+
+ return cpu_to_le32((__force __u32)new);
+}
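+
+/* Illustrative receive-side check (hypothetical caller): recompute the CRC32c
+ * over the SCTP header and payload and compare against the wire checksum.
+ *
+ *	if (sh->checksum != sctp_compute_cksum(skb, offset))
+ *		goto discard;
+ */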
+
+#endif /* __sctp_checksum_h__ */
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
new file mode 100644
index 000000000..2058fabff
--- /dev/null
+++ b/include/net/sctp/command.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel Implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (C) 1999-2001 Cisco, Motorola
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These are the definitions needed for the command object.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#ifndef __net_sctp_command_h__
+#define __net_sctp_command_h__
+
+#include <net/sctp/constants.h>
+#include <net/sctp/structs.h>
+
+
+enum sctp_verb {
+ SCTP_CMD_NOP = 0, /* Do nothing. */
+ SCTP_CMD_NEW_ASOC, /* Register a new association. */
+ SCTP_CMD_DELETE_TCB, /* Delete the current association. */
+ SCTP_CMD_NEW_STATE, /* Enter a new state. */
+ SCTP_CMD_REPORT_TSN, /* Record the arrival of a TSN. */
+ SCTP_CMD_GEN_SACK, /* Send a Selective ACK (maybe). */
+ SCTP_CMD_PROCESS_SACK, /* Process an inbound SACK. */
+ SCTP_CMD_GEN_INIT_ACK, /* Generate an INIT ACK chunk. */
+ SCTP_CMD_PEER_INIT, /* Process a INIT from the peer. */
+ SCTP_CMD_GEN_COOKIE_ECHO, /* Generate a COOKIE ECHO chunk. */
+ SCTP_CMD_CHUNK_ULP, /* Send a chunk to the sockets layer. */
+ SCTP_CMD_EVENT_ULP, /* Send a notification to the sockets layer. */
+ SCTP_CMD_REPLY, /* Send a chunk to our peer. */
+ SCTP_CMD_SEND_PKT, /* Send a full packet to our peer. */
+ SCTP_CMD_RETRAN, /* Mark a transport for retransmission. */
+ SCTP_CMD_ECN_CE, /* Do delayed CE processing. */
+ SCTP_CMD_ECN_ECNE, /* Do delayed ECNE processing. */
+ SCTP_CMD_ECN_CWR, /* Do delayed CWR processing. */
+ SCTP_CMD_TIMER_START, /* Start a timer. */
+ SCTP_CMD_TIMER_START_ONCE, /* Start a timer once */
+ SCTP_CMD_TIMER_RESTART, /* Restart a timer. */
+ SCTP_CMD_TIMER_STOP, /* Stop a timer. */
+ SCTP_CMD_INIT_CHOOSE_TRANSPORT, /* Choose transport for an INIT. */
+ SCTP_CMD_INIT_COUNTER_RESET, /* Reset init counter. */
+ SCTP_CMD_INIT_COUNTER_INC, /* Increment init counter. */
+ SCTP_CMD_INIT_RESTART, /* High level, do init timer work. */
+ SCTP_CMD_COOKIEECHO_RESTART, /* High level, do cookie-echo timer work. */
+ SCTP_CMD_INIT_FAILED, /* High level, do init failure work. */
+ SCTP_CMD_REPORT_DUP, /* Report a duplicate TSN. */
+ SCTP_CMD_STRIKE, /* Mark a strike against a transport. */
+ SCTP_CMD_HB_TIMERS_START, /* Start the heartbeat timers. */
+ SCTP_CMD_HB_TIMER_UPDATE, /* Update a heartbeat timer. */
+ SCTP_CMD_HB_TIMERS_STOP, /* Stop the heartbeat timers. */
+ SCTP_CMD_PROBE_TIMER_UPDATE, /* Update a probe timer. */
+ SCTP_CMD_TRANSPORT_HB_SENT, /* Reset the status of a transport. */
+ SCTP_CMD_TRANSPORT_IDLE, /* Do manipulations on idle transport */
+ SCTP_CMD_TRANSPORT_ON, /* Mark the transport as active. */
+ SCTP_CMD_REPORT_ERROR, /* Pass this error back out of the sm. */
+ SCTP_CMD_REPORT_BAD_TAG, /* Verification tags didn't match. */
+ SCTP_CMD_PROCESS_CTSN, /* Sideeffect from shutdown. */
+ SCTP_CMD_ASSOC_FAILED, /* Handle association failure. */
+ SCTP_CMD_DISCARD_PACKET, /* Discard the whole packet. */
+ SCTP_CMD_GEN_SHUTDOWN, /* Generate a SHUTDOWN chunk. */
+ SCTP_CMD_PURGE_OUTQUEUE, /* Purge all data waiting to be sent. */
+ SCTP_CMD_SETUP_T2, /* Hi-level, setup T2-shutdown parms. */
+ SCTP_CMD_RTO_PENDING, /* Set transport's rto_pending. */
+ SCTP_CMD_PART_DELIVER, /* Partial data delivery considerations. */
+ SCTP_CMD_RENEGE, /* Renege data on an association. */
+ SCTP_CMD_SETUP_T4, /* ADDIP, setup T4 RTO timer parms. */
+ SCTP_CMD_PROCESS_OPERR, /* Process an ERROR chunk. */
+ SCTP_CMD_REPORT_FWDTSN, /* Report new cumulative TSN Ack. */
+ SCTP_CMD_PROCESS_FWDTSN, /* Skips were reported, so process further. */
+ SCTP_CMD_CLEAR_INIT_TAG, /* Clears association peer's inittag. */
+ SCTP_CMD_DEL_NON_PRIMARY, /* Removes non-primary peer transports. */
+ SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */
+ SCTP_CMD_FORCE_PRIM_RETRAN, /* Forces retrans. over primary path. */
+ SCTP_CMD_SET_SK_ERR, /* Set sk_err */
+ SCTP_CMD_ASSOC_CHANGE, /* generate and send assoc_change event */
+ SCTP_CMD_ADAPTATION_IND, /* generate and send adaptation event */
+ SCTP_CMD_PEER_NO_AUTH, /* generate and send authentication event */
+ SCTP_CMD_ASSOC_SHKEY, /* generate the association shared keys */
+ SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
+ SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
+ SCTP_CMD_SEND_MSG, /* Send the whole user message */
+ SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
+ SCTP_CMD_SET_ASOC, /* Restore association context */
+ SCTP_CMD_LAST
+};
+
+/* How many commands can you put in a struct sctp_cmd_seq?
+ * This is a rather arbitrary number, ideally derived from a careful
+ * analysis of the state functions, but in reality just taken from
+ * thin air in the hope that we don't trigger a kernel panic.
+ */
+#define SCTP_MAX_NUM_COMMANDS 20
+
+union sctp_arg {
+ void *zero_all; /* Set to NULL to clear the entire union */
+ __s32 i32;
+ __u32 u32;
+ __be32 be32;
+ __u16 u16;
+ __u8 u8;
+ int error;
+ __be16 err;
+ enum sctp_state state;
+ enum sctp_event_timeout to;
+ struct sctp_chunk *chunk;
+ struct sctp_association *asoc;
+ struct sctp_transport *transport;
+ struct sctp_bind_addr *bp;
+ struct sctp_init_chunk *init;
+ struct sctp_ulpevent *ulpevent;
+ struct sctp_packet *packet;
+ struct sctp_sackhdr *sackh;
+ struct sctp_datamsg *msg;
+};
+
+/* We are simulating ML type constructors here.
+ *
+ * SCTP_ARG_CONSTRUCTOR(NAME, TYPE, ELT) builds a function called
+ * SCTP_NAME() which takes an argument of type TYPE and returns an
+ * union sctp_arg. It does this by inserting the sole argument into
+ * the ELT union element of a local union sctp_arg.
+ *
+ * E.g., SCTP_ARG_CONSTRUCTOR(I32, __s32, i32) builds SCTP_I32(arg),
+ * which takes an __s32 and returns a union sctp_arg containing the
+ * __s32. So, after foo = SCTP_I32(arg), foo.i32 == arg.
+ */
+
+#define SCTP_ARG_CONSTRUCTOR(name, type, elt) \
+static inline union sctp_arg \
+SCTP_## name (type arg) \
+{ union sctp_arg retval;\
+ retval.zero_all = NULL;\
+ retval.elt = arg;\
+ return retval;\
+}
+
+SCTP_ARG_CONSTRUCTOR(I32, __s32, i32)
+SCTP_ARG_CONSTRUCTOR(U32, __u32, u32)
+SCTP_ARG_CONSTRUCTOR(BE32, __be32, be32)
+SCTP_ARG_CONSTRUCTOR(U16, __u16, u16)
+SCTP_ARG_CONSTRUCTOR(U8, __u8, u8)
+SCTP_ARG_CONSTRUCTOR(ERROR, int, error)
+SCTP_ARG_CONSTRUCTOR(PERR, __be16, err) /* protocol error */
+SCTP_ARG_CONSTRUCTOR(STATE, enum sctp_state, state)
+SCTP_ARG_CONSTRUCTOR(TO, enum sctp_event_timeout, to)
+SCTP_ARG_CONSTRUCTOR(CHUNK, struct sctp_chunk *, chunk)
+SCTP_ARG_CONSTRUCTOR(ASOC, struct sctp_association *, asoc)
+SCTP_ARG_CONSTRUCTOR(TRANSPORT, struct sctp_transport *, transport)
+SCTP_ARG_CONSTRUCTOR(BA, struct sctp_bind_addr *, bp)
+SCTP_ARG_CONSTRUCTOR(PEER_INIT, struct sctp_init_chunk *, init)
+SCTP_ARG_CONSTRUCTOR(ULPEVENT, struct sctp_ulpevent *, ulpevent)
+SCTP_ARG_CONSTRUCTOR(PACKET, struct sctp_packet *, packet)
+SCTP_ARG_CONSTRUCTOR(SACKH, struct sctp_sackhdr *, sackh)
+SCTP_ARG_CONSTRUCTOR(DATAMSG, struct sctp_datamsg *, msg)
+
+static inline union sctp_arg SCTP_FORCE(void)
+{
+ return SCTP_I32(1);
+}
+
+static inline union sctp_arg SCTP_NOFORCE(void)
+{
+ return SCTP_I32(0);
+}
+
+static inline union sctp_arg SCTP_NULL(void)
+{
+ union sctp_arg retval;
+ retval.zero_all = NULL;
+ return retval;
+}
+
+struct sctp_cmd {
+ union sctp_arg obj;
+ enum sctp_verb verb;
+};
+
+struct sctp_cmd_seq {
+ struct sctp_cmd cmds[SCTP_MAX_NUM_COMMANDS];
+ struct sctp_cmd *last_used_slot;
+ struct sctp_cmd *next_cmd;
+};
+
+
+/* Initialize a block of memory as a command sequence.
+ * Return 0 if the initialization fails.
+ */
+static inline int sctp_init_cmd_seq(struct sctp_cmd_seq *seq)
+{
+ /* cmds[] is filled backwards to simplify the overflow BUG() check */
+ seq->last_used_slot = seq->cmds + SCTP_MAX_NUM_COMMANDS;
+ seq->next_cmd = seq->last_used_slot;
+ return 1; /* We always succeed. */
+}
+
+
+/* Add a command to a struct sctp_cmd_seq.
+ *
+ * Use the SCTP_* constructors defined by SCTP_ARG_CONSTRUCTOR() above
+ * to wrap data which goes in the obj argument.
+ */
+static inline void sctp_add_cmd_sf(struct sctp_cmd_seq *seq,
+ enum sctp_verb verb, union sctp_arg obj)
+{
+ struct sctp_cmd *cmd = seq->last_used_slot - 1;
+
+ BUG_ON(cmd < seq->cmds);
+
+ cmd->verb = verb;
+ cmd->obj = obj;
+ seq->last_used_slot = cmd;
+}
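+
+/* Illustrative use from a hypothetical state function: each verb is queued
+ * with its argument wrapped by the matching SCTP_* constructor.
+ *
+ *	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ *			SCTP_STATE(SCTP_STATE_ESTABLISHED));
+ *	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+ */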
+
+/* Return the next command structure in an sctp_cmd_seq.
+ * Return NULL at the end of the sequence.
+ */
+static inline struct sctp_cmd *sctp_next_cmd(struct sctp_cmd_seq *seq)
+{
+ if (seq->next_cmd <= seq->last_used_slot)
+ return NULL;
+
+ return --seq->next_cmd;
+}
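+
+/* Illustrative drain loop (hypothetical interpreter; handle_one_cmd() is an
+ * assumed helper, not part of this header):
+ *
+ *	while ((cmd = sctp_next_cmd(commands)))
+ *		handle_one_cmd(cmd->verb, cmd->obj);
+ */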
+
+#endif /* __net_sctp_command_h__ */
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
new file mode 100644
index 000000000..5859e0a16
--- /dev/null
+++ b/include/net/sctp/constants.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Randall Stewart <randall@stewart.chicago.il.us>
+ * Ken Morneau <kmorneau@cisco.com>
+ * Qiaobing Xie <qxie1@motorola.com>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Sridhar Samudrala <samudrala@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ */
+
+#ifndef __sctp_constants_h__
+#define __sctp_constants_h__
+
+#include <linux/sctp.h>
+#include <linux/ipv6.h> /* For ipv6hdr. */
+#include <net/tcp_states.h> /* For TCP states used in enum sctp_sock_state */
+
+/* Value used for stream negotiation. */
+enum { SCTP_MAX_STREAM = 0xffff };
+enum { SCTP_DEFAULT_OUTSTREAMS = 10 };
+enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
+
+/* Since CIDs are sparse, we need all four of the following
+ * symbols. CIDs are dense through SCTP_CID_BASE_MAX.
+ */
+#define SCTP_CID_BASE_MAX SCTP_CID_SHUTDOWN_COMPLETE
+
+#define SCTP_NUM_BASE_CHUNK_TYPES (SCTP_CID_BASE_MAX + 1)
+
+#define SCTP_NUM_ADDIP_CHUNK_TYPES 2
+
+#define SCTP_NUM_PRSCTP_CHUNK_TYPES 1
+
+#define SCTP_NUM_RECONF_CHUNK_TYPES 1
+
+#define SCTP_NUM_AUTH_CHUNK_TYPES 1
+
+#define SCTP_NUM_CHUNK_TYPES (SCTP_NUM_BASE_CHUNK_TYPES + \
+ SCTP_NUM_ADDIP_CHUNK_TYPES +\
+ SCTP_NUM_PRSCTP_CHUNK_TYPES +\
+ SCTP_NUM_RECONF_CHUNK_TYPES +\
+ SCTP_NUM_AUTH_CHUNK_TYPES)
+
+/* These are the different flavours of event. */
+enum sctp_event_type {
+ SCTP_EVENT_T_CHUNK = 1,
+ SCTP_EVENT_T_TIMEOUT,
+ SCTP_EVENT_T_OTHER,
+ SCTP_EVENT_T_PRIMITIVE
+};
+
+/* As a convenience for the state machine, we append SCTP_EVENT_* and
+ * SCTP_ULP_* to the list of possible chunks.
+ */
+
+enum sctp_event_timeout {
+ SCTP_EVENT_TIMEOUT_NONE = 0,
+ SCTP_EVENT_TIMEOUT_T1_COOKIE,
+ SCTP_EVENT_TIMEOUT_T1_INIT,
+ SCTP_EVENT_TIMEOUT_T2_SHUTDOWN,
+ SCTP_EVENT_TIMEOUT_T3_RTX,
+ SCTP_EVENT_TIMEOUT_T4_RTO,
+ SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
+ SCTP_EVENT_TIMEOUT_HEARTBEAT,
+ SCTP_EVENT_TIMEOUT_RECONF,
+ SCTP_EVENT_TIMEOUT_PROBE,
+ SCTP_EVENT_TIMEOUT_SACK,
+ SCTP_EVENT_TIMEOUT_AUTOCLOSE,
+};
+
+#define SCTP_EVENT_TIMEOUT_MAX SCTP_EVENT_TIMEOUT_AUTOCLOSE
+#define SCTP_NUM_TIMEOUT_TYPES (SCTP_EVENT_TIMEOUT_MAX + 1)
+
+enum sctp_event_other {
+ SCTP_EVENT_NO_PENDING_TSN = 0,
+ SCTP_EVENT_ICMP_PROTO_UNREACH,
+};
+
+#define SCTP_EVENT_OTHER_MAX SCTP_EVENT_ICMP_PROTO_UNREACH
+#define SCTP_NUM_OTHER_TYPES (SCTP_EVENT_OTHER_MAX + 1)
+
+/* These are primitive requests from the ULP. */
+enum sctp_event_primitive {
+ SCTP_PRIMITIVE_ASSOCIATE = 0,
+ SCTP_PRIMITIVE_SHUTDOWN,
+ SCTP_PRIMITIVE_ABORT,
+ SCTP_PRIMITIVE_SEND,
+ SCTP_PRIMITIVE_REQUESTHEARTBEAT,
+ SCTP_PRIMITIVE_ASCONF,
+ SCTP_PRIMITIVE_RECONF,
+};
+
+#define SCTP_EVENT_PRIMITIVE_MAX SCTP_PRIMITIVE_RECONF
+#define SCTP_NUM_PRIMITIVE_TYPES (SCTP_EVENT_PRIMITIVE_MAX + 1)
+
+/* We define here a utility type for manipulating subtypes.
+ * The subtype constructors all work like this:
+ *
+ * union sctp_subtype foo = SCTP_ST_CHUNK(SCTP_CID_INIT);
+ */
+
+union sctp_subtype {
+ enum sctp_cid chunk;
+ enum sctp_event_timeout timeout;
+ enum sctp_event_other other;
+ enum sctp_event_primitive primitive;
+};
+
+#define SCTP_SUBTYPE_CONSTRUCTOR(_name, _type, _elt) \
+static inline union sctp_subtype \
+SCTP_ST_## _name (_type _arg) \
+{ union sctp_subtype _retval; _retval._elt = _arg; return _retval; }
+
+SCTP_SUBTYPE_CONSTRUCTOR(CHUNK, enum sctp_cid, chunk)
+SCTP_SUBTYPE_CONSTRUCTOR(TIMEOUT, enum sctp_event_timeout, timeout)
+SCTP_SUBTYPE_CONSTRUCTOR(OTHER, enum sctp_event_other, other)
+SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive)
+
+
+#define sctp_chunk_is_data(a) ((a)->chunk_hdr->type == SCTP_CID_DATA || \
+ (a)->chunk_hdr->type == SCTP_CID_I_DATA)
+
+/* Internal error codes */
+enum sctp_ierror {
+ SCTP_IERROR_NO_ERROR = 0,
+ SCTP_IERROR_BASE = 1000,
+ SCTP_IERROR_NO_COOKIE,
+ SCTP_IERROR_BAD_SIG,
+ SCTP_IERROR_STALE_COOKIE,
+ SCTP_IERROR_NOMEM,
+ SCTP_IERROR_MALFORMED,
+ SCTP_IERROR_BAD_TAG,
+ SCTP_IERROR_BIG_GAP,
+ SCTP_IERROR_DUP_TSN,
+ SCTP_IERROR_HIGH_TSN,
+ SCTP_IERROR_IGNORE_TSN,
+ SCTP_IERROR_NO_DATA,
+ SCTP_IERROR_BAD_STREAM,
+ SCTP_IERROR_BAD_PORTS,
+ SCTP_IERROR_AUTH_BAD_HMAC,
+ SCTP_IERROR_AUTH_BAD_KEYID,
+ SCTP_IERROR_PROTO_VIOLATION,
+ SCTP_IERROR_ERROR,
+ SCTP_IERROR_ABORT,
+};
+
+
+
+/* SCTP state defines for internal state machine */
+enum sctp_state {
+
+ SCTP_STATE_CLOSED = 0,
+ SCTP_STATE_COOKIE_WAIT = 1,
+ SCTP_STATE_COOKIE_ECHOED = 2,
+ SCTP_STATE_ESTABLISHED = 3,
+ SCTP_STATE_SHUTDOWN_PENDING = 4,
+ SCTP_STATE_SHUTDOWN_SENT = 5,
+ SCTP_STATE_SHUTDOWN_RECEIVED = 6,
+ SCTP_STATE_SHUTDOWN_ACK_SENT = 7,
+
+};
+
+#define SCTP_STATE_MAX SCTP_STATE_SHUTDOWN_ACK_SENT
+#define SCTP_STATE_NUM_STATES (SCTP_STATE_MAX + 1)
+
+/* These are values for sk->state.
+ * For a UDP-style SCTP socket, the states are defined as follows
+ * - A socket in SCTP_SS_CLOSED state indicates that it is not willing to
+ * accept new associations, but it can initiate the creation of new ones.
+ * - A socket in SCTP_SS_LISTENING state indicates that it is willing to
+ * accept new associations and can initiate the creation of new ones.
+ * - A socket in SCTP_SS_ESTABLISHED state indicates that it is a peeled off
+ * socket with one association.
+ * For a TCP-style SCTP socket, the states are defined as follows
+ * - A socket in SCTP_SS_CLOSED state indicates that it is not willing to
+ * accept new associations, but it can initiate the creation of new ones.
+ * - A socket in SCTP_SS_LISTENING state indicates that it is willing to
+ * accept new associations, but cannot initiate the creation of new ones.
+ * - A socket in SCTP_SS_ESTABLISHED state indicates that it has a single
+ * association.
+ */
+enum sctp_sock_state {
+ SCTP_SS_CLOSED = TCP_CLOSE,
+ SCTP_SS_LISTENING = TCP_LISTEN,
+ SCTP_SS_ESTABLISHING = TCP_SYN_SENT,
+ SCTP_SS_ESTABLISHED = TCP_ESTABLISHED,
+ SCTP_SS_CLOSING = TCP_CLOSE_WAIT,
+};
+
+enum sctp_plpmtud_state {
+ SCTP_PL_DISABLED,
+ SCTP_PL_BASE,
+ SCTP_PL_SEARCH,
+ SCTP_PL_COMPLETE,
+ SCTP_PL_ERROR,
+};
+
+#define SCTP_BASE_PLPMTU 1200
+#define SCTP_MAX_PLPMTU 9000
+#define SCTP_MIN_PLPMTU 512
+
+#define SCTP_MAX_PROBES 3
+
+#define SCTP_PL_BIG_STEP 32
+#define SCTP_PL_MIN_STEP 4
+
+/* These functions map various type to printable names. */
+const char *sctp_cname(const union sctp_subtype id); /* chunk types */
+const char *sctp_oname(const union sctp_subtype id); /* other events */
+const char *sctp_tname(const union sctp_subtype id); /* timeouts */
+const char *sctp_pname(const union sctp_subtype id); /* primitives */
+
+/* This is a table of printable names of sctp_state_t's. */
+extern const char *const sctp_state_tbl[];
+extern const char *const sctp_evttype_tbl[];
+extern const char *const sctp_status_tbl[];
+
+/* Maximum chunk length considering padding requirements. */
+enum { SCTP_MAX_CHUNK_LEN = ((1<<16) - sizeof(__u32)) };
+
+/* Encourage Cookie-Echo bundling by pre-fragmenting chunks a little
+ * harder (until reaching ESTABLISHED state).
+ */
+enum { SCTP_ARBITRARY_COOKIE_ECHO_LEN = 200 };
+
+/* Guess at how big to make the TSN mapping array.
+ * We guarantee that we can handle at least this big a gap between the
+ * cumulative ACK and the highest TSN. In practice, we can often
+ * handle up to twice this value.
+ *
+ * NEVER make this more than 32767 (2^15-1). The Gap Ack Blocks in a
+ * SACK (see section 3.3.4) are only 16 bits, so 2*SCTP_TSN_MAP_SIZE
+ * must be less than 65535 (2^16 - 1), or we will have overflow
+ * problems creating SACKs.
+ */
+#define SCTP_TSN_MAP_INITIAL BITS_PER_LONG
+#define SCTP_TSN_MAP_INCREMENT SCTP_TSN_MAP_INITIAL
+#define SCTP_TSN_MAP_SIZE 4096
+
+/* We will not record more than this many duplicate TSNs between two
+ * SACKs. The minimum PMTU is 512. Remove all the headers and there
+ * is enough room for 117 duplicate reports. Round down to the
+ * nearest power of 2.
+ */
+enum { SCTP_MAX_DUP_TSNS = 16 };
+enum { SCTP_MAX_GABS = 16 };
+
+/* Heartbeat interval - 30 secs */
+#define SCTP_DEFAULT_TIMEOUT_HEARTBEAT (30*1000)
+
+/* Delayed sack timer - 200ms */
+#define SCTP_DEFAULT_TIMEOUT_SACK (200)
+
+/* RTO.Initial - 3 seconds
+ * RTO.Min - 1 second
+ * RTO.Max - 60 seconds
+ * RTO.Alpha - 1/8
+ * RTO.Beta - 1/4
+ */
+#define SCTP_RTO_INITIAL (3 * 1000)
+#define SCTP_RTO_MIN (1 * 1000)
+#define SCTP_RTO_MAX (60 * 1000)
+
+#define SCTP_RTO_ALPHA 3 /* 1/8 when converted to right shifts. */
+#define SCTP_RTO_BETA 2 /* 1/4 when converted to right shifts. */
+
+/* Maximum number of new data packets that can be sent in a burst. */
+#define SCTP_DEFAULT_MAX_BURST 4
+
+#define SCTP_CLOCK_GRANULARITY 1 /* 1 jiffy */
+
+#define SCTP_DEFAULT_COOKIE_LIFE (60 * 1000) /* 60 seconds */
+
+#define SCTP_DEFAULT_MINWINDOW 1500 /* default minimum rwnd size */
+#define SCTP_DEFAULT_MAXWINDOW 65535 /* default rwnd size */
+#define SCTP_DEFAULT_RWND_SHIFT 4 /* by default, update on 1/16 of
+ * rcvbuf, which is 1/8 of initial
+ * window
+ */
+#define SCTP_DEFAULT_MAXSEGMENT 1500 /* MTU size, this is the limit
+ * to which we will raise the P-MTU.
+ */
+#define SCTP_DEFAULT_MINSEGMENT 512 /* MTU size ... if no mtu disc */
+
+#define SCTP_SECRET_SIZE 32 /* Number of octets in 256 bits. */
+
+#define SCTP_SIGNATURE_SIZE 20 /* size of a SHA-1 signature */
+
+#define SCTP_COOKIE_MULTIPLE 32 /* Pad out our cookie to make our hash
+ * functions simpler to write.
+ */
+
+#define SCTP_DEFAULT_UDP_PORT 9899 /* default UDP tunneling port */
+
+/* These are the values for pf exposure; UNSET is the default, kept to stay
+ * compatible with old applications.
+ */
+enum {
+ SCTP_PF_EXPOSE_UNSET,
+ SCTP_PF_EXPOSE_DISABLE,
+ SCTP_PF_EXPOSE_ENABLE,
+};
+#define SCTP_PF_EXPOSE_MAX SCTP_PF_EXPOSE_ENABLE
+
+#define SCTP_PS_RETRANS_MAX 0xffff
+
+/* These return values describe the success or failure of a number of
+ * routines which form the lower interface to SCTP_outqueue.
+ */
+enum sctp_xmit {
+ SCTP_XMIT_OK,
+ SCTP_XMIT_PMTU_FULL,
+ SCTP_XMIT_RWND_FULL,
+ SCTP_XMIT_DELAY,
+};
+
+/* These are the commands for manipulating transports. */
+enum sctp_transport_cmd {
+ SCTP_TRANSPORT_UP,
+ SCTP_TRANSPORT_DOWN,
+ SCTP_TRANSPORT_PF,
+};
+
+/* These are the address scopes defined mainly for IPv4 addresses
+ * based on the SCTP IPv4 scoping draft <draft-stewart-tsvwg-sctp-ipv4-00.txt>.
+ * These scopes are hopefully generic enough to be used on scoping both
+ * IPv4 and IPv6 addresses in SCTP.
+ * At this point, the IPv6 scopes will be mapped to these internal scopes
+ * as much as possible.
+ */
+enum sctp_scope {
+ SCTP_SCOPE_GLOBAL, /* IPv4 global addresses */
+ SCTP_SCOPE_PRIVATE, /* IPv4 private addresses */
+ SCTP_SCOPE_LINK, /* IPv4 link local address */
+ SCTP_SCOPE_LOOPBACK, /* IPv4 loopback address */
+ SCTP_SCOPE_UNUSABLE, /* IPv4 unusable addresses */
+};
+
+enum {
+ SCTP_SCOPE_POLICY_DISABLE, /* Disable IPv4 address scoping */
+ SCTP_SCOPE_POLICY_ENABLE, /* Enable IPv4 address scoping */
+ SCTP_SCOPE_POLICY_PRIVATE, /* Follow draft but allow IPv4 private addresses */
+ SCTP_SCOPE_POLICY_LINK, /* Follow draft but allow IPv4 link local addresses */
+};
+
+#define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK
+
+/* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
+ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
+ * Also, per RFC Section 8.4, non-unicast addresses are not considered valid SCTP
+ * addresses.
+ */
+#define IS_IPV4_UNUSABLE_ADDRESS(a) \
+ ((htonl(INADDR_BROADCAST) == a) || \
+ ipv4_is_multicast(a) || \
+ ipv4_is_zeronet(a) || \
+ ipv4_is_anycast_6to4(a))
+
+/* Flags used for the bind address copy functions. */
+#define SCTP_ADDR4_ALLOWED 0x00000001 /* IPv4 address is allowed by
+ local sock family */
+#define SCTP_ADDR6_ALLOWED 0x00000002 /* IPv6 address is allowed by
+ local sock family */
+#define SCTP_ADDR4_PEERSUPP 0x00000004 /* IPv4 address is supported by
+ peer */
+#define SCTP_ADDR6_PEERSUPP 0x00000008 /* IPv6 address is supported by
+ peer */
+
+/* Reasons to retransmit. */
+enum sctp_retransmit_reason {
+ SCTP_RTXR_T3_RTX,
+ SCTP_RTXR_FAST_RTX,
+ SCTP_RTXR_PMTUD,
+ SCTP_RTXR_T1_RTX,
+};
+
+/* Reasons to lower cwnd. */
+enum sctp_lower_cwnd {
+ SCTP_LOWER_CWND_T3_RTX,
+ SCTP_LOWER_CWND_FAST_RTX,
+ SCTP_LOWER_CWND_ECNE,
+ SCTP_LOWER_CWND_INACTIVE,
+};
+
+
+/* SCTP-AUTH Necessary constants */
+
+/* SCTP-AUTH, Section 3.3
+ *
+ * The following Table 2 shows the currently defined values for HMAC
+ * identifiers.
+ *
+ * +-----------------+--------------------------+
+ * | HMAC Identifier | Message Digest Algorithm |
+ * +-----------------+--------------------------+
+ * | 0 | Reserved |
+ * | 1 | SHA-1 defined in [8] |
+ * | 2 | Reserved |
+ * | 3 | SHA-256 defined in [8] |
+ * +-----------------+--------------------------+
+ */
+enum {
+ SCTP_AUTH_HMAC_ID_RESERVED_0,
+ SCTP_AUTH_HMAC_ID_SHA1,
+ SCTP_AUTH_HMAC_ID_RESERVED_2,
+#if defined (CONFIG_CRYPTO_SHA256) || defined (CONFIG_CRYPTO_SHA256_MODULE)
+ SCTP_AUTH_HMAC_ID_SHA256,
+#endif
+ __SCTP_AUTH_HMAC_MAX
+};
+
+#define SCTP_AUTH_HMAC_ID_MAX (__SCTP_AUTH_HMAC_MAX - 1)
+#define SCTP_AUTH_NUM_HMACS __SCTP_AUTH_HMAC_MAX
+#define SCTP_SHA1_SIG_SIZE 20
+#define SCTP_SHA256_SIG_SIZE 32
+
+/* SCTP-AUTH, Section 3.2
+ * The chunk types for INIT, INIT-ACK, SHUTDOWN-COMPLETE and AUTH chunks
+ * MUST NOT be listed in the CHUNKS parameter
+ */
+#define SCTP_NUM_NOAUTH_CHUNKS 4
+#define SCTP_AUTH_MAX_CHUNKS (SCTP_NUM_CHUNK_TYPES - SCTP_NUM_NOAUTH_CHUNKS)
+
+/* SCTP-AUTH Section 6.1
+ * The RANDOM parameter MUST contain a 32 byte random number.
+ */
+#define SCTP_AUTH_RANDOM_LENGTH 32
+
+#define SCTP_PROBE_TIMER_MIN 5000
+
+#endif /* __sctp_constants_h__ */
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
new file mode 100644
index 000000000..a04999ee9
--- /dev/null
+++ b/include/net/sctp/sctp.h
@@ -0,0 +1,679 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001-2003 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * The base lksctp header.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Ryan Layer <rmlayer@us.ibm.com>
+ * Kevin Gao <kevin.gao@intel.com>
+ */
+
+#ifndef __net_sctp_h__
+#define __net_sctp_h__
+
+/* Header Strategy.
+ * Start getting some control over the header file dependencies:
+ * includes
+ * constants
+ * structs
+ * prototypes
+ * macros, externs, and inlines
+ *
+ * Move test_frame specific items out of the kernel headers
+ * and into the test frame headers. This is not perfect in any sense
+ * and will continue to evolve.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <linux/tty.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/idr.h>
+
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#endif
+
+#include <linux/uaccess.h>
+#include <asm/page.h>
+#include <net/sock.h>
+#include <net/snmp.h>
+#include <net/sctp/structs.h>
+#include <net/sctp/constants.h>
+
+#ifdef CONFIG_IP_SCTP_MODULE
+#define SCTP_PROTOSW_FLAG 0
+#else /* static! */
+#define SCTP_PROTOSW_FLAG INET_PROTOSW_PERMANENT
+#endif
+
+/* Round an int up to the next multiple of 4. */
+#define SCTP_PAD4(s) (((s)+3)&~3)
+/* Truncate to the previous multiple of 4. */
+#define SCTP_TRUNC4(s) ((s)&~3)
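+/* For example, SCTP_PAD4(5) == 8 and SCTP_PAD4(8) == 8; SCTP_TRUNC4(7) == 4. */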
+
+/*
+ * Function declarations.
+ */
+
+/*
+ * sctp/protocol.c
+ */
+int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *addr,
+ enum sctp_scope, gfp_t gfp, int flags);
+struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
+int sctp_register_pf(struct sctp_pf *, sa_family_t);
+void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
+int sctp_udp_sock_start(struct net *net);
+void sctp_udp_sock_stop(struct net *net);
+
+/*
+ * sctp/socket.c
+ */
+int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags);
+int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+int sctp_inet_listen(struct socket *sock, int backlog);
+void sctp_write_space(struct sock *sk);
+void sctp_data_ready(struct sock *sk);
+__poll_t sctp_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
+void sctp_sock_rfree(struct sk_buff *skb);
+void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+ struct sctp_association *asoc);
+extern struct percpu_counter sctp_sockets_allocated;
+int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
+struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int *);
+
+typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *);
+void sctp_transport_walk_start(struct rhashtable_iter *iter);
+void sctp_transport_walk_stop(struct rhashtable_iter *iter);
+struct sctp_transport *sctp_transport_get_next(struct net *net,
+ struct rhashtable_iter *iter);
+struct sctp_transport *sctp_transport_get_idx(struct net *net,
+ struct rhashtable_iter *iter, int pos);
+int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
+ const union sctp_addr *laddr,
+ const union sctp_addr *paddr, void *p);
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+ struct net *net, int *pos, void *p);
+int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
+int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
+ struct sctp_info *info);
+
+/*
+ * sctp/primitive.c
+ */
+int sctp_primitive_ASSOCIATE(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_SHUTDOWN(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_ABORT(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_SEND(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_REQUESTHEARTBEAT(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_RECONF(struct net *net, struct sctp_association *asoc,
+ void *arg);
+
+/*
+ * sctp/input.c
+ */
+int sctp_rcv(struct sk_buff *skb);
+int sctp_v4_err(struct sk_buff *skb, u32 info);
+int sctp_hash_endpoint(struct sctp_endpoint *ep);
+void sctp_unhash_endpoint(struct sctp_endpoint *);
+struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
+ struct sctphdr *, struct sctp_association **,
+ struct sctp_transport **);
+void sctp_err_finish(struct sock *, struct sctp_transport *);
+int sctp_udp_v4_err(struct sock *sk, struct sk_buff *skb);
+int sctp_udp_v6_err(struct sock *sk, struct sk_buff *skb);
+void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
+ struct sctp_transport *t, __u32 pmtu);
+void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
+ struct sk_buff *);
+void sctp_icmp_proto_unreachable(struct sock *sk,
+ struct sctp_association *asoc,
+ struct sctp_transport *t);
+void sctp_backlog_migrate(struct sctp_association *assoc,
+ struct sock *oldsk, struct sock *newsk);
+int sctp_transport_hashtable_init(void);
+void sctp_transport_hashtable_destroy(void);
+int sctp_hash_transport(struct sctp_transport *t);
+void sctp_unhash_transport(struct sctp_transport *t);
+struct sctp_transport *sctp_addrs_lookup_transport(
+ struct net *net,
+ const union sctp_addr *laddr,
+ const union sctp_addr *paddr);
+struct sctp_transport *sctp_epaddr_lookup_transport(
+ const struct sctp_endpoint *ep,
+ const union sctp_addr *paddr);
+
+/*
+ * sctp/proc.c
+ */
+int __net_init sctp_proc_init(struct net *net);
+
+/*
+ * sctp/offload.c
+ */
+int sctp_offload_init(void);
+
+/*
+ * sctp/stream_sched.c
+ */
+void sctp_sched_ops_init(void);
+
+/*
+ * sctp/stream.c
+ */
+int sctp_send_reset_streams(struct sctp_association *asoc,
+ struct sctp_reset_streams *params);
+int sctp_send_reset_assoc(struct sctp_association *asoc);
+int sctp_send_add_streams(struct sctp_association *asoc,
+ struct sctp_add_streams *params);
+
+/*
+ * Module global variables
+ */
+
+/*
+ * sctp/protocol.c
+ */
+extern struct kmem_cache *sctp_chunk_cachep __read_mostly;
+extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
+extern long sysctl_sctp_mem[3];
+extern int sysctl_sctp_rmem[3];
+extern int sysctl_sctp_wmem[3];
+
+/*
+ * Section: Macros, externs, and inlines
+ */
+
+/* SCTP SNMP MIB stats handlers */
+#define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define __SCTP_INC_STATS(net, field) __SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
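+
+/* Example (illustrative): SCTP_INC_STATS(net, SCTP_MIB_OUTSCTPPACKS)
+ * bumps the per-netns OutSCTPPacks counter. The __SCTP_INC_STATS()
+ * variant maps to the non-preemption-safe SNMP helper and is meant for
+ * contexts that are already protected, such as BH/softirq processing.
+ */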
+
+/* sctp mib definitions */
+enum {
+ SCTP_MIB_NUM = 0,
+ SCTP_MIB_CURRESTAB, /* CurrEstab */
+ SCTP_MIB_ACTIVEESTABS, /* ActiveEstabs */
+ SCTP_MIB_PASSIVEESTABS, /* PassiveEstabs */
+ SCTP_MIB_ABORTEDS, /* Aborteds */
+ SCTP_MIB_SHUTDOWNS, /* Shutdowns */
+ SCTP_MIB_OUTOFBLUES, /* OutOfBlues */
+ SCTP_MIB_CHECKSUMERRORS, /* ChecksumErrors */
+ SCTP_MIB_OUTCTRLCHUNKS, /* OutCtrlChunks */
+ SCTP_MIB_OUTORDERCHUNKS, /* OutOrderChunks */
+ SCTP_MIB_OUTUNORDERCHUNKS, /* OutUnorderChunks */
+ SCTP_MIB_INCTRLCHUNKS, /* InCtrlChunks */
+ SCTP_MIB_INORDERCHUNKS, /* InOrderChunks */
+ SCTP_MIB_INUNORDERCHUNKS, /* InUnorderChunks */
+ SCTP_MIB_FRAGUSRMSGS, /* FragUsrMsgs */
+ SCTP_MIB_REASMUSRMSGS, /* ReasmUsrMsgs */
+ SCTP_MIB_OUTSCTPPACKS, /* OutSCTPPacks */
+ SCTP_MIB_INSCTPPACKS, /* InSCTPPacks */
+ SCTP_MIB_T1_INIT_EXPIREDS,
+ SCTP_MIB_T1_COOKIE_EXPIREDS,
+ SCTP_MIB_T2_SHUTDOWN_EXPIREDS,
+ SCTP_MIB_T3_RTX_EXPIREDS,
+ SCTP_MIB_T4_RTO_EXPIREDS,
+ SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS,
+ SCTP_MIB_DELAY_SACK_EXPIREDS,
+ SCTP_MIB_AUTOCLOSE_EXPIREDS,
+ SCTP_MIB_T1_RETRANSMITS,
+ SCTP_MIB_T3_RETRANSMITS,
+ SCTP_MIB_PMTUD_RETRANSMITS,
+ SCTP_MIB_FAST_RETRANSMITS,
+ SCTP_MIB_IN_PKT_SOFTIRQ,
+ SCTP_MIB_IN_PKT_BACKLOG,
+ SCTP_MIB_IN_PKT_DISCARDS,
+ SCTP_MIB_IN_DATA_CHUNK_DISCARDS,
+ __SCTP_MIB_MAX
+};
+
+#define SCTP_MIB_MAX __SCTP_MIB_MAX
+struct sctp_mib {
+ unsigned long mibs[SCTP_MIB_MAX];
+};
+
+/* helper function to track stats about max rto and related transport */
+static inline void sctp_max_rto(struct sctp_association *asoc,
+ struct sctp_transport *trans)
+{
+ if (asoc->stats.max_obs_rto < (__u64)trans->rto) {
+ asoc->stats.max_obs_rto = trans->rto;
+ memset(&asoc->stats.obs_rto_ipaddr, 0,
+ sizeof(struct sockaddr_storage));
+ memcpy(&asoc->stats.obs_rto_ipaddr, &trans->ipaddr,
+ trans->af_specific->sockaddr_len);
+ }
+}
+
+/*
+ * Macros for keeping a global reference of object allocations.
+ */
+#ifdef CONFIG_SCTP_DBG_OBJCNT
+
+extern atomic_t sctp_dbg_objcnt_sock;
+extern atomic_t sctp_dbg_objcnt_ep;
+extern atomic_t sctp_dbg_objcnt_assoc;
+extern atomic_t sctp_dbg_objcnt_transport;
+extern atomic_t sctp_dbg_objcnt_chunk;
+extern atomic_t sctp_dbg_objcnt_bind_addr;
+extern atomic_t sctp_dbg_objcnt_bind_bucket;
+extern atomic_t sctp_dbg_objcnt_addr;
+extern atomic_t sctp_dbg_objcnt_datamsg;
+extern atomic_t sctp_dbg_objcnt_keys;
+
+/* Macros to atomically increment/decrement objcnt counters. */
+#define SCTP_DBG_OBJCNT_INC(name) \
+atomic_inc(&sctp_dbg_objcnt_## name)
+#define SCTP_DBG_OBJCNT_DEC(name) \
+atomic_dec(&sctp_dbg_objcnt_## name)
+#define SCTP_DBG_OBJCNT(name) \
+atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0)
+
+/* Macro to help create new entries in the global array of
+ * objcnt counters.
+ */
+#define SCTP_DBG_OBJCNT_ENTRY(name) \
+{.label= #name, .counter= &sctp_dbg_objcnt_## name}
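+
+/* Usage sketch (illustrative only; "foo" is a hypothetical object type):
+ * one .c file defines the counter with SCTP_DBG_OBJCNT(foo), allocation
+ * paths call SCTP_DBG_OBJCNT_INC(foo), free paths call
+ * SCTP_DBG_OBJCNT_DEC(foo), and SCTP_DBG_OBJCNT_ENTRY(foo) creates the
+ * matching entry in the array backing the proc output.
+ */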
+
+void sctp_dbg_objcnt_init(struct net *);
+
+#else
+
+#define SCTP_DBG_OBJCNT_INC(name)
+#define SCTP_DBG_OBJCNT_DEC(name)
+
+static inline void sctp_dbg_objcnt_init(struct net *net) { return; }
+
+#endif /* CONFIG_SCTP_DBG_OBJCNT */
+
+#if defined CONFIG_SYSCTL
+void sctp_sysctl_register(void);
+void sctp_sysctl_unregister(void);
+int sctp_sysctl_net_register(struct net *net);
+void sctp_sysctl_net_unregister(struct net *net);
+#else
+static inline void sctp_sysctl_register(void) { return; }
+static inline void sctp_sysctl_unregister(void) { return; }
+static inline int sctp_sysctl_net_register(struct net *net) { return 0; }
+static inline void sctp_sysctl_net_unregister(struct net *net) { return; }
+#endif
+
+/* Size of Supported Address Parameter for 'x' address types. */
+#define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+void sctp_v6_pf_init(void);
+void sctp_v6_pf_exit(void);
+int sctp_v6_protosw_init(void);
+void sctp_v6_protosw_exit(void);
+int sctp_v6_add_protocol(void);
+void sctp_v6_del_protocol(void);
+
+#else /* #if IS_ENABLED(CONFIG_IPV6) */
+
+static inline void sctp_v6_pf_init(void) { return; }
+static inline void sctp_v6_pf_exit(void) { return; }
+static inline int sctp_v6_protosw_init(void) { return 0; }
+static inline void sctp_v6_protosw_exit(void) { return; }
+static inline int sctp_v6_add_protocol(void) { return 0; }
+static inline void sctp_v6_del_protocol(void) { return; }
+
+#endif /* #if IS_ENABLED(CONFIG_IPV6) */
+
+
+/* Map an association to an assoc_id. */
+static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)
+{
+ return asoc ? asoc->assoc_id : 0;
+}
+
+static inline enum sctp_sstat_state
+sctp_assoc_to_state(const struct sctp_association *asoc)
+{
+ /* SCTP's uapi always had SCTP_EMPTY (=0) as a dummy state, but we
+ * got rid of it in kernel space. Therefore SCTP_CLOSED et al start
+ * at 1 in user space but at 0 in kernel space. Since we cannot
+ * break user space, where SCTP_EMPTY is still exposed, we fix it
+ * up with an ugly +1 offset so existing applications keep
+ * working. :(
+ */
+ return asoc->state + 1;
+}
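+
+/* Example: a kernel-space asoc->state of SCTP_STATE_CLOSED (0) is
+ * reported to user space as SCTP_CLOSED (1), and so on for the other
+ * states, which is exactly the +1 offset applied above.
+ */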
+
+/* Look up the association by its id. */
+struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
+
+int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp);
+
+/* A macro to walk a list of skbs. */
+#define sctp_skb_for_each(pos, head, tmp) \
+ skb_queue_walk_safe(head, pos, tmp)
+
+/**
+ * sctp_list_dequeue - remove from the head of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the head of the list. The head item is
+ * returned or %NULL if the list is empty.
+ */
+
+static inline struct list_head *sctp_list_dequeue(struct list_head *list)
+{
+ struct list_head *result = NULL;
+
+ if (!list_empty(list)) {
+ result = list->next;
+ list_del_init(result);
+ }
+ return result;
+}
+
+/* SCTP version of skb_set_owner_r. We need this one because
+ * of the way we have to do receive buffer accounting on bundled
+ * chunks.
+ */
+static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+{
+ struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sctp_sock_rfree;
+ atomic_add(event->rmem_len, &sk->sk_rmem_alloc);
+ /*
+ * This mimics the behavior of skb_set_owner_r
+ */
+ sk_mem_charge(sk, event->rmem_len);
+}
+
+/* Tests if the list has one and only one entry. */
+static inline int sctp_list_single_entry(struct list_head *head)
+{
+ return list_is_singular(head);
+}
+
+static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
+{
+ return !list_empty(&chunk->list);
+}
+
+/* Walk through a list of TLV parameters. Don't trust the
+ * individual parameter lengths and instead depend on
+ * the chunk length to indicate when to stop. Make sure
+ * there is room for a param header too.
+ */
+#define sctp_walk_params(pos, chunk, member)\
+_sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
+
+#define _sctp_walk_params(pos, chunk, end, member)\
+for (pos.v = chunk->member;\
+ (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
+ (void *)chunk + end) &&\
+ pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
+ ntohs(pos.p->length) >= sizeof(struct sctp_paramhdr);\
+ pos.v += SCTP_PAD4(ntohs(pos.p->length)))
+
+#define sctp_walk_errors(err, chunk_hdr)\
+_sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
+
+#define _sctp_walk_errors(err, chunk_hdr, end)\
+for (err = (struct sctp_errhdr *)((void *)chunk_hdr + \
+ sizeof(struct sctp_chunkhdr));\
+ ((void *)err + offsetof(struct sctp_errhdr, length) + sizeof(err->length) <=\
+ (void *)chunk_hdr + end) &&\
+ (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
+ ntohs(err->length) >= sizeof(struct sctp_errhdr); \
+ err = (struct sctp_errhdr *)((void *)err + SCTP_PAD4(ntohs(err->length))))
+
+#define sctp_walk_fwdtsn(pos, chunk)\
+_sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk))
+
+#define _sctp_walk_fwdtsn(pos, chunk, end)\
+for (pos = chunk->subh.fwdtsn_hdr->skip;\
+ (void *)pos <= (void *)chunk->subh.fwdtsn_hdr->skip + end - sizeof(struct sctp_fwdtsn_skip);\
+ pos++)
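+
+/* Usage sketch (illustrative only; "peer_init" is a hypothetical
+ * struct sctp_init_chunk pointer): the walkers expand to for-loop
+ * headers, so callers iterate a chunk's TLVs like this:
+ *
+ *	union sctp_params param;
+ *
+ *	sctp_walk_params(param, peer_init, init_hdr.params) {
+ *		if (param.p->type == SCTP_PARAM_IPV4_ADDRESS)
+ *			... handle the IPv4 address parameter ...
+ *	}
+ */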
+
+/* External references. */
+
+extern struct proto sctp_prot;
+extern struct proto sctpv6_prot;
+void sctp_put_port(struct sock *sk);
+
+extern struct idr sctp_assocs_id;
+extern spinlock_t sctp_assocs_id_lock;
+
+/* Static inline functions. */
+
+/* Convert from an IP version number to an Address Family symbol. */
+static inline int ipver2af(__u8 ipver)
+{
+ switch (ipver) {
+ case 4:
+ return AF_INET;
+ case 6:
+ return AF_INET6;
+ default:
+ return 0;
+ }
+}
+
+/* Convert from an address parameter type to an address family. */
+static inline int param_type2af(__be16 type)
+{
+ switch (type) {
+ case SCTP_PARAM_IPV4_ADDRESS:
+ return AF_INET;
+ case SCTP_PARAM_IPV6_ADDRESS:
+ return AF_INET6;
+ default:
+ return 0;
+ }
+}
+
+/* Warning: The following hash functions assume a power of two 'size'. */
+/* This is the hash function for the SCTP port hash table. */
+static inline int sctp_phashfn(struct net *net, __u16 lport)
+{
+ return (net_hash_mix(net) + lport) & (sctp_port_hashsize - 1);
+}
+
+/* This is the hash function for the endpoint hash table. */
+static inline int sctp_ep_hashfn(struct net *net, __u16 lport)
+{
+ return (net_hash_mix(net) + lport) & (sctp_ep_hashsize - 1);
+}
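+
+/* Example: with sctp_port_hashsize == 4096, "& (4096 - 1)" reduces the
+ * sum to a bucket index, a cheap power-of-two modulo; net_hash_mix()
+ * perturbs the value per network namespace so the same port does not
+ * hash identically in every netns.
+ */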
+
+#define sctp_for_each_hentry(ep, head) \
+ hlist_for_each_entry(ep, head, node)
+
+/* Is a socket of this style? */
+#define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
+static inline int __sctp_style(const struct sock *sk,
+ enum sctp_socket_type style)
+{
+ return sctp_sk(sk)->type == style;
+}
+
+/* Is the association in this state? */
+#define sctp_state(asoc, state) __sctp_state((asoc), (SCTP_STATE_##state))
+static inline int __sctp_state(const struct sctp_association *asoc,
+ enum sctp_state state)
+{
+ return asoc->state == state;
+}
+
+/* Is the socket in this state? */
+#define sctp_sstate(sk, state) __sctp_sstate((sk), (SCTP_SS_##state))
+static inline int __sctp_sstate(const struct sock *sk,
+ enum sctp_sock_state state)
+{
+ return sk->sk_state == state;
+}
+
+/* Map v4-mapped v6 address back to v4 address */
+static inline void sctp_v6_map_v4(union sctp_addr *addr)
+{
+ addr->v4.sin_family = AF_INET;
+ addr->v4.sin_port = addr->v6.sin6_port;
+ addr->v4.sin_addr.s_addr = addr->v6.sin6_addr.s6_addr32[3];
+}
+
+/* Map v4 address to v4-mapped v6 address */
+static inline void sctp_v4_map_v6(union sctp_addr *addr)
+{
+ __be16 port;
+
+ port = addr->v4.sin_port;
+ addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+ addr->v6.sin6_port = port;
+ addr->v6.sin6_family = AF_INET6;
+ addr->v6.sin6_flowinfo = 0;
+ addr->v6.sin6_scope_id = 0;
+ addr->v6.sin6_addr.s6_addr32[0] = 0;
+ addr->v6.sin6_addr.s6_addr32[1] = 0;
+ addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
+}
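+
+/* Example: the IPv4 address 192.0.2.1 maps to the v4-mapped IPv6
+ * address ::ffff:192.0.2.1 (port preserved); sctp_v6_map_v4() above is
+ * the exact inverse.
+ */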
+
+/* Validate the transport's cached route: if the dst fails its cookie
+ * check it is stale, so release it before returning.
+ */
+static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
+{
+ if (t->dst && !dst_check(t->dst, t->dst_cookie))
+ sctp_transport_dst_release(t);
+
+ return t->dst;
+}
+
+/* Calculate the max payload size for a given MTU, or return the total
+ * overhead if the given MTU is zero.
+ */
+static inline __u32 __sctp_mtu_payload(const struct sctp_sock *sp,
+ const struct sctp_transport *t,
+ __u32 mtu, __u32 extra)
+{
+ __u32 overhead = sizeof(struct sctphdr) + extra;
+
+ if (sp) {
+ overhead += sp->pf->af->net_header_len;
+ if (sp->udp_port && (!t || t->encap_port))
+ overhead += sizeof(struct udphdr);
+ } else {
+ overhead += sizeof(struct ipv6hdr);
+ }
+
+ if (WARN_ON_ONCE(mtu && mtu <= overhead))
+ mtu = overhead;
+
+ return mtu ? mtu - overhead : overhead;
+}
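+
+/* Worked example (IPv4, no UDP encapsulation, extra == 0): the overhead
+ * is 20 (IPv4 header) + 12 (SCTP common header) = 32 bytes, so an MTU
+ * of 1500 leaves at most 1468 bytes of payload; calling with mtu == 0
+ * returns the 32-byte overhead itself.
+ */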
+
+static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
+ __u32 mtu, __u32 extra)
+{
+ return __sctp_mtu_payload(sp, NULL, mtu, extra);
+}
+
+static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
+{
+ return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst),
+ SCTP_DEFAULT_MINSEGMENT));
+}
+
+static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
+{
+ __u32 pmtu = sctp_dst_mtu(t->dst);
+
+ if (t->pathmtu == pmtu)
+ return true;
+
+ t->pathmtu = pmtu;
+
+ return false;
+}
+
+static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
+{
+ return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
+}
+
+static inline int sctp_transport_pl_hlen(struct sctp_transport *t)
+{
+ return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0) -
+ sizeof(struct sctphdr);
+}
+
+static inline void sctp_transport_pl_reset(struct sctp_transport *t)
+{
+ if (t->probe_interval && (t->param_flags & SPP_PMTUD_ENABLE) &&
+ (t->state == SCTP_ACTIVE || t->state == SCTP_UNKNOWN)) {
+ if (t->pl.state == SCTP_PL_DISABLED) {
+ t->pl.state = SCTP_PL_BASE;
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+ sctp_transport_reset_probe_timer(t);
+ }
+ } else {
+ if (t->pl.state != SCTP_PL_DISABLED) {
+ if (del_timer(&t->probe_timer))
+ sctp_transport_put(t);
+ t->pl.state = SCTP_PL_DISABLED;
+ }
+ }
+}
+
+static inline void sctp_transport_pl_update(struct sctp_transport *t)
+{
+ if (t->pl.state == SCTP_PL_DISABLED)
+ return;
+
+ t->pl.state = SCTP_PL_BASE;
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+ sctp_transport_reset_probe_timer(t);
+}
+
+static inline bool sctp_transport_pl_enabled(struct sctp_transport *t)
+{
+ return t->pl.state != SCTP_PL_DISABLED;
+}
+
+static inline bool sctp_newsk_ready(const struct sock *sk)
+{
+ return sock_flag(sk, SOCK_DEAD) || sk->sk_socket;
+}
+
+static inline void sctp_sock_set_nodelay(struct sock *sk)
+{
+ lock_sock(sk);
+ sctp_sk(sk)->nodelay = true;
+ release_sock(sk);
+}
+
+#endif /* __net_sctp_h__ */
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
new file mode 100644
index 000000000..f37c7a558
--- /dev/null
+++ b/include/net/sctp/sm.h
@@ -0,0 +1,432 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These are definitions needed by the state machine.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Dajiang Zhang <dajiang.zhang@nokia.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Kevin Gao <kevin.gao@intel.com>
+ */
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <net/sctp/command.h>
+#include <net/sctp/sctp.h>
+
+#ifndef __sctp_sm_h__
+#define __sctp_sm_h__
+
+/*
+ * Possible values for the disposition are:
+ */
+enum sctp_disposition {
+ SCTP_DISPOSITION_DISCARD, /* No further processing. */
+ SCTP_DISPOSITION_CONSUME, /* Process return values normally. */
+ SCTP_DISPOSITION_NOMEM, /* We ran out of memory--recover. */
+ SCTP_DISPOSITION_DELETE_TCB, /* Close the association. */
+ SCTP_DISPOSITION_ABORT, /* Close the association NOW. */
+ SCTP_DISPOSITION_VIOLATION, /* The peer is misbehaving. */
+ SCTP_DISPOSITION_NOT_IMPL, /* This entry is not implemented. */
+ SCTP_DISPOSITION_ERROR, /* This is plain old user error. */
+ SCTP_DISPOSITION_BUG, /* This is a bug. */
+};
+
+typedef enum sctp_disposition (sctp_state_fn_t) (
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands);
+typedef void (sctp_timer_event_t) (struct timer_list *);
+struct sctp_sm_table_entry {
+ sctp_state_fn_t *fn;
+ const char *name;
+};
+
+/* A naming convention of "sctp_sf_xxx" applies to all the state functions
+ * currently in use.
+ */
+
+/* Prototypes for generic state functions. */
+sctp_state_fn_t sctp_sf_not_impl;
+sctp_state_fn_t sctp_sf_bug;
+
+/* Prototypes for generic timer state functions. */
+sctp_state_fn_t sctp_sf_timer_ignore;
+
+/* Prototypes for chunk state functions. */
+sctp_state_fn_t sctp_sf_do_9_1_abort;
+sctp_state_fn_t sctp_sf_cookie_wait_abort;
+sctp_state_fn_t sctp_sf_cookie_echoed_abort;
+sctp_state_fn_t sctp_sf_shutdown_pending_abort;
+sctp_state_fn_t sctp_sf_shutdown_sent_abort;
+sctp_state_fn_t sctp_sf_shutdown_ack_sent_abort;
+sctp_state_fn_t sctp_sf_do_5_1B_init;
+sctp_state_fn_t sctp_sf_do_5_1C_ack;
+sctp_state_fn_t sctp_sf_do_5_1D_ce;
+sctp_state_fn_t sctp_sf_do_5_1E_ca;
+sctp_state_fn_t sctp_sf_do_4_C;
+sctp_state_fn_t sctp_sf_eat_data_6_2;
+sctp_state_fn_t sctp_sf_eat_data_fast_4_4;
+sctp_state_fn_t sctp_sf_eat_sack_6_2;
+sctp_state_fn_t sctp_sf_operr_notify;
+sctp_state_fn_t sctp_sf_t1_init_timer_expire;
+sctp_state_fn_t sctp_sf_t1_cookie_timer_expire;
+sctp_state_fn_t sctp_sf_t2_timer_expire;
+sctp_state_fn_t sctp_sf_t4_timer_expire;
+sctp_state_fn_t sctp_sf_t5_timer_expire;
+sctp_state_fn_t sctp_sf_sendbeat_8_3;
+sctp_state_fn_t sctp_sf_beat_8_3;
+sctp_state_fn_t sctp_sf_backbeat_8_3;
+sctp_state_fn_t sctp_sf_do_9_2_final;
+sctp_state_fn_t sctp_sf_do_9_2_shutdown;
+sctp_state_fn_t sctp_sf_do_9_2_shut_ctsn;
+sctp_state_fn_t sctp_sf_do_ecn_cwr;
+sctp_state_fn_t sctp_sf_do_ecne;
+sctp_state_fn_t sctp_sf_ootb;
+sctp_state_fn_t sctp_sf_pdiscard;
+sctp_state_fn_t sctp_sf_violation;
+sctp_state_fn_t sctp_sf_discard_chunk;
+sctp_state_fn_t sctp_sf_do_5_2_1_siminit;
+sctp_state_fn_t sctp_sf_do_5_2_2_dupinit;
+sctp_state_fn_t sctp_sf_do_5_2_3_initack;
+sctp_state_fn_t sctp_sf_do_5_2_4_dupcook;
+sctp_state_fn_t sctp_sf_unk_chunk;
+sctp_state_fn_t sctp_sf_do_8_5_1_E_sa;
+sctp_state_fn_t sctp_sf_cookie_echoed_err;
+sctp_state_fn_t sctp_sf_do_asconf;
+sctp_state_fn_t sctp_sf_do_asconf_ack;
+sctp_state_fn_t sctp_sf_do_reconf;
+sctp_state_fn_t sctp_sf_do_9_2_reshutack;
+sctp_state_fn_t sctp_sf_eat_fwd_tsn;
+sctp_state_fn_t sctp_sf_eat_fwd_tsn_fast;
+sctp_state_fn_t sctp_sf_eat_auth;
+
+/* Prototypes for primitive event state functions. */
+sctp_state_fn_t sctp_sf_do_prm_asoc;
+sctp_state_fn_t sctp_sf_do_prm_send;
+sctp_state_fn_t sctp_sf_do_9_2_prm_shutdown;
+sctp_state_fn_t sctp_sf_cookie_wait_prm_shutdown;
+sctp_state_fn_t sctp_sf_cookie_echoed_prm_shutdown;
+sctp_state_fn_t sctp_sf_do_9_1_prm_abort;
+sctp_state_fn_t sctp_sf_cookie_wait_prm_abort;
+sctp_state_fn_t sctp_sf_cookie_echoed_prm_abort;
+sctp_state_fn_t sctp_sf_shutdown_pending_prm_abort;
+sctp_state_fn_t sctp_sf_shutdown_sent_prm_abort;
+sctp_state_fn_t sctp_sf_shutdown_ack_sent_prm_abort;
+sctp_state_fn_t sctp_sf_error_closed;
+sctp_state_fn_t sctp_sf_error_shutdown;
+sctp_state_fn_t sctp_sf_ignore_primitive;
+sctp_state_fn_t sctp_sf_do_prm_requestheartbeat;
+sctp_state_fn_t sctp_sf_do_prm_asconf;
+sctp_state_fn_t sctp_sf_do_prm_reconf;
+
+/* Prototypes for other event state functions. */
+sctp_state_fn_t sctp_sf_do_no_pending_tsn;
+sctp_state_fn_t sctp_sf_do_9_2_start_shutdown;
+sctp_state_fn_t sctp_sf_do_9_2_shutdown_ack;
+sctp_state_fn_t sctp_sf_ignore_other;
+sctp_state_fn_t sctp_sf_cookie_wait_icmp_abort;
+
+/* Prototypes for timeout event state functions. */
+sctp_state_fn_t sctp_sf_do_6_3_3_rtx;
+sctp_state_fn_t sctp_sf_send_reconf;
+sctp_state_fn_t sctp_sf_send_probe;
+sctp_state_fn_t sctp_sf_do_6_2_sack;
+sctp_state_fn_t sctp_sf_autoclose_timer_expire;
+
+/* Prototypes for utility support functions. */
+__u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
+const struct sctp_sm_table_entry *sctp_sm_lookup_event(
+ struct net *net,
+ enum sctp_event_type event_type,
+ enum sctp_state state,
+ union sctp_subtype event_subtype);
+int sctp_chunk_iif(const struct sctp_chunk *);
+struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *,
+ struct sctp_chunk *,
+ gfp_t gfp);
+__u32 sctp_generate_verification_tag(void);
+void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag);
+
+/* Prototypes for chunk-building functions. */
+struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
+ const struct sctp_bind_addr *bp,
+ gfp_t gfp, int vparam_len);
+struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ const gfp_t gfp, const int unkparam_len);
+struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
+ const __u32 lowest_tsn,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
+ __u8 flags, int paylen, gfp_t gfp);
+struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc,
+ __u32 new_cum_tsn, size_t nstreams,
+ struct sctp_ifwdtsn_skip *skiplist);
+struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
+ const struct sctp_sndrcvinfo *sinfo,
+ int len, __u8 flags, gfp_t gfp);
+struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
+ const __u32 lowest_tsn);
+struct sctp_chunk *sctp_make_sack(struct sctp_association *asoc);
+struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_shutdown_complete(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+int sctp_init_cause(struct sctp_chunk *chunk, __be16 cause, size_t paylen);
+struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ const size_t hint);
+struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ __u32 tsn);
+struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
+ struct msghdr *msg, size_t msg_len);
+struct sctp_chunk *sctp_make_abort_violation(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ const __u8 *payload,
+ const size_t paylen);
+struct sctp_chunk *sctp_make_violation_paramlen(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ struct sctp_paramhdr *param);
+struct sctp_chunk *sctp_make_violation_max_retrans(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_new_encap_port(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
+ const struct sctp_transport *transport,
+ __u32 probe_size);
+struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ const void *payload,
+ const size_t paylen);
+struct sctp_chunk *sctp_make_pad(const struct sctp_association *asoc, int len);
+struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ __be16 cause_code, const void *payload,
+ size_t paylen, size_t reserve_tail);
+
+struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
+ union sctp_addr *laddr,
+ struct sockaddr *addrs,
+ int addrcnt, __be16 flags);
+struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
+ union sctp_addr *addr);
+bool sctp_verify_asconf(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk, bool addr_param_needed,
+ struct sctp_paramhdr **errp);
+struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ struct sctp_chunk *asconf);
+int sctp_process_asconf_ack(struct sctp_association *asoc,
+ struct sctp_chunk *asconf_ack);
+struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
+ __u32 new_cum_tsn, size_t nstreams,
+ struct sctp_fwdtsn_skip *skiplist);
+struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc,
+ __u16 key_id);
+struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc,
+ __u16 stream_num, __be16 *stream_list,
+ bool out, bool in);
+struct sctp_chunk *sctp_make_strreset_tsnreq(
+ const struct sctp_association *asoc);
+struct sctp_chunk *sctp_make_strreset_addstrm(
+ const struct sctp_association *asoc,
+ __u16 out, __u16 in);
+struct sctp_chunk *sctp_make_strreset_resp(const struct sctp_association *asoc,
+ __u32 result, __u32 sn);
+struct sctp_chunk *sctp_make_strreset_tsnresp(struct sctp_association *asoc,
+ __u32 result, __u32 sn,
+ __u32 sender_tsn,
+ __u32 receiver_tsn);
+bool sctp_verify_reconf(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ struct sctp_paramhdr **errp);
+void sctp_chunk_assign_tsn(struct sctp_chunk *chunk);
+void sctp_chunk_assign_ssn(struct sctp_chunk *chunk);
+
+/* Prototypes for stream-processing functions. */
+struct sctp_chunk *sctp_process_strreset_outreq(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_inreq(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_tsnreq(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_addstrm_out(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_addstrm_in(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_resp(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+
+/* Prototypes for state table processing. */
+
+int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
+ union sctp_subtype subtype, enum sctp_state state,
+ struct sctp_endpoint *ep, struct sctp_association *asoc,
+ void *event_arg, gfp_t gfp);
+
+/* 2nd level prototypes */
+void sctp_generate_t3_rtx_event(struct timer_list *t);
+void sctp_generate_heartbeat_event(struct timer_list *t);
+void sctp_generate_reconf_event(struct timer_list *t);
+void sctp_generate_probe_event(struct timer_list *t);
+void sctp_generate_proto_unreach_event(struct timer_list *t);
+
+void sctp_ootb_pkt_free(struct sctp_packet *packet);
+
+struct sctp_association *sctp_unpack_cookie(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ gfp_t gfp, int *err,
+ struct sctp_chunk **err_chk_p);
+
+/* 3rd level prototypes */
+__u32 sctp_generate_tag(const struct sctp_endpoint *ep);
+__u32 sctp_generate_tsn(const struct sctp_endpoint *ep);
+
+/* Extern declarations for major data structures. */
+extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
+
+
+/* Get the size of a DATA chunk payload. */
+static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
+{
+ __u16 size;
+
+ size = ntohs(chunk->chunk_hdr->length);
+ size -= sctp_datachk_len(&chunk->asoc->stream);
+
+ return size;
+}
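+
+/* Example: a plain DATA chunk whose length field reads 116 carries
+ * 116 - 16 = 100 bytes of user data; for I-DATA the fixed header is 4
+ * bytes longer, which sctp_datachk_len() accounts for per stream.
+ */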
+
+/* Compare two TSNs */
+#define TSN_lt(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) < 0))
+
+#define TSN_lte(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) <= 0))
+
+/* Compare two MIDs */
+#define MID_lt(a, b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) < 0))
+
+/* Compare two SSNs */
+#define SSN_lt(a,b) \
+ (typecheck(__u16, a) && \
+ typecheck(__u16, b) && \
+ ((__s16)((a) - (b)) < 0))
+
+/* ADDIP 3.1.1 */
+#define ADDIP_SERIAL_gte(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((b) - (a)) <= 0))
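+
+/* These comparisons use serial number arithmetic (in the spirit of
+ * RFC 1982) so ordering survives 32-bit wraparound. Example:
+ * TSN_lt(0xffffffff, 0) is true because (__s32)(0xffffffff - 0) == -1,
+ * i.e. 0 is "after" 0xffffffff.
+ */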
+
+/* Check VTAG of the packet matches the sender's own tag. */
+static inline int
+sctp_vtag_verify(const struct sctp_chunk *chunk,
+ const struct sctp_association *asoc)
+{
+ /* RFC 2960 Sec 8.5 When receiving an SCTP packet, the endpoint
+ * MUST ensure that the value in the Verification Tag field of
+ * the received SCTP packet matches its own Tag. If the received
+ * Verification Tag value does not match the receiver's own
+ * tag value, the receiver shall silently discard the packet...
+ */
+ if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag)
+ return 0;
+
+ chunk->transport->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
+ return 1;
+}
+
+/* Check VTAG of the packet matches the sender's own tag and the T bit is
+ * not set, OR its peer's tag and the T bit is set in the Chunk Flags.
+ */
+static inline int
+sctp_vtag_verify_either(const struct sctp_chunk *chunk,
+ const struct sctp_association *asoc)
+{
+ /* RFC 2960 Section 8.5.1, sctpimpguide Section 2.41
+ *
+ * B) The receiver of a ABORT MUST accept the packet
+ * if the Verification Tag field of the packet matches its own tag
+ * and the T bit is not set
+ * OR
+ * it is set to its peer's tag and the T bit is set in the Chunk
+ * Flags.
+ * Otherwise, the receiver MUST silently discard the packet
+ * and take no further action.
+ *
+ * C) The receiver of a SHUTDOWN COMPLETE shall accept the packet
+ * if the Verification Tag field of the packet matches its own tag
+ * and the T bit is not set
+ * OR
+ * it is set to its peer's tag and the T bit is set in the Chunk
+ * Flags.
+ * Otherwise, the receiver MUST silently discard the packet
+ * and take no further action. An endpoint MUST ignore the
+ * SHUTDOWN COMPLETE if it is not in the SHUTDOWN-ACK-SENT state.
+ */
+ if ((!sctp_test_T_bit(chunk) &&
+ (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)) ||
+ (sctp_test_T_bit(chunk) && asoc->c.peer_vtag &&
+ (ntohl(chunk->sctp_hdr->vtag) == asoc->c.peer_vtag))) {
+ return 1;
+ }
+
+ return 0;
+}
+
+#endif /* __sctp_sm_h__ */
diff --git a/include/net/sctp/stream_interleave.h b/include/net/sctp/stream_interleave.h
new file mode 100644
index 000000000..526358555
--- /dev/null
+++ b/include/net/sctp/stream_interleave.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * These are definitions used by stream interleaving, defined in the
+ * RFC ndata draft (https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-11)
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Xin Long <lucien.xin@gmail.com>
+ */
+
+#ifndef __sctp_stream_interleave_h__
+#define __sctp_stream_interleave_h__
+
+struct sctp_stream_interleave {
+ __u16 data_chunk_len;
+ __u16 ftsn_chunk_len;
+ /* (I-)DATA process */
+ struct sctp_chunk *(*make_datafrag)(const struct sctp_association *asoc,
+ const struct sctp_sndrcvinfo *sinfo,
+ int len, __u8 flags, gfp_t gfp);
+ void (*assign_number)(struct sctp_chunk *chunk);
+ bool (*validate_data)(struct sctp_chunk *chunk);
+ int (*ulpevent_data)(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk, gfp_t gfp);
+ int (*enqueue_event)(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event);
+ void (*renege_events)(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk, gfp_t gfp);
+ void (*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
+ void (*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
+ /* (I-)FORWARD-TSN process */
+ void (*generate_ftsn)(struct sctp_outq *q, __u32 ctsn);
+ bool (*validate_ftsn)(struct sctp_chunk *chunk);
+ void (*report_ftsn)(struct sctp_ulpq *ulpq, __u32 ftsn);
+ void (*handle_ftsn)(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk);
+};
+
+void sctp_stream_interleave_init(struct sctp_stream *stream);
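+
+/* Two implementations of these ops are expected: one for plain DATA
+ * chunks and one for I-DATA chunks (RFC 8260 message interleaving);
+ * sctp_stream_interleave_init() selects between them for a stream
+ * based on whether interleaving was negotiated.
+ */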
+
+#endif /* __sctp_stream_interleave_h__ */
diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h
new file mode 100644
index 000000000..65058faea
--- /dev/null
+++ b/include/net/sctp/stream_sched.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * These are definitions used by the stream schedulers, defined in RFC
+ * draft ndata (https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-11)
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+ */
+
+#ifndef __sctp_stream_sched_h__
+#define __sctp_stream_sched_h__
+
+struct sctp_sched_ops {
+ /* Property handling for a given stream */
+ int (*set)(struct sctp_stream *stream, __u16 sid, __u16 value,
+ gfp_t gfp);
+ int (*get)(struct sctp_stream *stream, __u16 sid, __u16 *value);
+
+ /* Init the specific scheduler */
+ int (*init)(struct sctp_stream *stream);
+ /* Init a stream */
+ int (*init_sid)(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
+ /* Free a stream */
+ void (*free_sid)(struct sctp_stream *stream, __u16 sid);
+ /* Free the scheduler state for the entire stream set */
+ void (*free)(struct sctp_stream *stream);
+
+ /* Enqueue a chunk */
+ void (*enqueue)(struct sctp_outq *q, struct sctp_datamsg *msg);
+ /* Dequeue a chunk */
+ struct sctp_chunk *(*dequeue)(struct sctp_outq *q);
+ /* Called only if the chunk fit the packet */
+ void (*dequeue_done)(struct sctp_outq *q, struct sctp_chunk *chunk);
+ /* Schedule all chunks already enqueued */
+ void (*sched_all)(struct sctp_stream *stream);
+ /* Unschedule all chunks already enqueued */
+ void (*unsched_all)(struct sctp_stream *stream);
+};
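+
+/* Registration sketch (illustrative; the "fcfs" names are examples
+ * only): a scheduler fills in a static ops table and registers it
+ * under an enum sctp_sched_type value:
+ *
+ *	static struct sctp_sched_ops sctp_sched_fcfs = {
+ *		.set	= sctp_sched_fcfs_set,
+ *		.get	= sctp_sched_fcfs_get,
+ *		.init	= sctp_sched_fcfs_init,
+ *		...
+ *	};
+ *
+ *	sctp_sched_ops_register(SCTP_SS_FCFS, &sctp_sched_fcfs);
+ */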
+
+int sctp_sched_set_sched(struct sctp_association *asoc,
+ enum sctp_sched_type sched);
+int sctp_sched_get_sched(struct sctp_association *asoc);
+int sctp_sched_set_value(struct sctp_association *asoc, __u16 sid,
+ __u16 value, gfp_t gfp);
+int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
+ __u16 *value);
+void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch);
+
+void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch);
+int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
+struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream);
+
+void sctp_sched_ops_register(enum sctp_sched_type sched,
+ struct sctp_sched_ops *sched_ops);
+void sctp_sched_ops_prio_init(void);
+void sctp_sched_ops_rr_init(void);
+
+#endif /* __sctp_stream_sched_h__ */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
new file mode 100644
index 000000000..00ee9eb60
--- /dev/null
+++ b/include/net/sctp/structs.h
@@ -0,0 +1,2205 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Randall Stewart <randall@sctp.chicago.il.us>
+ * Ken Morneau <kmorneau@cisco.com>
+ * Qiaobing Xie <qxie1@email.mot.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Hui Huang <hui.huang@nokia.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Dajiang Zhang <dajiang.zhang@nokia.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Ryan Layer <rmlayer@us.ibm.com>
+ * Anup Pemmaiah <pemmaiah@cc.usu.edu>
+ * Kevin Gao <kevin.gao@intel.com>
+ */
+
+#ifndef __sctp_structs_h__
+#define __sctp_structs_h__
+
+#include <linux/ktime.h>
+#include <linux/generic-radix-tree.h>
+#include <linux/rhashtable-types.h>
+#include <linux/socket.h> /* linux/in.h needs this!! */
+#include <linux/in.h> /* We get struct sockaddr_in. */
+#include <linux/in6.h> /* We get struct in6_addr */
+#include <linux/ipv6.h>
+#include <asm/param.h> /* We get MAXHOSTNAMELEN. */
+#include <linux/atomic.h> /* This gets us atomic counters. */
+#include <linux/skbuff.h> /* We need sk_buff_head. */
+#include <linux/workqueue.h> /* We need tq_struct. */
+#include <linux/sctp.h> /* We need sctp* header structs. */
+#include <net/sctp/auth.h> /* We need auth specific structs */
+#include <net/ip.h> /* For inet_skb_parm */
+
+/* A convenience structure for handling sockaddr structures.
+ * We should wean ourselves off this.
+ */
+union sctp_addr {
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ struct sockaddr sa;
+};
+
+/* Forward declarations for data structures. */
+struct sctp_globals;
+struct sctp_endpoint;
+struct sctp_association;
+struct sctp_transport;
+struct sctp_packet;
+struct sctp_chunk;
+struct sctp_inq;
+struct sctp_outq;
+struct sctp_bind_addr;
+struct sctp_ulpq;
+struct sctp_ep_common;
+struct crypto_shash;
+struct sctp_stream;
+
+
+#include <net/sctp/tsnmap.h>
+#include <net/sctp/ulpevent.h>
+#include <net/sctp/ulpqueue.h>
+#include <net/sctp/stream_interleave.h>
+
+/* Structures useful for managing bind/connect. */
+
+struct sctp_bind_bucket {
+ unsigned short port;
+ signed char fastreuse;
+ signed char fastreuseport;
+ kuid_t fastuid;
+ struct hlist_node node;
+ struct hlist_head owner;
+ struct net *net;
+};
+
+struct sctp_bind_hashbucket {
+ spinlock_t lock;
+ struct hlist_head chain;
+};
+
+/* Used for hashing all associations. */
+struct sctp_hashbucket {
+ rwlock_t lock;
+ struct hlist_head chain;
+} __attribute__((__aligned__(8)));
+
+
+/* The SCTP globals structure. */
+extern struct sctp_globals {
+ /* This is a list of groups of functions for each address
+ * family that we support.
+ */
+ struct list_head address_families;
+
+ /* This is the hash of all endpoints. */
+ struct sctp_hashbucket *ep_hashtable;
+ /* This is the sctp port control hash. */
+ struct sctp_bind_hashbucket *port_hashtable;
+ /* This is the hash of all transports. */
+ struct rhltable transport_hashtable;
+
+ /* Sizes of above hashtables. */
+ int ep_hashsize;
+ int port_hashsize;
+
+ /* Default initialization values to be applied to new associations. */
+ __u16 max_instreams;
+ __u16 max_outstreams;
+
+ /* Flag to indicate whether computing and verifying checksum
+ * is disabled. */
+ bool checksum_disable;
+} sctp_globals;
+
+#define sctp_max_instreams (sctp_globals.max_instreams)
+#define sctp_max_outstreams (sctp_globals.max_outstreams)
+#define sctp_address_families (sctp_globals.address_families)
+#define sctp_ep_hashsize (sctp_globals.ep_hashsize)
+#define sctp_ep_hashtable (sctp_globals.ep_hashtable)
+#define sctp_port_hashsize (sctp_globals.port_hashsize)
+#define sctp_port_hashtable (sctp_globals.port_hashtable)
+#define sctp_transport_hashtable (sctp_globals.transport_hashtable)
+#define sctp_checksum_disable (sctp_globals.checksum_disable)
+
+/* SCTP Socket type: UDP or TCP style. */
+enum sctp_socket_type {
+ SCTP_SOCKET_UDP = 0,
+ SCTP_SOCKET_UDP_HIGH_BANDWIDTH,
+ SCTP_SOCKET_TCP
+};
+
+/* Per socket SCTP information. */
+struct sctp_sock {
+ /* inet_sock has to be the first member of sctp_sock */
+ struct inet_sock inet;
+ /* What kind of a socket is this? */
+ enum sctp_socket_type type;
+
+ /* PF_ family specific functions. */
+ struct sctp_pf *pf;
+
+ /* Access to HMAC transform. */
+ struct crypto_shash *hmac;
+ char *sctp_hmac_alg;
+
+ /* What is our base endpointer? */
+ struct sctp_endpoint *ep;
+
+ struct sctp_bind_bucket *bind_hash;
+ /* Various Socket Options. */
+ __u16 default_stream;
+ __u32 default_ppid;
+ __u16 default_flags;
+ __u32 default_context;
+ __u32 default_timetolive;
+ __u32 default_rcv_context;
+ int max_burst;
+
+ /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to
+ * the destination address every heartbeat interval. This value
+ * will be inherited by all new associations.
+ */
+ __u32 hbinterval;
+ __u32 probe_interval;
+
+ __be16 udp_port;
+ __be16 encap_port;
+
+ /* This is the max_retrans value for new associations. */
+ __u16 pathmaxrxt;
+
+ __u32 flowlabel;
+ __u8 dscp;
+
+ __u16 pf_retrans;
+ __u16 ps_retrans;
+
+ /* The initial Path MTU to use for new associations. */
+ __u32 pathmtu;
+
+ /* The default SACK delay timeout for new associations. */
+ __u32 sackdelay;
+ __u32 sackfreq;
+
+ /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
+ __u32 param_flags;
+
+ __u32 default_ss;
+
+ struct sctp_rtoinfo rtoinfo;
+ struct sctp_paddrparams paddrparam;
+ struct sctp_assocparams assocparams;
+
+ /*
+ * These two structures must be grouped together for the usercopy
+ * whitelist region.
+ */
+ __u16 subscribe;
+ struct sctp_initmsg initmsg;
+
+ int user_frag;
+
+ __u32 autoclose;
+ __u32 adaptation_ind;
+ __u32 pd_point;
+ __u16 nodelay:1,
+ pf_expose:2,
+ reuse:1,
+ disable_fragments:1,
+ v4mapped:1,
+ frag_interleave:1,
+ recvrcvinfo:1,
+ recvnxtinfo:1,
+ data_ready_signalled:1;
+
+ atomic_t pd_mode;
+
+ /* Fields after this point will be skipped on copies, like on accept
+ * and peeloff operations
+ */
+
+ /* Receive to here while partial delivery is in effect. */
+ struct sk_buff_head pd_lobby;
+
+ struct list_head auto_asconf_list;
+ int do_auto_asconf;
+};
+
+static inline struct sctp_sock *sctp_sk(const struct sock *sk)
+{
+ return (struct sctp_sock *)sk;
+}
+
+static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp)
+{
+ return (struct sock *)sp;
+}
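+
+/* These casts are safe only because struct inet_sock is the first
+ * member of struct sctp_sock (see above), so a struct sock pointer and
+ * a struct sctp_sock pointer refer to the same address.
+ */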
+
+#if IS_ENABLED(CONFIG_IPV6)
+struct sctp6_sock {
+ struct sctp_sock sctp;
+ struct ipv6_pinfo inet6;
+};
+#endif /* CONFIG_IPV6 */
+
+
+/* This is our APPLICATION-SPECIFIC state cookie.
+ * THIS IS NOT DICTATED BY THE SPECIFICATION.
+ */
+/* These are the parts of an association which we send in the cookie.
+ * Most of these are straight out of:
+ * RFC2960 12.2 Parameters necessary per association (i.e. the TCB)
+ */
+
+struct sctp_cookie {
+
+ /* My : Tag expected in every inbound packet and sent
+ * Verification: in the INIT or INIT ACK chunk.
+ * Tag :
+ */
+ __u32 my_vtag;
+
+ /* Peer's : Tag expected in every outbound packet except
+ * Verification: in the INIT chunk.
+ * Tag :
+ */
+ __u32 peer_vtag;
+
+ /* The rest of these are not from the spec, but really need to
+ * be in the cookie.
+ */
+
+ /* My Tie Tag : Assist in discovering a restarting association. */
+ __u32 my_ttag;
+
+ /* Peer's Tie Tag: Assist in discovering a restarting association. */
+ __u32 peer_ttag;
+
+ /* When does this cookie expire? */
+ ktime_t expiration;
+
+ /* Number of inbound/outbound streams which are set
+ * and negotiated during the INIT process.
+ */
+ __u16 sinit_num_ostreams;
+ __u16 sinit_max_instreams;
+
+ /* This is the first sequence number I used. */
+ __u32 initial_tsn;
+
+ /* This holds the originating address of the INIT packet. */
+ union sctp_addr peer_addr;
+
+ /* IG Section 2.35.3
+ * Include the source port of the INIT-ACK
+ */
+ __u16 my_port;
+
+ __u8 prsctp_capable;
+
+ /* Padding for future use */
+ __u8 padding;
+
+ __u32 adaptation_ind;
+
+ __u8 auth_random[sizeof(struct sctp_paramhdr) +
+ SCTP_AUTH_RANDOM_LENGTH];
+ __u8 auth_hmacs[SCTP_AUTH_NUM_HMACS * sizeof(__u16) + 2];
+ __u8 auth_chunks[sizeof(struct sctp_paramhdr) + SCTP_AUTH_MAX_CHUNKS];
+
+ /* This is a shim for my peer's INIT packet, followed by
+ * a copy of the raw address list of the association.
+ * The length of the raw address list is saved in the
+ * raw_addr_list_len field, which will be used at the time when
+ * the association TCB is re-constructed from the cookie.
+ */
+ __u32 raw_addr_list_len;
+ struct sctp_init_chunk peer_init[];
+};
+
+
+/* The format of our cookie that we send to our peer. */
+struct sctp_signed_cookie {
+ __u8 signature[SCTP_SECRET_SIZE];
+ __u32 __pad; /* force sctp_cookie alignment to 64 bits */
+ struct sctp_cookie c;
+} __packed;
+
+/* This is another convenience type to allocate memory for address
+ * params for the maximum size and pass such structures around
+ * internally.
+ */
+union sctp_addr_param {
+ struct sctp_paramhdr p;
+ struct sctp_ipv4addr_param v4;
+ struct sctp_ipv6addr_param v6;
+};
+
+/* A convenience type to allow walking through the various
+ * parameters and avoid casting all over the place.
+ */
+union sctp_params {
+ void *v;
+ struct sctp_paramhdr *p;
+ struct sctp_cookie_preserve_param *life;
+ struct sctp_hostname_param *dns;
+ struct sctp_cookie_param *cookie;
+ struct sctp_supported_addrs_param *sat;
+ struct sctp_ipv4addr_param *v4;
+ struct sctp_ipv6addr_param *v6;
+ union sctp_addr_param *addr;
+ struct sctp_adaptation_ind_param *aind;
+ struct sctp_supported_ext_param *ext;
+ struct sctp_random_param *random;
+ struct sctp_chunks_param *chunks;
+ struct sctp_hmac_algo_param *hmac_algo;
+ struct sctp_addip_param *addip;
+};
+
+/* RFC 2960. Section 3.3.5 Heartbeat.
+ * Heartbeat Information: variable length
+ * The Sender-specific Heartbeat Info field should normally include
+ * information about the sender's current time when this HEARTBEAT
+ * chunk is sent and the destination transport address to which this
+ * HEARTBEAT is sent (see Section 8.3).
+ */
+struct sctp_sender_hb_info {
+ struct sctp_paramhdr param_hdr;
+ union sctp_addr daddr;
+ unsigned long sent_at;
+ __u64 hb_nonce;
+ __u32 probe_size;
+};
+
+int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
+ gfp_t gfp);
+int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid);
+void sctp_stream_free(struct sctp_stream *stream);
+void sctp_stream_clear(struct sctp_stream *stream);
+void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
+
+/* What is the current SSN number for this stream? */
+#define sctp_ssn_peek(stream, type, sid) \
+ (sctp_stream_##type((stream), (sid))->ssn)
+
+/* Return the next SSN number for this stream. */
+#define sctp_ssn_next(stream, type, sid) \
+ (sctp_stream_##type((stream), (sid))->ssn++)
+
+/* Skip over this ssn and all below. */
+#define sctp_ssn_skip(stream, type, sid, ssn) \
+ (sctp_stream_##type((stream), (sid))->ssn = ssn + 1)
+
+/* What is the current MID number for this stream? */
+#define sctp_mid_peek(stream, type, sid) \
+ (sctp_stream_##type((stream), (sid))->mid)
+
+/* Return the next MID number for this stream. */
+#define sctp_mid_next(stream, type, sid) \
+ (sctp_stream_##type((stream), (sid))->mid++)
+
+/* Skip over this mid and all below. */
+#define sctp_mid_skip(stream, type, sid, mid) \
+ (sctp_stream_##type((stream), (sid))->mid = mid + 1)
+
+/* What is the current MID_uo number for this stream? */
+#define sctp_mid_uo_peek(stream, type, sid) \
+ (sctp_stream_##type((stream), (sid))->mid_uo)
+
+/* Return the next MID_uo number for this stream. */
+#define sctp_mid_uo_next(stream, type, sid) \
+ (sctp_stream_##type((stream), (sid))->mid_uo++)
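+
+/* Usage sketch (illustrative): assigning the next outbound SSN for
+ * stream "sid" of an association:
+ *
+ *	__u16 ssn = sctp_ssn_next(&asoc->stream, out, sid);
+ *
+ * The "type" argument is token-pasted into the accessor name, so "out"
+ * and "in" select the sctp_stream_out()/sctp_stream_in() accessors.
+ */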
+
+/*
+ * Pointers to address related SCTP functions.
+ * (i.e. things that depend on the address family.)
+ */
+struct sctp_af {
+ int (*sctp_xmit) (struct sk_buff *skb,
+ struct sctp_transport *);
+ int (*setsockopt) (struct sock *sk,
+ int level,
+ int optname,
+ sockptr_t optval,
+ unsigned int optlen);
+ int (*getsockopt) (struct sock *sk,
+ int level,
+ int optname,
+ char __user *optval,
+ int __user *optlen);
+ void (*get_dst) (struct sctp_transport *t,
+ union sctp_addr *saddr,
+ struct flowi *fl,
+ struct sock *sk);
+ void (*get_saddr) (struct sctp_sock *sk,
+ struct sctp_transport *t,
+ struct flowi *fl);
+ void (*copy_addrlist) (struct list_head *,
+ struct net_device *);
+ int (*cmp_addr) (const union sctp_addr *addr1,
+ const union sctp_addr *addr2);
+ void (*addr_copy) (union sctp_addr *dst,
+ union sctp_addr *src);
+ void (*from_skb) (union sctp_addr *,
+ struct sk_buff *skb,
+ int saddr);
+ void (*from_sk) (union sctp_addr *,
+ struct sock *sk);
+ bool (*from_addr_param) (union sctp_addr *,
+ union sctp_addr_param *,
+ __be16 port, int iif);
+ int (*to_addr_param) (const union sctp_addr *,
+ union sctp_addr_param *);
+ int (*addr_valid) (union sctp_addr *,
+ struct sctp_sock *,
+ const struct sk_buff *);
+ enum sctp_scope (*scope)(union sctp_addr *);
+ void (*inaddr_any) (union sctp_addr *, __be16);
+ int (*is_any) (const union sctp_addr *);
+ int (*available) (union sctp_addr *,
+ struct sctp_sock *);
+ int (*skb_iif) (const struct sk_buff *skb);
+ int (*is_ce) (const struct sk_buff *skb);
+ void (*seq_dump_addr)(struct seq_file *seq,
+ union sctp_addr *addr);
+ void (*ecn_capable)(struct sock *sk);
+ __u16 net_header_len;
+ int sockaddr_len;
+ int (*ip_options_len)(struct sock *sk);
+ sa_family_t sa_family;
+ struct list_head list;
+};
+
+struct sctp_af *sctp_get_af_specific(sa_family_t);
+int sctp_register_af(struct sctp_af *);
+
+/* Protocol family functions. */
+struct sctp_pf {
+ void (*event_msgname)(struct sctp_ulpevent *, char *, int *);
+ void (*skb_msgname) (struct sk_buff *, char *, int *);
+ int (*af_supported) (sa_family_t, struct sctp_sock *);
+ int (*cmp_addr) (const union sctp_addr *,
+ const union sctp_addr *,
+ struct sctp_sock *);
+ int (*bind_verify) (struct sctp_sock *, union sctp_addr *);
+ int (*send_verify) (struct sctp_sock *, union sctp_addr *);
+ int (*supported_addrs)(const struct sctp_sock *, __be16 *);
+ struct sock *(*create_accept_sk) (struct sock *sk,
+ struct sctp_association *asoc,
+ bool kern);
+ int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr);
+ void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
+ void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
+ void (*copy_ip_options)(struct sock *sk, struct sock *newsk);
+ struct sctp_af *af;
+};
+
+
+/* Structure to track chunk fragments that have been acked, but peer
+ * fragments of the same message have not.
+ */
+struct sctp_datamsg {
+ /* Chunks waiting to be submitted to lower layer. */
+ struct list_head chunks;
+ /* Reference counting. */
+ refcount_t refcnt;
+ /* When is this message no longer interesting to the peer? */
+ unsigned long expires_at;
+ /* Did the message fail to send? */
+ int send_error;
+ u8 send_failed:1,
+ can_delay:1, /* should this message be Nagle delayed */
+ abandoned:1; /* should this message be abandoned */
+};
+
+struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
+ struct sctp_sndrcvinfo *,
+ struct iov_iter *);
+void sctp_datamsg_free(struct sctp_datamsg *);
+void sctp_datamsg_put(struct sctp_datamsg *);
+void sctp_chunk_fail(struct sctp_chunk *, int error);
+int sctp_chunk_abandoned(struct sctp_chunk *);
+
+/* RFC2960 1.4 Key Terms
+ *
+ * o Chunk: A unit of information within an SCTP packet, consisting of
+ * a chunk header and chunk-specific content.
+ *
+ * As a matter of convenience, we remember the SCTP common header for
+ * each chunk as well as a few other header pointers...
+ */
+struct sctp_chunk {
+ struct list_head list;
+
+ refcount_t refcnt;
+
+ /* How many times this chunk has been sent, for prsctp RTX policy */
+ int sent_count;
+
+ union {
+ /* This is our link to the per-transport transmitted list. */
+ struct list_head transmitted_list;
+ /* List in specific stream outq */
+ struct list_head stream_list;
+ };
+
+ /* This field is used by chunks that hold fragmented data.
+ * For the first fragment this is the list that holds the rest of
+ * fragments. For the remaining fragments, this is the link to the
+ * frag_list maintained in the first fragment.
+ */
+ struct list_head frag_list;
+
+ /* This points to the sk_buff containing the actual data. */
+ struct sk_buff *skb;
+
+ union {
+ /* In case of GSO packets, this will store the head one */
+ struct sk_buff *head_skb;
+ /* In case of auth enabled, this will point to the shkey */
+ struct sctp_shared_key *shkey;
+ };
+
+ /* These are the SCTP headers, in reverse order, in a packet.
+ * Note that some of these may happen more than once. In that
+ * case, we point at the "current" one, whatever that means
+ * for that level of header.
+ */
+
+ /* We point this at the FIRST TLV parameter to chunk_hdr. */
+ union sctp_params param_hdr;
+ union {
+ __u8 *v;
+ struct sctp_datahdr *data_hdr;
+ struct sctp_inithdr *init_hdr;
+ struct sctp_sackhdr *sack_hdr;
+ struct sctp_heartbeathdr *hb_hdr;
+ struct sctp_sender_hb_info *hbs_hdr;
+ struct sctp_shutdownhdr *shutdown_hdr;
+ struct sctp_signed_cookie *cookie_hdr;
+ struct sctp_ecnehdr *ecne_hdr;
+ struct sctp_cwrhdr *ecn_cwr_hdr;
+ struct sctp_errhdr *err_hdr;
+ struct sctp_addiphdr *addip_hdr;
+ struct sctp_fwdtsn_hdr *fwdtsn_hdr;
+ struct sctp_authhdr *auth_hdr;
+ struct sctp_idatahdr *idata_hdr;
+ struct sctp_ifwdtsn_hdr *ifwdtsn_hdr;
+ } subh;
+
+ __u8 *chunk_end;
+
+ struct sctp_chunkhdr *chunk_hdr;
+ struct sctphdr *sctp_hdr;
+
+ /* This needs to be recoverable for SCTP_SEND_FAILED events. */
+ struct sctp_sndrcvinfo sinfo;
+
+ /* Which association does this belong to? */
+ struct sctp_association *asoc;
+
+ /* What endpoint received this chunk? */
+ struct sctp_ep_common *rcvr;
+
+ /* We fill this in if we are calculating RTT. */
+ unsigned long sent_at;
+
+ /* What is the origin IP address for this chunk? */
+ union sctp_addr source;
+ /* Destination address for this chunk. */
+ union sctp_addr dest;
+
+ /* For outbound message, track all fragments for SEND_FAILED. */
+ struct sctp_datamsg *msg;
+
+ /* For an inbound chunk, this tells us where it came from.
+ * For an outbound chunk, it tells us where we'd like it to
+ * go. It is NULL if we have no preference.
+ */
+ struct sctp_transport *transport;
+
+ /* SCTP-AUTH: For the special case of inbound COOKIE-ECHO processing
+ * we need to save a pointer to the AUTH chunk, since the SCTP-AUTH
+ * spec violates the principal premise that all chunks are processed
+ * in order.
+ */
+ struct sk_buff *auth_chunk;
+
+#define SCTP_CAN_FRTX 0x0
+#define SCTP_NEED_FRTX 0x1
+#define SCTP_DONT_FRTX 0x2
+ __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */
+ has_tsn:1, /* Does this chunk have a TSN yet? */
+ has_ssn:1, /* Does this chunk have a SSN yet? */
+#define has_mid has_ssn
+ singleton:1, /* Only chunk in the packet? */
+ end_of_packet:1, /* Last chunk in the packet? */
+ ecn_ce_done:1, /* Have we processed the ECN CE bit? */
+ pdiscard:1, /* Discard the whole packet now? */
+ tsn_gap_acked:1, /* Is this chunk acked by a GAP ACK? */
+ data_accepted:1, /* At least 1 chunk accepted */
+ auth:1, /* IN: was auth'ed | OUT: needs auth */
+ has_asconf:1, /* IN: have seen an asconf before */
+ pmtu_probe:1, /* Used by PLPMTUD, can be set in a HB chunk */
+ tsn_missing_report:2, /* Data chunk missing counter. */
+ fast_retransmit:2; /* Is this chunk fast retransmitted? */
+};
+
+#define sctp_chunk_retransmitted(chunk) (chunk->sent_count > 1)
+void sctp_chunk_hold(struct sctp_chunk *);
+void sctp_chunk_put(struct sctp_chunk *);
+int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len,
+ struct iov_iter *from);
+void sctp_chunk_free(struct sctp_chunk *);
+void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+struct sctp_chunk *sctp_chunkify(struct sk_buff *,
+ const struct sctp_association *,
+ struct sock *, gfp_t gfp);
+void sctp_init_addrs(struct sctp_chunk *, union sctp_addr *,
+ union sctp_addr *);
+const union sctp_addr *sctp_source(const struct sctp_chunk *chunk);
+
+static inline __u16 sctp_chunk_stream_no(struct sctp_chunk *ch)
+{
+ return ntohs(ch->subh.data_hdr->stream);
+}
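+
+/* Usage sketch (illustrative, not taken from this header): once a
+ * received DATA chunk has been parsed so that subh.data_hdr points at
+ * its payload header, the stream id can be read in host order while a
+ * reference is held:
+ *
+ *	sctp_chunk_hold(chunk);
+ *	sid = sctp_chunk_stream_no(chunk);
+ *	... use sid ...
+ *	sctp_chunk_put(chunk);
+ */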
+
+enum {
+ SCTP_ADDR_NEW, /* new address added to assoc/ep */
+ SCTP_ADDR_SRC, /* address can be used as source */
+ SCTP_ADDR_DEL, /* address about to be deleted */
+};
+
+/* This is a structure for holding either an IPv6 or an IPv4 address. */
+struct sctp_sockaddr_entry {
+ struct list_head list;
+ struct rcu_head rcu;
+ union sctp_addr a;
+ __u8 state;
+ __u8 valid;
+};
+
+#define SCTP_ADDRESS_TICK_DELAY 500
+
+/* This structure holds lists of chunks as we are assembling for
+ * transmission.
+ */
+struct sctp_packet {
+ /* These are the SCTP header values (host order) for the packet. */
+ __u16 source_port;
+ __u16 destination_port;
+ __u32 vtag;
+
+ /* This contains the payload chunks. */
+ struct list_head chunk_list;
+
+ /* This is the overhead of the sctp and ip headers. */
+ size_t overhead;
+ /* This is the total size of all chunks INCLUDING padding. */
+ size_t size;
+ /* This is the maximum size this packet may have. */
+ size_t max_size;
+
+ /* The packet is destined for this transport address.
+ * The function we finally use to pass down to the next lower
+ * layer lives in the transport structure.
+ */
+ struct sctp_transport *transport;
+
+ /* pointer to the auth chunk for this packet */
+ struct sctp_chunk *auth;
+
+ u8 has_cookie_echo:1, /* This packet contains a COOKIE-ECHO chunk. */
+ has_sack:1, /* This packet contains a SACK chunk. */
+ has_auth:1, /* This packet contains an AUTH chunk */
+ has_data:1, /* This packet contains at least 1 DATA chunk */
+ ipfragok:1; /* Allow IP to fragment this packet */
+};
+
+void sctp_packet_init(struct sctp_packet *, struct sctp_transport *,
+ __u16 sport, __u16 dport);
+void sctp_packet_config(struct sctp_packet *, __u32 vtag, int);
+enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk,
+ int one_packet, gfp_t gfp);
+enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk);
+int sctp_packet_transmit(struct sctp_packet *, gfp_t);
+void sctp_packet_free(struct sctp_packet *);
+
+static inline int sctp_packet_empty(struct sctp_packet *packet)
+{
+ return packet->size == packet->overhead;
+}
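+
+/* Assembly sketch (illustrative): a packet is initialized against a
+ * transport, configured with the association's vtag, filled with
+ * chunks, and transmitted once non-empty. SCTP_XMIT_OK is assumed to
+ * be the success value of enum sctp_xmit:
+ *
+ *	sctp_packet_init(&pkt, transport, sport, dport);
+ *	sctp_packet_config(&pkt, vtag, 0);
+ *	if (sctp_packet_append_chunk(&pkt, chunk) == SCTP_XMIT_OK &&
+ *	    !sctp_packet_empty(&pkt))
+ *		sctp_packet_transmit(&pkt, GFP_ATOMIC);
+ */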
+
+/* This represents a remote transport address.
+ * For local transport addresses, we just use union sctp_addr.
+ *
+ * RFC2960 Section 1.4 Key Terms
+ *
+ * o Transport address: A Transport Address is traditionally defined
+ * by Network Layer address, Transport Layer protocol and Transport
+ * Layer port number. In the case of SCTP running over IP, a
+ * transport address is defined by the combination of an IP address
+ * and an SCTP port number (where SCTP is the Transport protocol).
+ *
+ * RFC2960 Section 7.1 SCTP Differences from TCP Congestion control
+ *
+ * o The sender keeps a separate congestion control parameter set for
+ * each of the destination addresses it can send to (not each
+ * source-destination pair but for each destination). The parameters
+ * should decay if the address is not used for a long enough time
+ * period.
+ *
+ */
+struct sctp_transport {
+ /* A list of transports. */
+ struct list_head transports;
+ struct rhlist_head node;
+
+ /* Reference counting. */
+ refcount_t refcnt;
+ /* RTO-Pending : A flag used to track if one of the DATA
+ * chunks sent to this address is currently being
+ * used to compute a RTT. If this flag is 0,
+ * the next DATA chunk sent to this destination
+ * should be used to compute a RTT and this flag
+ * should be set. Every time the RTT
+ * calculation completes (i.e. the DATA chunk
+ * is SACK'd) clear this flag.
+ */
+ __u32 rto_pending:1,
+
+ /*
+ * hb_sent : a flag that signals that we have a pending
+ * heartbeat.
+ */
+ hb_sent:1,
+
+ /* Is a Path MTU update pending on this transport? */
+ pmtu_pending:1,
+
+ dst_pending_confirm:1, /* need to confirm neighbour */
+
+ /* Has this transport moved the ctsn since we last sacked? */
+ sack_generation:1;
+ u32 dst_cookie;
+
+ struct flowi fl;
+
+ /* This is the peer's IP address and port. */
+ union sctp_addr ipaddr;
+
+ /* These are the functions we call to handle LLP stuff. */
+ struct sctp_af *af_specific;
+
+ /* Which association do we belong to? */
+ struct sctp_association *asoc;
+
+ /* RFC2960
+ *
+ * 12.3 Per Transport Address Data
+ *
+ * For each destination transport address in the peer's
+ * address list derived from the INIT or INIT ACK chunk, a
+ * number of data elements needs to be maintained including:
+ */
+ /* RTO : The current retransmission timeout value. */
+ unsigned long rto;
+
+ __u32 rtt; /* This is the most recent RTT. */
+
+ /* RTTVAR : The current RTT variation. */
+ __u32 rttvar;
+
+ /* SRTT : The current smoothed round trip time. */
+ __u32 srtt;
+
+ /*
+ * These are the congestion stats.
+ */
+ /* cwnd : The current congestion window. */
+ __u32 cwnd; /* This is the actual cwnd. */
+
+ /* ssthresh : The current slow start threshold value. */
+ __u32 ssthresh;
+
+ /* partial : The tracking method for increase of cwnd when in
+ * bytes acked : congestion avoidance mode (see Section 6.2.2)
+ */
+ __u32 partial_bytes_acked;
+
+ /* Data that has been sent, but not acknowledged. */
+ __u32 flight_size;
+
+ __u32 burst_limited; /* Holds old cwnd when max.burst is applied */
+
+ /* Destination */
+ struct dst_entry *dst;
+ /* Source address. */
+ union sctp_addr saddr;
+
+ /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to
+ * the destination address every heartbeat interval.
+ */
+ unsigned long hbinterval;
+ unsigned long probe_interval;
+
+ /* SACK delay timeout */
+ unsigned long sackdelay;
+ __u32 sackfreq;
+
+ atomic_t mtu_info;
+
+ /* When was the last time that we heard from this transport? We use
+ * this to pick new active and retran paths.
+ */
+ ktime_t last_time_heard;
+
+ /* When was the last time that we sent a chunk using this
+ * transport? We use this to check for idle transports
+ */
+ unsigned long last_time_sent;
+
+ /* Last time(in jiffies) when cwnd is reduced due to the congestion
+ * indication based on ECNE chunk.
+ */
+ unsigned long last_time_ecne_reduced;
+
+ __be16 encap_port;
+
+ /* This is the max_retrans value for the transport and will
+ * be initialized from the association's value. This can be changed
+ * using the SCTP_SET_PEER_ADDR_PARAMS socket option.
+ */
+ __u16 pathmaxrxt;
+
+ __u32 flowlabel;
+ __u8 dscp;
+
+ /* This is the partially failed retrans value for the transport
+ * and will be initialized from the association's value. This can be
+ * changed using the SCTP_PEER_ADDR_THLDS socket option.
+ */
+ __u16 pf_retrans;
+ /* Used for primary path switchover. */
+ __u16 ps_retrans;
+ /* PMTU : The current known path MTU. */
+ __u32 pathmtu;
+
+ /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
+ __u32 param_flags;
+
+ /* The number of times INIT has been sent on this transport. */
+ int init_sent_count;
+
+ /* state : The current state of this destination,
+ * : i.e. SCTP_ACTIVE, SCTP_INACTIVE, SCTP_UNKNOWN.
+ */
+ int state;
+
+ /* These are the error stats for this destination. */
+
+ /* Error count : The current error count for this destination. */
+ unsigned short error_count;
+
+ /* Per : A timer used by each destination.
+ * Destination :
+ * Timer :
+ *
+ * [Everywhere else in the text this is called T3-rtx. -ed]
+ */
+ struct timer_list T3_rtx_timer;
+
+ /* Heartbeat timer is per destination. */
+ struct timer_list hb_timer;
+
+ /* Timer to handle ICMP proto unreachable events */
+ struct timer_list proto_unreach_timer;
+
+ /* Timer to handle reconf chunk rtx */
+ struct timer_list reconf_timer;
+
+ /* Timer to send a probe HB packet for PLPMTUD */
+ struct timer_list probe_timer;
+
+ /* Since we're using per-destination retransmission timers
+ * (see above), we're also using per-destination "transmitted"
+ * queues. This probably ought to be a private struct
+ * accessible only within the outqueue, but it's not, yet.
+ */
+ struct list_head transmitted;
+
+ /* We build bundle-able packets for this transport here. */
+ struct sctp_packet packet;
+
+ /* This is the list of transports that have chunks to send. */
+ struct list_head send_ready;
+
+ /* State information saved for SFR_CACC algorithm. The key
+ * idea in SFR_CACC is to maintain state at the sender on a
+ * per-destination basis when a changeover happens.
+ * char changeover_active;
+ * char cycling_changeover;
+ * __u32 next_tsn_at_change;
+ * char cacc_saw_newack;
+ */
+ struct {
+ /* An unsigned integer, which stores the next TSN to be
+ * used by the sender, at the moment of changeover.
+ */
+ __u32 next_tsn_at_change;
+
+ /* A flag which indicates the occurrence of a changeover */
+ char changeover_active;
+
+ /* A flag which indicates whether the change of primary is
+ * the first switch to this destination address during an
+ * active switch.
+ */
+ char cycling_changeover;
+
+ /* A temporary flag, which is used during the processing of
+ * a SACK to estimate the causative TSN(s)'s group.
+ */
+ char cacc_saw_newack;
+ } cacc;
+
+ struct {
+ __u16 pmtu;
+ __u16 probe_size;
+ __u16 probe_high;
+ __u8 probe_count;
+ __u8 state;
+ } pl; /* plpmtud related */
+
+ /* 64-bit random number sent with heartbeat. */
+ __u64 hb_nonce;
+
+ struct rcu_head rcu;
+};
+
+struct sctp_transport *sctp_transport_new(struct net *, const union sctp_addr *,
+ gfp_t);
+void sctp_transport_set_owner(struct sctp_transport *,
+ struct sctp_association *);
+void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
+ struct sctp_sock *);
+void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
+void sctp_transport_free(struct sctp_transport *);
+void sctp_transport_reset_t3_rtx(struct sctp_transport *);
+void sctp_transport_reset_hb_timer(struct sctp_transport *);
+void sctp_transport_reset_reconf_timer(struct sctp_transport *transport);
+void sctp_transport_reset_probe_timer(struct sctp_transport *transport);
+void sctp_transport_reset_raise_timer(struct sctp_transport *transport);
+int sctp_transport_hold(struct sctp_transport *);
+void sctp_transport_put(struct sctp_transport *);
+void sctp_transport_update_rto(struct sctp_transport *, __u32);
+void sctp_transport_raise_cwnd(struct sctp_transport *, __u32, __u32);
+void sctp_transport_lower_cwnd(struct sctp_transport *t,
+ enum sctp_lower_cwnd reason);
+void sctp_transport_burst_limited(struct sctp_transport *);
+void sctp_transport_burst_reset(struct sctp_transport *);
+unsigned long sctp_transport_timeout(struct sctp_transport *);
+void sctp_transport_reset(struct sctp_transport *t);
+bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
+void sctp_transport_immediate_rtx(struct sctp_transport *);
+void sctp_transport_dst_release(struct sctp_transport *t);
+void sctp_transport_dst_confirm(struct sctp_transport *t);
+void sctp_transport_pl_send(struct sctp_transport *t);
+bool sctp_transport_pl_recv(struct sctp_transport *t);
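+
+/* Reference sketch (illustrative): a caller that keeps a transport
+ * pointer takes its own reference first, assuming a nonzero return
+ * from sctp_transport_hold() means the reference was obtained:
+ *
+ *	if (sctp_transport_hold(t)) {
+ *		sctp_transport_reset_hb_timer(t);
+ *		sctp_transport_put(t);
+ *	}
+ */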
+
+
+/* This is the structure we use to queue packets as they come into
+ * SCTP. We write packets to it and read chunks from it.
+ */
+struct sctp_inq {
+ /* This is actually a queue of sctp_chunks, each
+ * containing a partially decoded packet.
+ */
+ struct list_head in_chunk_list;
+ /* This is the packet which is currently off the in queue and is
+ * being worked on through the inbound chunk processing.
+ */
+ struct sctp_chunk *in_progress;
+
+ /* This is the delayed task to finish delivering inbound
+ * messages.
+ */
+ struct work_struct immediate;
+};
+
+void sctp_inq_init(struct sctp_inq *);
+void sctp_inq_free(struct sctp_inq *);
+void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet);
+struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
+struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *);
+void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t);
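+
+/* Consumption sketch (illustrative): the receive path pushes whole
+ * packets and pops decoded chunks one at a time; process() here is a
+ * hypothetical consumer:
+ *
+ *	sctp_inq_push(inq, packet);
+ *	while ((chunk = sctp_inq_pop(inq)) != NULL)
+ *		process(chunk);
+ */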
+
+/* This is the structure we use to hold outbound chunks. You push
+ * chunks in and they automatically pop out the other end as bundled
+ * packets (it calls (*output_handler)()).
+ *
+ * This structure covers sections 6.3, 6.4, 6.7, 6.8, 6.10, 7., 8.1,
+ * and 8.2 of the v13 draft.
+ *
+ * It handles retransmissions. The connection to the timeout portion
+ * of the state machine is through sctp_..._timeout() and timeout_handler.
+ *
+ * If you feed it SACKs, it will eat them.
+ *
+ * If you give it big chunks, it will fragment them.
+ *
+ * It assigns TSNs to data chunks. This happens at the last possible
+ * instant before transmission.
+ *
+ * When free()'d, it empties itself out via output_handler().
+ */
+struct sctp_outq {
+ struct sctp_association *asoc;
+
+ /* Data pending that has never been transmitted. */
+ struct list_head out_chunk_list;
+
+ /* Stream scheduler being used */
+ struct sctp_sched_ops *sched;
+
+ unsigned int out_qlen; /* Total length of queued data chunks. */
+
+ /* Error from a failed send; may be used in the SCTP_SEND_FAILED event. */
+ unsigned int error;
+
+ /* These are control chunks we want to send. */
+ struct list_head control_chunk_list;
+
+ /* These are chunks that have been sacked but are above the
+ * CTSN, or cumulative tsn ack point.
+ */
+ struct list_head sacked;
+
+ /* Put chunks on this list to schedule them for
+ * retransmission.
+ */
+ struct list_head retransmit;
+
+ /* Put chunks on this list to save them for FWD TSN processing as
+ * they were abandoned.
+ */
+ struct list_head abandoned;
+
+ /* How many unacked bytes do we have in-flight? */
+ __u32 outstanding_bytes;
+
+ /* Are we doing fast-rtx on this queue? */
+ char fast_rtx;
+
+ /* Corked? */
+ char cork;
+};
+
+void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
+void sctp_outq_teardown(struct sctp_outq *);
+void sctp_outq_free(struct sctp_outq*);
+void sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
+int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *);
+int sctp_outq_is_empty(const struct sctp_outq *);
+void sctp_outq_restart(struct sctp_outq *);
+
+void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
+ enum sctp_retransmit_reason reason);
+void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
+void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
+void sctp_prsctp_prune(struct sctp_association *asoc,
+ struct sctp_sndrcvinfo *sinfo, int msg_len);
+void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
+/* Cork an outqueue: hold chunks so they can be bundled and flushed
+ * together by sctp_outq_uncork().
+ */
+static inline void sctp_outq_cork(struct sctp_outq *q)
+{
+ q->cork = 1;
+}
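+
+/* Bundling sketch (illustrative): corking holds queued chunks so that
+ * several can be bundled into as few packets as possible when the
+ * queue is uncorked:
+ *
+ *	sctp_outq_cork(q);
+ *	sctp_outq_tail(q, chunk1, GFP_ATOMIC);
+ *	sctp_outq_tail(q, chunk2, GFP_ATOMIC);
+ *	sctp_outq_uncork(q, GFP_ATOMIC);
+ */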
+
+/* SCTP skb control block.
+ * sctp_input_cb is currently used on rx and sock rx queue
+ */
+struct sctp_input_cb {
+ union {
+ struct inet_skb_parm h4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_skb_parm h6;
+#endif
+ } header;
+ struct sctp_chunk *chunk;
+ struct sctp_af *af;
+ __be16 encap_port;
+};
+#define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0]))
+
+struct sctp_output_cb {
+ struct sk_buff *last;
+};
+#define SCTP_OUTPUT_CB(__skb) ((struct sctp_output_cb *)&((__skb)->cb[0]))
+
+static inline const struct sk_buff *sctp_gso_headskb(const struct sk_buff *skb)
+{
+ const struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+
+ return chunk->head_skb ? : skb;
+}
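+
+/* Access sketch (illustrative): on the receive path the control block
+ * ties an skb back to its chunk, and GSO handling works on the head
+ * skb when one exists:
+ *
+ *	struct sctp_chunk *ch = SCTP_INPUT_CB(skb)->chunk;
+ *	const struct sk_buff *head = sctp_gso_headskb(skb);
+ */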
+
+/* These are the bind address data fields common to endpoints and associations */
+struct sctp_bind_addr {
+
+ /* RFC 2960 12.1 Parameters necessary for the SCTP instance
+ *
+ * SCTP Port: The local SCTP port number the endpoint is
+ * bound to.
+ */
+ __u16 port;
+
+ /* RFC 2960 12.1 Parameters necessary for the SCTP instance
+ *
+ * Address List: The list of IP addresses that this instance
+ * has bound. This information is passed to one's
+ * peer(s) in INIT and INIT ACK chunks.
+ */
+ struct list_head address_list;
+};
+
+void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port);
+void sctp_bind_addr_free(struct sctp_bind_addr *);
+int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
+ const struct sctp_bind_addr *src,
+ enum sctp_scope scope, gfp_t gfp,
+ int flags);
+int sctp_bind_addr_dup(struct sctp_bind_addr *dest,
+ const struct sctp_bind_addr *src,
+ gfp_t gfp);
+int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
+ int new_size, __u8 addr_state, gfp_t gfp);
+int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);
+int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
+ struct sctp_sock *);
+int sctp_bind_addr_conflict(struct sctp_bind_addr *, const union sctp_addr *,
+ struct sctp_sock *, struct sctp_sock *);
+int sctp_bind_addr_state(const struct sctp_bind_addr *bp,
+ const union sctp_addr *addr);
+int sctp_bind_addrs_check(struct sctp_sock *sp,
+ struct sctp_sock *sp2, int cnt2);
+union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
+ const union sctp_addr *addrs,
+ int addrcnt,
+ struct sctp_sock *opt);
+union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp,
+ int *addrs_len,
+ gfp_t gfp);
+int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len,
+ __u16 port, gfp_t gfp);
+
+enum sctp_scope sctp_scope(const union sctp_addr *addr);
+int sctp_in_scope(struct net *net, const union sctp_addr *addr,
+ const enum sctp_scope scope);
+int sctp_is_any(struct sock *sk, const union sctp_addr *addr);
+int sctp_is_ep_boundall(struct sock *sk);
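+
+/* Scope sketch (illustrative): an address is only usable toward a
+ * peer when it falls within the scope of the peer's address;
+ * use_as_source() is a hypothetical consumer:
+ *
+ *	if (sctp_in_scope(net, addr, sctp_scope(&peer->ipaddr)))
+ *		use_as_source(addr);
+ */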
+
+
+/* What type of endpoint? */
+enum sctp_endpoint_type {
+ SCTP_EP_TYPE_SOCKET,
+ SCTP_EP_TYPE_ASSOCIATION,
+};
+
+/*
+ * A common base class to bridge the implementation view of a
+ * socket (usually listening) endpoint versus an association's
+ * local endpoint.
+ * This common structure is useful for several purposes:
+ * 1) Common interface for lookup routines.
+ * a) Subfunctions work for either endpoint or association
+ * b) Single interface to lookup allows hiding the lookup lock rather
+ * than acquiring it externally.
+ * 2) Common interface for the inbound chunk handling/state machine.
+ * 3) Common object handling routines for reference counting, etc.
+ * 4) Disentangle association lookup from endpoint lookup, where we
+ * do not have to find our endpoint to find our association.
+ *
+ */
+
+struct sctp_ep_common {
+ /* Runtime type information. What kind of endpoint is this? */
+ enum sctp_endpoint_type type;
+
+ /* Some fields to help us manage this object.
+ * refcnt - Reference count access to this object.
+ * dead - Do not attempt to use this object.
+ */
+ refcount_t refcnt;
+ bool dead;
+
+ /* What socket does this endpoint belong to? */
+ struct sock *sk;
+
+ /* Cached netns; it won't change once set */
+ struct net *net;
+
+ /* This is where we receive inbound chunks. */
+ struct sctp_inq inqueue;
+
+ /* This substructure includes the defining parameters of the
+ * endpoint:
+ * bind_addr.port is our shared port number.
+ * bind_addr.address_list is our set of local IP addresses.
+ */
+ struct sctp_bind_addr bind_addr;
+};
+
+
+/* RFC Section 1.4 Key Terms
+ *
+ * o SCTP endpoint: The logical sender/receiver of SCTP packets. On a
+ * multi-homed host, an SCTP endpoint is represented to its peers as a
+ * combination of a set of eligible destination transport addresses to
+ * which SCTP packets can be sent and a set of eligible source
+ * transport addresses from which SCTP packets can be received.
+ * All transport addresses used by an SCTP endpoint must use the
+ * same port number, but can use multiple IP addresses. A transport
+ * address used by an SCTP endpoint must not be used by another
+ * SCTP endpoint. In other words, a transport address is unique
+ * to an SCTP endpoint.
+ *
+ * From an implementation perspective, each socket has one of these.
+ * A TCP-style socket will have exactly one association on one of
+ * these. An UDP-style socket will have multiple associations hanging
+ * off one of these.
+ */
+
+struct sctp_endpoint {
+ /* Common substructure for endpoint and association. */
+ struct sctp_ep_common base;
+
+ /* Fields to help us manage our entries in the hash tables. */
+ struct hlist_node node;
+ int hashent;
+
+ /* Associations: A list of current associations and mappings
+ * to the data consumers for each association. This
+ * may be in the form of a hash table or other
+ * implementation dependent structure. The data
+ * consumers may be process identification
+ * information such as file descriptors, named pipe
+ * pointer, or table pointers dependent on how SCTP
+ * is implemented.
+ */
+ /* This is really a list of struct sctp_association entries. */
+ struct list_head asocs;
+
+ /* Secret Key: A secret key used by this endpoint to compute
+ * the MAC. This SHOULD be a cryptographic quality
+ * random number with a sufficient length.
+ * Discussion in [RFC1750] can be helpful in
+ * selection of the key.
+ */
+ __u8 secret_key[SCTP_SECRET_SIZE];
+
+ /* digest: This is a digest of the sctp cookie. This field is
+ * only used on the receive path when we try to validate
+ * that the cookie has not been tampered with. We put
+ * this here so we pre-allocate this once and can re-use
+ * on every receive.
+ */
+ __u8 *digest;
+
+ /* sendbuf acct. policy. */
+ __u32 sndbuf_policy;
+
+ /* rcvbuf acct. policy. */
+ __u32 rcvbuf_policy;
+
+ /* SCTP-AUTH: array of the HMACs that will be allocated.
+ * We need this per association so that we don't serialize.
+ */
+ struct crypto_shash **auth_hmacs;
+
+ /* SCTP-AUTH: hmacs for the endpoint encoded into parameter */
+ struct sctp_hmac_algo_param *auth_hmacs_list;
+
+ /* SCTP-AUTH: chunks to authenticate encoded into parameter */
+ struct sctp_chunks_param *auth_chunk_list;
+
+ /* SCTP-AUTH: endpoint shared keys */
+ struct list_head endpoint_shared_keys;
+ __u16 active_key_id;
+ __u8 ecn_enable:1,
+ auth_enable:1,
+ intl_enable:1,
+ prsctp_enable:1,
+ asconf_enable:1,
+ reconf_enable:1;
+
+ __u8 strreset_enable;
+ struct rcu_head rcu;
+};
+
+/* Recover the outer endpoint structure. */
+static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
+{
+ struct sctp_endpoint *ep;
+
+ ep = container_of(base, struct sctp_endpoint, base);
+ return ep;
+}
+
+/* These are function signatures for manipulating endpoints. */
+struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
+void sctp_endpoint_free(struct sctp_endpoint *);
+void sctp_endpoint_put(struct sctp_endpoint *);
+int sctp_endpoint_hold(struct sctp_endpoint *ep);
+void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
+struct sctp_association *sctp_endpoint_lookup_assoc(
+ const struct sctp_endpoint *ep,
+ const union sctp_addr *paddr,
+ struct sctp_transport **);
+bool sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
+ const union sctp_addr *paddr);
+struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
+ struct net *, const union sctp_addr *);
+bool sctp_has_association(struct net *net, const union sctp_addr *laddr,
+ const union sctp_addr *paddr);
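+
+/* Lookup sketch (illustrative): given a local endpoint and a peer
+ * address, recover both the association and the transport the peer
+ * address maps to:
+ *
+ *	asoc = sctp_endpoint_lookup_assoc(ep, paddr, &transport);
+ *	if (asoc)
+ *		... transport points at the matching peer address ...
+ */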
+
+int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ enum sctp_cid cid, struct sctp_init_chunk *peer_init,
+ struct sctp_chunk *chunk, struct sctp_chunk **err_chunk);
+int sctp_process_init(struct sctp_association *, struct sctp_chunk *chunk,
+ const union sctp_addr *peer,
+ struct sctp_init_chunk *init, gfp_t gfp);
+__u32 sctp_generate_tag(const struct sctp_endpoint *);
+__u32 sctp_generate_tsn(const struct sctp_endpoint *);
+
+struct sctp_inithdr_host {
+ __u32 init_tag;
+ __u32 a_rwnd;
+ __u16 num_outbound_streams;
+ __u16 num_inbound_streams;
+ __u32 initial_tsn;
+};
+
+struct sctp_stream_priorities {
+ /* List of priorities scheduled */
+ struct list_head prio_sched;
+ /* List of streams scheduled */
+ struct list_head active;
+ /* The next stream in line */
+ struct sctp_stream_out_ext *next;
+ __u16 prio;
+ __u16 users;
+};
+
+struct sctp_stream_out_ext {
+ __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
+ __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+ struct list_head outq; /* chunks enqueued by this stream */
+ union {
+ struct {
+ /* Scheduled streams list */
+ struct list_head prio_list;
+ struct sctp_stream_priorities *prio_head;
+ };
+ /* Fields used by RR scheduler */
+ struct {
+ struct list_head rr_list;
+ };
+ };
+};
+
+struct sctp_stream_out {
+ union {
+ __u32 mid;
+ __u16 ssn;
+ };
+ __u32 mid_uo;
+ struct sctp_stream_out_ext *ext;
+ __u8 state;
+};
+
+struct sctp_stream_in {
+ union {
+ __u32 mid;
+ __u16 ssn;
+ };
+ __u32 mid_uo;
+ __u32 fsn;
+ __u32 fsn_uo;
+ char pd_mode;
+ char pd_mode_uo;
+};
+
+struct sctp_stream {
+ GENRADIX(struct sctp_stream_out) out;
+ GENRADIX(struct sctp_stream_in) in;
+
+ __u16 outcnt;
+ __u16 incnt;
+ /* Current stream being sent, if any */
+ struct sctp_stream_out *out_curr;
+ union {
+ /* Fields used by priority scheduler */
+ struct {
+ /* List of priorities scheduled */
+ struct list_head prio_list;
+ };
+ /* Fields used by RR scheduler */
+ struct {
+ /* List of streams scheduled */
+ struct list_head rr_list;
+ /* The next stream in line */
+ struct sctp_stream_out_ext *rr_next;
+ };
+ };
+ struct sctp_stream_interleave *si;
+};
+
+static inline struct sctp_stream_out *sctp_stream_out(
+ struct sctp_stream *stream,
+ __u16 sid)
+{
+ return genradix_ptr(&stream->out, sid);
+}
+
+static inline struct sctp_stream_in *sctp_stream_in(
+ struct sctp_stream *stream,
+ __u16 sid)
+{
+ return genradix_ptr(&stream->in, sid);
+}
+
+#define SCTP_SO(s, i) sctp_stream_out((s), (i))
+#define SCTP_SI(s, i) sctp_stream_in((s), (i))
+
+#define SCTP_STREAM_CLOSED 0x00
+#define SCTP_STREAM_OPEN 0x01
+
+static inline __u16 sctp_datachk_len(const struct sctp_stream *stream)
+{
+ return stream->si->data_chunk_len;
+}
+
+static inline __u16 sctp_datahdr_len(const struct sctp_stream *stream)
+{
+ return stream->si->data_chunk_len - sizeof(struct sctp_chunkhdr);
+}
+
+static inline __u16 sctp_ftsnchk_len(const struct sctp_stream *stream)
+{
+ return stream->si->ftsn_chunk_len;
+}
+
+static inline __u16 sctp_ftsnhdr_len(const struct sctp_stream *stream)
+{
+ return stream->si->ftsn_chunk_len - sizeof(struct sctp_chunkhdr);
+}
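+
+/* Accessor sketch (illustrative): per-stream state is reached through
+ * the genradix wrappers, and the data chunk header size depends on
+ * whether the association negotiated interleaving (I-DATA vs DATA):
+ *
+ *	struct sctp_stream_out *so = SCTP_SO(stream, sid);
+ *	if (so->state == SCTP_STREAM_OPEN)
+ *		chunk_hdr_len = sctp_datachk_len(stream);
+ */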
+
+/* SCTP_GET_ASSOC_STATS counters */
+struct sctp_priv_assoc_stats {
+ /* Maximum RTO observed in the association. The value is set to 0
+ * if no RTO measurement has taken place. The transport on which
+ * max_rto was observed is returned in obs_rto_ipaddr.
+ */
+ struct sockaddr_storage obs_rto_ipaddr;
+ __u64 max_obs_rto;
+ /* Total In and Out SACKs received and sent */
+ __u64 isacks;
+ __u64 osacks;
+ /* Total packets sent and received */
+ __u64 opackets;
+ __u64 ipackets;
+ /* Total retransmitted chunks */
+ __u64 rtxchunks;
+ /* TSN received > next expected */
+ __u64 outofseqtsns;
+ /* Duplicate Chunks received */
+ __u64 idupchunks;
+ /* Gap Ack Blocks received */
+ __u64 gapcnt;
+ /* Unordered data chunks sent and received */
+ __u64 ouodchunks;
+ __u64 iuodchunks;
+ /* Ordered data chunks sent and received */
+ __u64 oodchunks;
+ __u64 iodchunks;
+ /* Control chunks sent and received */
+ __u64 octrlchunks;
+ __u64 ictrlchunks;
+};
+
+/* RFC2960
+ *
+ * 12. Recommended Transmission Control Block (TCB) Parameters
+ *
+ * This section details a recommended set of parameters that should
+ * be contained within the TCB for an implementation. This section is
+ * for illustrative purposes and should not be deemed as requirements
+ * on an implementation or as an exhaustive list of all parameters
+ * inside an SCTP TCB. Each implementation may need its own additional
+ * parameters for optimization.
+ */
+
+
+/* Here we have information about each individual association. */
+struct sctp_association {
+
+ /* A base structure common to endpoint and association.
+ * In this context, it represents the association's view
+ * of the local endpoint of the association.
+ */
+ struct sctp_ep_common base;
+
+ /* Associations on the same socket. */
+ struct list_head asocs;
+
+ /* association id. */
+ sctp_assoc_t assoc_id;
+
+ /* This is our parent endpoint. */
+ struct sctp_endpoint *ep;
+
+ /* These are those association elements needed in the cookie. */
+ struct sctp_cookie c;
+
+ /* This is all information about our peer. */
+ struct {
+ /* transport_addr_list
+ *
+ * Peer : A list of SCTP transport addresses that the
+ * Transport : peer is bound to. This information is derived
+ * Address : from the INIT or INIT ACK and is used to
+ * List : associate an inbound packet with a given
+ * : association. Normally this information is
+ * : hashed or keyed for quick lookup and access
+ * : of the TCB.
+ * : The list is also initialized with the list
+ * : of addresses passed with the sctp_connectx()
+ * : call.
+ *
+ * It is a list of SCTP_transport's.
+ */
+ struct list_head transport_addr_list;
+
+ /* rwnd
+ *
+ * Peer Rwnd : Current calculated value of the peer's rwnd.
+ */
+ __u32 rwnd;
+
+ /* transport_count
+ *
+ * Peer : A count of the number of peer addresses
+ * Transport : in the Peer Transport Address List.
+ * Address :
+ * Count :
+ */
+ __u16 transport_count;
+
+ /* port
+ * The transport layer port number.
+ */
+ __u16 port;
+
+ /* primary_path
+ *
+ * Primary : This is the current primary destination
+ * Path : transport address of the peer endpoint. It
+ * : may also specify a source transport address
+ * : on this endpoint.
+ *
+ * All of these paths live on transport_addr_list.
+ *
+ * At the bakeoffs, we discovered that the intent of
+ * primaryPath is that it only changes when the ULP
+ * asks to have it changed. We add the activePath to
+ * designate the connection we are currently using to
+ * transmit new data and most control chunks.
+ */
+ struct sctp_transport *primary_path;
+
+ /* Cache the primary path address here, when we
+ * need an address for msg_name.
+ */
+ union sctp_addr primary_addr;
+
+ /* active_path
+ * The path that we are currently using to
+ * transmit new data and most control chunks.
+ */
+ struct sctp_transport *active_path;
+
+ /* retran_path
+ *
+ * RFC2960 6.4 Multi-homed SCTP Endpoints
+ * ...
+ * Furthermore, when its peer is multi-homed, an
+ * endpoint SHOULD try to retransmit a chunk to an
+ * active destination transport address that is
+ * different from the last destination address to
+ * which the DATA chunk was sent.
+ */
+ struct sctp_transport *retran_path;
+
+ /* Pointer to last transport I have sent on. */
+ struct sctp_transport *last_sent_to;
+
+ /* This is the last transport I have received DATA on. */
+ struct sctp_transport *last_data_from;
+
+ /*
+ * Mapping An array of bits or bytes indicating which out of
+ * Array order TSN's have been received (relative to the
+ * Last Rcvd TSN). If no gaps exist, i.e. no out of
+ * order packets have been received, this array
+ * will be set to all zero. This structure may be
+ * in the form of a circular buffer or bit array.
+ *
+ * Last Rcvd : This is the last TSN received in
+ * TSN : sequence. This value is set initially by
+ * : taking the peer's Initial TSN, received in
+ * : the INIT or INIT ACK chunk, and subtracting
+ * : one from it.
+ *
+ * Throughout most of the specification this is called the
+ * "Cumulative TSN ACK Point". In this case, we
+ * ignore the advice in 12.2 in favour of the term
+ * used in the bulk of the text. This value is hidden
+ * in tsn_map--we get it by calling sctp_tsnmap_get_ctsn().
+ */
+ struct sctp_tsnmap tsn_map;
+
+ /* This mask is used to disable sending the ASCONF chunk
+ * with specified parameter to peer.
+ */
+ __be16 addip_disabled_mask;
+
+ /* These are capabilities which our peer advertised. */
+ __u16 ecn_capable:1, /* Can peer do ECN? */
+ ipv4_address:1, /* Peer understands IPv4 addresses? */
+ ipv6_address:1, /* Peer understands IPv6 addresses? */
+ hostname_address:1, /* Peer understands DNS addresses? */
+ asconf_capable:1, /* Does peer support ADDIP? */
+ prsctp_capable:1, /* Can peer do PR-SCTP? */
+ reconf_capable:1, /* Can peer do RE-CONFIG? */
+ intl_capable:1, /* Can peer do INTERLEAVE */
+ auth_capable:1, /* Is peer doing SCTP-AUTH? */
+ /* sack_needed:
+ * This flag indicates if the next received
+ * packet is to be responded to with a
+ * SACK. This is initialized to 0. When a packet
+ * is received sack_cnt is incremented. If this value
+ * reaches 2 or more, a SACK is sent and the
+ * value is reset to 0. Note: This is used only
+ * when no DATA chunks are received out of
+ * order. When DATA chunks are out of order,
+ * SACK's are not delayed (see Section 6).
+ */
+ sack_needed:1, /* Do we need to sack the peer? */
+ sack_generation:1,
+ zero_window_announced:1;
+
+ __u32 sack_cnt;
+
+ __u32 adaptation_ind; /* Adaptation Code point. */
+
+ struct sctp_inithdr_host i;
+ void *cookie;
+ int cookie_len;
+
+ /* ADDIP Section 4.2 Upon reception of an ASCONF Chunk.
+ * C1) ... "Peer-Serial-Number". This value MUST be initialized to the
+ * Initial TSN Value minus 1.
+ */
+ __u32 addip_serial;
+
+ /* SCTP-AUTH: We need to know the peer's random number, hmac list
+ * and authenticated chunk list. All that is part of the
+ * cookie and these are just pointers to those locations
+ */
+ struct sctp_random_param *peer_random;
+ struct sctp_chunks_param *peer_chunks;
+ struct sctp_hmac_algo_param *peer_hmacs;
+ } peer;
+
+ /* State : A state variable indicating what state the
+ * : association is in, i.e. COOKIE-WAIT,
+ * : COOKIE-ECHOED, ESTABLISHED, SHUTDOWN-PENDING,
+ * : SHUTDOWN-SENT, SHUTDOWN-RECEIVED, SHUTDOWN-ACK-SENT.
+ *
+ * Note: No "CLOSED" state is illustrated since if a
+ * association is "CLOSED" its TCB SHOULD be removed.
+ *
+ * In this implementation we DO have a CLOSED
+ * state which is used during initiation and shutdown.
+ *
+ * State takes values from SCTP_STATE_*.
+ */
+ enum sctp_state state;
+
+ /* Overall : The overall association error count.
+ * Error Count : [Clear this any time I get something.]
+ */
+ int overall_error_count;
+
+ /* The cookie life I award for any cookie. */
+ ktime_t cookie_life;
+
+ /* These are the association's initial, max, and min RTO values.
+ * These values will be initialized by system defaults, but can
+ * be modified via the SCTP_RTOINFO socket option.
+ */
+ unsigned long rto_initial;
+ unsigned long rto_max;
+ unsigned long rto_min;
+
+ /* Maximum number of new data packets that can be sent in a burst. */
+ int max_burst;
+
+ /* This is the max_retrans value for the association. This value will
+ * be initialized from system defaults, but can be
+ * modified by the SCTP_ASSOCINFO socket option.
+ */
+ int max_retrans;
+
+ /* This is the partially failed retrans value inherited by this
+ * association's transports. It can be changed using the
+ * SCTP_PEER_ADDR_THLDS socket option.
+ */
+ __u16 pf_retrans;
+ /* Used for primary path switchover. */
+ __u16 ps_retrans;
+
+ /* Maximum number of times the endpoint will retransmit INIT */
+ __u16 max_init_attempts;
+
+ /* How many times have we resent an INIT? */
+ __u16 init_retries;
+
+ /* The largest timeout or RTO value to use in attempting an INIT */
+ unsigned long max_init_timeo;
+
+ /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to
+ * the destination address every heartbeat interval. This value
+ * will be inherited by all new transports.
+ */
+ unsigned long hbinterval;
+ unsigned long probe_interval;
+
+ __be16 encap_port;
+
+ /* This is the max_retrans value for new transports in the
+ * association.
+ */
+ __u16 pathmaxrxt;
+
+ __u32 flowlabel;
+ __u8 dscp;
+
+ /* Flag that path mtu update is pending */
+ __u8 pmtu_pending;
+
+ /* Association : The smallest PMTU discovered for all of the
+ * PMTU : peer's transport addresses.
+ */
+ __u32 pathmtu;
+
+ /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
+ __u32 param_flags;
+
+ __u32 sackfreq;
+ /* SACK delay timeout */
+ unsigned long sackdelay;
+
+ unsigned long timeouts[SCTP_NUM_TIMEOUT_TYPES];
+ struct timer_list timers[SCTP_NUM_TIMEOUT_TYPES];
+
+ /* Transport to which SHUTDOWN chunk was last sent. */
+ struct sctp_transport *shutdown_last_sent_to;
+
+ /* Transport to which INIT chunk was last sent. */
+ struct sctp_transport *init_last_sent_to;
+
+ /* How many times have we resent a SHUTDOWN */
+ int shutdown_retries;
+
+ /* Next TSN : The next TSN number to be assigned to a new
+ * : DATA chunk. This is sent in the INIT or INIT
+ * : ACK chunk to the peer and incremented each
+ * : time a DATA chunk is assigned a TSN
+ * : (normally just prior to transmit or during
+ * : fragmentation).
+ */
+ __u32 next_tsn;
+
+ /*
+ * Last Rcvd : This is the last TSN received in sequence. This value
+ * TSN : is set initially by taking the peer's Initial TSN,
+ * : received in the INIT or INIT ACK chunk, and
+ * : subtracting one from it.
+ *
+ * Most of RFC 2960 refers to this as the Cumulative TSN Ack Point.
+ */
+
+ __u32 ctsn_ack_point;
+
+ /* PR-SCTP Advanced.Peer.Ack.Point */
+ __u32 adv_peer_ack_point;
+
+ /* Highest TSN that is acknowledged by incoming SACKs. */
+ __u32 highest_sacked;
+
+ /* TSN marking the fast recovery exit point */
+ __u32 fast_recovery_exit;
+
+ /* Flag to track the current fast recovery state */
+ __u8 fast_recovery;
+
+ /* The number of unacknowledged data chunks. Reported through
+ * the SCTP_STATUS sockopt.
+ */
+ __u16 unack_data;
+
+ /* The total number of data chunks that we've had to retransmit
+ * as the result of a T3 timer expiration
+ */
+ __u32 rtx_data_chunks;
+
+ /* This is the association's receive buffer space. This value is used
+ * to set a_rwnd field in an INIT or a SACK chunk.
+ */
+ __u32 rwnd;
+
+ /* This is the last advertised value of rwnd over a SACK chunk. */
+ __u32 a_rwnd;
+
+ /* Number of bytes by which the rwnd has slopped. The rwnd is allowed
+ * to slop over a maximum of the association's frag_point.
+ */
+ __u32 rwnd_over;
+
+ /* Keeps track of rwnd pressure. This happens when we have
+ * a window, but no receive buffer (i.e. small packets). This one
+ * is released slowly (1 PMTU at a time).
+ */
+ __u32 rwnd_press;
+
+ /* This is the sndbuf size in use for the association.
+ * This corresponds to the sndbuf size for the association,
+ * as specified in the sk->sndbuf.
+ */
+ int sndbuf_used;
+
+ /* This is the amount of memory that this association has allocated
+ * in the receive path at any given time.
+ */
+ atomic_t rmem_alloc;
+
+ /* This is the wait queue head for send requests waiting on
+ * the association sndbuf space.
+ */
+ wait_queue_head_t wait;
+
+ /* The message size at which SCTP fragmentation will occur. */
+ __u32 frag_point;
+ __u32 user_frag;
+
+ /* Counter used to count INIT errors. */
+ int init_err_counter;
+
+ /* Count the number of INIT cycles (for doubling timeout). */
+ int init_cycle;
+
+ /* Default send parameters. */
+ __u16 default_stream;
+ __u16 default_flags;
+ __u32 default_ppid;
+ __u32 default_context;
+ __u32 default_timetolive;
+
+ /* Default receive parameters */
+ __u32 default_rcv_context;
+
+ /* Stream arrays */
+ struct sctp_stream stream;
+
+ /* All outbound chunks go through this structure. */
+ struct sctp_outq outqueue;
+
+ /* A smart pipe that will handle reordering and fragmentation,
+ * as well as handle passing events up to the ULP.
+ */
+ struct sctp_ulpq ulpq;
+
+ /* Last TSN that caused an ECNE Chunk to be sent. */
+ __u32 last_ecne_tsn;
+
+ /* Last TSN that caused a CWR Chunk to be sent. */
+ __u32 last_cwr_tsn;
+
+ /* How many duplicated TSNs have we seen? */
+ int numduptsns;
+
+ /* These are to support
+ * "SCTP Extensions for Dynamic Reconfiguration of IP Addresses
+ * and Enforcement of Flow and Message Limits"
+ * <draft-ietf-tsvwg-addip-sctp-02.txt>
+ * or "ADDIP" for short.
+ */
+
+
+
+ /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
+ *
+ * R1) One and only one ASCONF Chunk MAY be in transit and
+ * unacknowledged at any one time. If a sender, after sending
+ * an ASCONF chunk, decides it needs to transfer another
+ * ASCONF Chunk, it MUST wait until the ASCONF-ACK Chunk
+ * returns from the previous ASCONF Chunk before sending a
+ * subsequent ASCONF. Note this restriction binds each side,
+ * so at any time two ASCONF may be in-transit on any given
+ * association (one sent from each endpoint).
+ *
+ * [This is our one-and-only-one ASCONF in flight. If we do
+ * not have an ASCONF in flight, this is NULL.]
+ */
+ struct sctp_chunk *addip_last_asconf;
+
+ /* ADDIP Section 5.2 Upon reception of an ASCONF Chunk.
+ *
+ * This is needed to implement items E1 - E4 of the updated
+ * spec. Here is the justification:
+ *
+ * Since the peer may bundle multiple ASCONF chunks toward us,
+ * we now need the ability to cache multiple ACKs. The section
+ * describes in detail how they are cached and cleaned up.
+ */
+ struct list_head asconf_ack_list;
+
+ /* These ASCONF chunks are waiting to be sent.
+ *
+ * These chunks can't be pushed to outqueue until receiving
+ * ASCONF_ACK for the previous ASCONF indicated by
+ * addip_last_asconf, so as to guarantee that only one ASCONF
+ * is in flight at any time.
+ *
+ * ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
+ *
+ * In defining the ASCONF Chunk transfer procedures, it is
+ * essential that these transfers MUST NOT cause congestion
+ * within the network. To achieve this, we place these
+ * restrictions on the transfer of ASCONF Chunks:
+ *
+ * R1) One and only one ASCONF Chunk MAY be in transit and
+ * unacknowledged at any one time. If a sender, after sending
+ * an ASCONF chunk, decides it needs to transfer another
+ * ASCONF Chunk, it MUST wait until the ASCONF-ACK Chunk
+ * returns from the previous ASCONF Chunk before sending a
+ * subsequent ASCONF. Note this restriction binds each side,
+ * so at any time two ASCONF may be in-transit on any given
+ * association (one sent from each endpoint).
+ *
+ *
+ * [I really think this is EXACTLY the sort of intelligence
+ * which already resides in sctp_outq. Please move this
+ * queue and its supporting logic down there. --piggy]
+ */
+ struct list_head addip_chunk_list;
+
+ /* ADDIP Section 4.1 ASCONF Chunk Procedures
+ *
+ * A2) A serial number should be assigned to the Chunk. The
+ * serial number SHOULD be a monotonically increasing
+ * number. The serial number SHOULD be initialized at
+ * the start of the association to the same value as the
+ * Initial TSN and every time a new ASCONF chunk is created
+ * it is incremented by one after assigning the serial number
+ * to the newly created chunk.
+ *
+ * ADDIP
+ * 3.1.1 Address/Stream Configuration Change Chunk (ASCONF)
+ *
+ * Serial Number : 32 bits (unsigned integer)
+ *
+ * This value represents a Serial Number for the ASCONF
+ * Chunk. The valid range of Serial Number is from 0 to
+ * 4294967295 (2^32 - 1). Serial Numbers wrap back to 0
+ * after reaching 4294967295.
+ */
+ __u32 addip_serial;
+ int src_out_of_asoc_ok;
+ union sctp_addr *asconf_addr_del_pending;
+ struct sctp_transport *new_transport;
+
+ /* SCTP AUTH: list of the endpoint shared keys. These
+ * keys are provided out of band by the user application
+ * and can't change during the lifetime of the association
+ */
+ struct list_head endpoint_shared_keys;
+
+ /* SCTP AUTH:
+ * The currently generated association shared key (secret)
+ */
+ struct sctp_auth_bytes *asoc_shared_key;
+ struct sctp_shared_key *shkey;
+
+ /* SCTP AUTH: hmac id of the first peer requested algorithm
+ * that we support.
+ */
+ __u16 default_hmac_id;
+
+ __u16 active_key_id;
+
+ __u8 need_ecne:1, /* Need to send an ECNE Chunk? */
+ temp:1, /* Is it a temporary association? */
+ pf_expose:2, /* Expose pf state? */
+ force_delay:1;
+
+ __u8 strreset_enable;
+ __u8 strreset_outstanding; /* request param count on the fly */
+
+ __u32 strreset_outseq; /* Update after receiving response */
+ __u32 strreset_inseq; /* Update after receiving request */
+ __u32 strreset_result[2]; /* save the results of last 2 responses */
+
+ struct sctp_chunk *strreset_chunk; /* save request chunk */
+
+ struct sctp_priv_assoc_stats stats;
+
+ int sent_cnt_removable;
+
+ __u16 subscribe;
+
+ __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
+ __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+
+ /* Security identifiers from incoming (INIT). These are set by
+ * security_sctp_assoc_request(). These will only be used by
+ * SCTP TCP type sockets and peeled off connections as they
+ * cause a new socket to be generated. security_sctp_sk_clone()
+ * will then plug these into the new socket.
+ */
+
+ u32 secid;
+ u32 peer_secid;
+
+ struct rcu_head rcu;
+};
+
+
+/* An eyecatcher for determining if we are really looking at an
+ * association data structure.
+ */
+enum {
+ SCTP_ASSOC_EYECATCHER = 0xa550c123,
+};
+
+/* Recover the outer association structure. */
+static inline struct sctp_association *sctp_assoc(struct sctp_ep_common *base)
+{
+ struct sctp_association *asoc;
+
+ asoc = container_of(base, struct sctp_association, base);
+ return asoc;
+}
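+
+/* Dispatch sketch (illustrative): code holding only a struct
+ * sctp_ep_common uses the runtime type to recover the full object:
+ *
+ *	if (rcvr->type == SCTP_EP_TYPE_ASSOCIATION)
+ *		asoc = sctp_assoc(rcvr);
+ *	else
+ *		ep = sctp_ep(rcvr);
+ */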
+
+/* These are function signatures for manipulating associations. */
+
+
+struct sctp_association *
+sctp_association_new(const struct sctp_endpoint *ep, const struct sock *sk,
+ enum sctp_scope scope, gfp_t gfp);
+void sctp_association_free(struct sctp_association *);
+void sctp_association_put(struct sctp_association *);
+void sctp_association_hold(struct sctp_association *);
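+
+/* Lifetime sketch (illustrative): associations are reference counted;
+ * sctp_association_free() tears one down when it is no longer needed:
+ *
+ *	asoc = sctp_association_new(ep, sk, SCTP_SCOPE_GLOBAL, GFP_KERNEL);
+ *	if (asoc) {
+ *		... use the association ...
+ *		sctp_association_free(asoc);
+ *	}
+ */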
+
+struct sctp_transport *sctp_assoc_choose_alter_transport(
+ struct sctp_association *, struct sctp_transport *);
+void sctp_assoc_update_retran_path(struct sctp_association *);
+struct sctp_transport *sctp_assoc_lookup_paddr(const struct sctp_association *,
+ const union sctp_addr *);
+int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
+ const union sctp_addr *laddr);
+struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *,
+ const union sctp_addr *address,
+ const gfp_t gfp,
+ const int peer_state);
+void sctp_assoc_del_peer(struct sctp_association *asoc,
+ const union sctp_addr *addr);
+void sctp_assoc_rm_peer(struct sctp_association *asoc,
+ struct sctp_transport *peer);
+void sctp_assoc_control_transport(struct sctp_association *asoc,
+ struct sctp_transport *transport,
+ enum sctp_transport_cmd command,
+ sctp_sn_error_t error);
+struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *, __u32);
+void sctp_assoc_migrate(struct sctp_association *, struct sock *);
+int sctp_assoc_update(struct sctp_association *old,
+ struct sctp_association *new);
+
+__u32 sctp_association_get_next_tsn(struct sctp_association *);
+
+void sctp_assoc_update_frag_point(struct sctp_association *asoc);
+void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu);
+void sctp_assoc_sync_pmtu(struct sctp_association *asoc);
+void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
+void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
+void sctp_assoc_set_primary(struct sctp_association *,
+ struct sctp_transport *);
+void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
+ struct sctp_transport *);
+int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
+ enum sctp_scope scope, gfp_t gfp);
+int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *,
+ struct sctp_cookie*,
+ gfp_t gfp);
+int sctp_assoc_set_id(struct sctp_association *, gfp_t);
+void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc);
+struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
+ const struct sctp_association *asoc,
+ __be32 serial);
+void sctp_asconf_queue_teardown(struct sctp_association *asoc);
+
+int sctp_cmp_addr_exact(const union sctp_addr *ss1,
+ const union sctp_addr *ss2);
+struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc);
+
+/* A convenience structure to parse out SCTP specific CMSGs. */
+struct sctp_cmsgs {
+ struct sctp_initmsg *init;
+ struct sctp_sndrcvinfo *srinfo;
+ struct sctp_sndinfo *sinfo;
+ struct sctp_prinfo *prinfo;
+ struct sctp_authinfo *authinfo;
+ struct msghdr *addrs_msg;
+};
+
+/* Structure for tracking memory objects */
+struct sctp_dbg_objcnt_entry {
+ char *label;
+ atomic_t *counter;
+};
+
+#endif /* __sctp_structs_h__ */
diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h
new file mode 100644
index 000000000..616ae0d49
--- /dev/null
+++ b/include/net/sctp/tsnmap.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These are the definitions needed for the tsnmap type. The tsnmap is used
+ * to track out of order TSNs received.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+#include <net/sctp/constants.h>
+
+#ifndef __sctp_tsnmap_h__
+#define __sctp_tsnmap_h__
+
+/* RFC 2960 12.2 Parameters necessary per association (i.e. the TCB)
+ * Mapping An array of bits or bytes indicating which out of
+ * Array order TSN's have been received (relative to the
+ * Last Rcvd TSN). If no gaps exist, i.e. no out of
+ * order packets have been received, this array
+ * will be set to all zero. This structure may be
+ * in the form of a circular buffer or bit array.
+ */
+struct sctp_tsnmap {
+ /* This array counts the number of chunks with each TSN.
+ * It points at one of the two buffers between which we
+ * ping-pong.
+ */
+ unsigned long *tsn_map;
+
+ /* This is the TSN at tsn_map[0]. */
+ __u32 base_tsn;
+
+ /* Last Rcvd : This is the last TSN received in
+ * TSN : sequence. This value is set initially by
+ * : taking the peer's Initial TSN, received in
+ * : the INIT or INIT ACK chunk, and subtracting
+ * : one from it.
+ *
+ * Throughout most of the specification this is called the
+ * "Cumulative TSN ACK Point". In this case, we
+ * ignore the advice in 12.2 in favour of the term
+ * used in the bulk of the text.
+ */
+ __u32 cumulative_tsn_ack_point;
+
+ /* This is the highest TSN we've marked. */
+ __u32 max_tsn_seen;
+
+ /* This is the minimum number of TSNs we can track. This corresponds
+ * to the size of tsn_map.
+ */
+ __u16 len;
+
+ /* Data chunks pending receipt; used by the SCTP_STATUS sockopt */
+ __u16 pending_data;
+
+ /* Record duplicate TSNs here. We clear this after
+ * every SACK. Store up to SCTP_MAX_DUP_TSNS worth of
+ * information.
+ */
+ __u16 num_dup_tsns;
+ __be32 dup_tsns[SCTP_MAX_DUP_TSNS];
+};
+
+struct sctp_tsnmap_iter {
+ __u32 start;
+};
+
+/* Initialize a block of memory as a tsnmap. */
+struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *, __u16 len,
+ __u32 initial_tsn, gfp_t gfp);
+
+void sctp_tsnmap_free(struct sctp_tsnmap *map);
+
+/* Test the tracking state of this TSN.
+ * Returns:
+ * 0 if the TSN has not yet been seen
+ * >0 if the TSN has been seen (duplicate)
+ * <0 if the TSN is invalid (too large to track)
+ */
+int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
+
+/* Mark this TSN as seen. */
+int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn,
+ struct sctp_transport *trans);
+
+/* Mark this TSN and all lower as seen. */
+void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);
+
+/* Retrieve the Cumulative TSN ACK Point. */
+static inline __u32 sctp_tsnmap_get_ctsn(const struct sctp_tsnmap *map)
+{
+ return map->cumulative_tsn_ack_point;
+}
+
+/* Retrieve the highest TSN we've seen. */
+static inline __u32 sctp_tsnmap_get_max_tsn_seen(const struct sctp_tsnmap *map)
+{
+ return map->max_tsn_seen;
+}
+
+/* How many duplicate TSNs are stored? */
+static inline __u16 sctp_tsnmap_num_dups(struct sctp_tsnmap *map)
+{
+ return map->num_dup_tsns;
+}
+
+/* Return pointer to duplicate tsn array as needed by SACK. */
+static inline __be32 *sctp_tsnmap_get_dups(struct sctp_tsnmap *map)
+{
+ map->num_dup_tsns = 0;
+ return map->dup_tsns;
+}
+
+/* How many gap ack blocks do we have recorded? */
+__u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
+ struct sctp_gap_ack_block *gabs);
+
+/* Refresh the count on pending data. */
+__u16 sctp_tsnmap_pending(struct sctp_tsnmap *map);
+
+/* Is there a gap in the TSN map? */
+static inline int sctp_tsnmap_has_gap(const struct sctp_tsnmap *map)
+{
+ return map->cumulative_tsn_ack_point != map->max_tsn_seen;
+}
+
+/* Mark a duplicate TSN. Note: limit the storage of duplicate TSN
+ * information.
+ */
+static inline void sctp_tsnmap_mark_dup(struct sctp_tsnmap *map, __u32 tsn)
+{
+ if (map->num_dup_tsns < SCTP_MAX_DUP_TSNS)
+ map->dup_tsns[map->num_dup_tsns++] = htonl(tsn);
+}
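+
+/* Tracking sketch (illustrative): a newly received TSN is checked
+ * first, then either marked as seen or recorded as a duplicate,
+ * following the return convention documented above:
+ *
+ *	rc = sctp_tsnmap_check(map, tsn);
+ *	if (rc == 0)
+ *		sctp_tsnmap_mark(map, tsn, trans);
+ *	else if (rc > 0)
+ *		sctp_tsnmap_mark_dup(map, tsn);
+ *	else
+ *		... TSN too far outside the window; drop ...
+ */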
+
+/* Renege a TSN that was seen. */
+void sctp_tsnmap_renege(struct sctp_tsnmap *, __u32 tsn);
+
+#endif /* __sctp_tsnmap_h__ */
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
new file mode 100644
index 000000000..994e984ee
--- /dev/null
+++ b/include/net/sctp/ulpevent.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 Nokia, Inc.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * These are the definitions needed for the sctp_ulpevent type. The
+ * sctp_ulpevent type is used to carry information from the state machine
+ * upwards to the ULP.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#ifndef __sctp_ulpevent_h__
+#define __sctp_ulpevent_h__
+
+/* A structure to carry information to the ULP (e.g. Sockets API) */
+/* Warning: This sits inside an skb.cb[] area. Be very careful of
+ * growing this structure, as it is at the maximum limit now.
+ *
+ * sctp_ulpevent is saved in skb->cb (48 bytes), whose last 4 bytes
+ * have been taken by sock_skb_cb, so here it has to use 'packed'
+ * to make sctp_ulpevent fit into the remaining 44 bytes.
+ */
+struct sctp_ulpevent {
+ struct sctp_association *asoc;
+ struct sctp_chunk *chunk;
+ unsigned int rmem_len;
+ union {
+ __u32 mid;
+ __u16 ssn;
+ };
+ union {
+ __u32 ppid;
+ __u32 fsn;
+ };
+ __u32 tsn;
+ __u32 cumtsn;
+ __u16 stream;
+ __u16 flags;
+ __u16 msg_flags;
+} __packed;
+
+/* Retrieve the skb this event sits inside of. */
+static inline struct sk_buff *sctp_event2skb(const struct sctp_ulpevent *ev)
+{
+ return container_of((void *)ev, struct sk_buff, cb);
+}
+
+/* Retrieve & cast the event sitting inside the skb. */
+static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
+{
+ return (struct sctp_ulpevent *)skb->cb;
+}
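+
+/* Round-trip sketch (illustrative): because the event lives inside the
+ * skb's control block, the two converters are exact inverses:
+ *
+ *	struct sctp_ulpevent *ev = sctp_skb2event(skb);
+ *	BUG_ON(sctp_event2skb(ev) != skb);
+ */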
+
+void sctp_ulpevent_free(struct sctp_ulpevent *);
+int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list);
+
+struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
+ const struct sctp_association *asoc,
+ __u16 flags,
+ __u16 state,
+ __u16 error,
+ __u16 outbound,
+ __u16 inbound,
+ struct sctp_chunk *chunk,
+ gfp_t gfp);
+
+void sctp_ulpevent_notify_peer_addr_change(struct sctp_transport *transport,
+ int state, int error);
+
+struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ __u16 flags,
+ gfp_t gfp);
+struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ __u16 flags,
+ __u32 error,
+ gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_send_failed_event(
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ __u16 flags,
+ __u32 error,
+ gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
+ const struct sctp_association *asoc,
+ __u16 flags,
+ gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
+ const struct sctp_association *asoc,
+ __u32 indication, __u32 sid, __u32 seq,
+ __u32 flags, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication(
+ const struct sctp_association *asoc, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_authkey(
+ const struct sctp_association *asoc, __u16 key_id,
+ __u32 indication, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
+ const struct sctp_association *asoc, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
+ const struct sctp_association *asoc, __u16 flags,
+ __u16 stream_num, __be16 *stream_list, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
+ const struct sctp_association *asoc, __u16 flags,
+ __u32 local_tsn, __u32 remote_tsn, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
+ const struct sctp_association *asoc, __u16 flags,
+ __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_make_reassembled_event(
+ struct net *net, struct sk_buff_head *queue,
+ struct sk_buff *f_frag, struct sk_buff *l_frag);
+
+void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
+ struct msghdr *);
+void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
+ struct msghdr *);
+void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
+ struct msghdr *, struct sock *sk);
+
+__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
+
+static inline void sctp_ulpevent_type_set(__u16 *subscribe,
+ __u16 sn_type, __u8 on)
+{
+ if (sn_type > SCTP_SN_TYPE_MAX)
+ return;
+
+ if (on)
+ *subscribe |= (1 << (sn_type - SCTP_SN_TYPE_BASE));
+ else
+ *subscribe &= ~(1 << (sn_type - SCTP_SN_TYPE_BASE));
+}
+
+/* Is this event type enabled? */
+static inline bool sctp_ulpevent_type_enabled(__u16 subscribe, __u16 sn_type)
+{
+ if (sn_type > SCTP_SN_TYPE_MAX)
+ return false;
+
+ return subscribe & (1 << (sn_type - SCTP_SN_TYPE_BASE));
+}
+
+/* Given an event subscription, is this event enabled? */
+static inline bool sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
+ __u16 subscribe)
+{
+ __u16 sn_type;
+
+ if (!sctp_ulpevent_is_notification(event))
+ return true;
+
+ sn_type = sctp_ulpevent_get_notification_type(event);
+
+ return sctp_ulpevent_type_enabled(subscribe, sn_type);
+}
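+
+/* Usage sketch (illustrative): enable one notification type in a
+ * subscribe bitmap, then test whether a received event passes the
+ * filter. SCTP_ASSOC_CHANGE comes from the uapi sctp definitions and
+ * is assumed to be in scope here.
+ */
+static inline bool example_subscribed_assoc_change(
+					const struct sctp_ulpevent *event)
+{
+	__u16 subscribe = 0;
+
+	sctp_ulpevent_type_set(&subscribe, SCTP_ASSOC_CHANGE, 1);
+	return sctp_ulpevent_is_enabled(event, subscribe);
+}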
+
+#endif /* __sctp_ulpevent_h__ */
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
new file mode 100644
index 000000000..0eaf8650e
--- /dev/null
+++ b/include/net/sctp/ulpqueue.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 Nokia, Inc.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * These are the definitions needed for the sctp_ulpq type. The
+ * sctp_ulpq is the interface between the Upper Layer Protocol, or ULP,
+ * and the core SCTP state machine. This is the component which handles
+ * reassembly and ordering.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#ifndef __sctp_ulpqueue_h__
+#define __sctp_ulpqueue_h__
+
+/* Per-association state used to order, reassemble, and deliver
+ * events to the ULP (e.g. the Sockets API).
+ */
+struct sctp_ulpq {
+ char pd_mode;
+ struct sctp_association *asoc;
+ struct sk_buff_head reasm;
+ struct sk_buff_head reasm_uo;
+ struct sk_buff_head lobby;
+};
+
+/* Prototypes. */
+struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *,
+ struct sctp_association *);
+void sctp_ulpq_flush(struct sctp_ulpq *ulpq);
+void sctp_ulpq_free(struct sctp_ulpq *);
+
+/* Add a new DATA chunk for processing. */
+int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
+
+/* Add a new event for propagation to the ULP. */
+int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sk_buff_head *skb_list);
+
+/* Renege previously received chunks. */
+void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
+
+/* Perform partial delivery. */
+void sctp_ulpq_partial_delivery(struct sctp_ulpq *, gfp_t);
+
+/* Abort the partial delivery. */
+void sctp_ulpq_abort_pd(struct sctp_ulpq *, gfp_t);
+
+/* Clear the partial data delivery condition on this socket. */
+int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
+
+/* Skip over an SSN. */
+void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
+
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
+
+__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
+ struct sk_buff_head *list, __u16 needed);
+
+#endif /* __sctp_ulpqueue_h__ */
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
new file mode 100644
index 000000000..21e7fa2a1
--- /dev/null
+++ b/include/net/secure_seq.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_SECURE_SEQ
+#define _NET_SECURE_SEQ
+
+#include <linux/types.h>
+
+struct net;
+
+u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport);
+u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport);
+u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr);
+u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
+ __be16 sport, __be16 dport);
+u32 secure_tcpv6_ts_off(const struct net *net,
+ const __be32 *saddr, const __be32 *daddr);
+u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport);
+u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+ __be16 sport, __be16 dport);
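+
+/* Usage sketch (illustrative): the TCP connect path derives its initial
+ * sequence number from the connection 4-tuple; the argument values here
+ * are whatever the caller took from the connection request.
+ */
+static inline u32 example_tcp_isn(__be32 saddr, __be32 daddr,
+				  __be16 sport, __be16 dport)
+{
+	return secure_tcp_seq(saddr, daddr, sport, dport);
+}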
+
+#endif /* _NET_SECURE_SEQ */
diff --git a/include/net/seg6.h b/include/net/seg6.h
new file mode 100644
index 000000000..af668f17b
--- /dev/null
+++ b/include/net/seg6.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SR-IPv6 implementation
+ *
+ * Author:
+ * David Lebrun <david.lebrun@uclouvain.be>
+ */
+
+#ifndef _NET_SEG6_H
+#define _NET_SEG6_H
+
+#include <linux/net.h>
+#include <linux/ipv6.h>
+#include <linux/seg6.h>
+#include <linux/rhashtable-types.h>
+
+static inline void update_csum_diff4(struct sk_buff *skb, __be32 from,
+ __be32 to)
+{
+ __be32 diff[] = { ~from, to };
+
+ skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum);
+}
+
+static inline void update_csum_diff16(struct sk_buff *skb, __be32 *from,
+ __be32 *to)
+{
+ __be32 diff[] = {
+ ~from[0], ~from[1], ~from[2], ~from[3],
+ to[0], to[1], to[2], to[3],
+ };
+
+ skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum);
+}
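+
+/* Usage sketch (illustrative): when a 16-byte IPv6 destination address
+ * is rewritten in a packet carrying a CHECKSUM_COMPLETE value, the two
+ * helpers above fix skb->csum incrementally instead of recomputing it.
+ * 'hdr' and 'new_daddr' are hypothetical caller-provided pointers.
+ */
+static inline void example_rewrite_daddr(struct sk_buff *skb,
+					 struct ipv6hdr *hdr,
+					 struct in6_addr *new_daddr)
+{
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		update_csum_diff16(skb, (__be32 *)&hdr->daddr,
+				   (__be32 *)new_daddr);
+	hdr->daddr = *new_daddr;
+}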
+
+struct seg6_pernet_data {
+ struct mutex lock;
+ struct in6_addr __rcu *tun_src;
+#ifdef CONFIG_IPV6_SEG6_HMAC
+ struct rhashtable hmac_infos;
+#endif
+};
+
+static inline struct seg6_pernet_data *seg6_pernet(struct net *net)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ return net->ipv6.seg6_data;
+#else
+ return NULL;
+#endif
+}
+
+extern int seg6_init(void);
+extern void seg6_exit(void);
+extern int seg6_iptunnel_init(void);
+extern void seg6_iptunnel_exit(void);
+extern int seg6_local_init(void);
+extern void seg6_local_exit(void);
+
+extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced);
+extern struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags);
+extern void seg6_icmp_srh(struct sk_buff *skb, struct inet6_skb_parm *opt);
+extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
+ int proto);
+extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh);
+extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
+ u32 tbl_id);
+
+/* If the packet which invoked an ICMP error contains an SRH, return
+ * the true destination address from within the SRH; otherwise return
+ * NULL so the caller falls back to the destination address in the
+ * IPv6 header.
+ */
+static inline const struct in6_addr *seg6_get_daddr(struct sk_buff *skb,
+ struct inet6_skb_parm *opt)
+{
+ struct ipv6_sr_hdr *srh;
+
+ if (opt->flags & IP6SKB_SEG6) {
+ srh = (struct ipv6_sr_hdr *)(skb->data + opt->srhoff);
+ return &srh->segments[0];
+ }
+
+ return NULL;
+}
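+
+/* Usage sketch (illustrative): an ICMPv6 error handler would fall back
+ * to the IPv6 header's destination address when no SRH is present.
+ */
+static inline const struct in6_addr *example_true_daddr(struct sk_buff *skb,
+					struct inet6_skb_parm *opt)
+{
+	const struct in6_addr *daddr = seg6_get_daddr(skb, opt);
+
+	return daddr ? daddr : &ipv6_hdr(skb)->daddr;
+}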
+
+#endif
diff --git a/include/net/seg6_hmac.h b/include/net/seg6_hmac.h
new file mode 100644
index 000000000..2b5d2ee56
--- /dev/null
+++ b/include/net/seg6_hmac.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SR-IPv6 implementation
+ *
+ * Author:
+ * David Lebrun <david.lebrun@uclouvain.be>
+ */
+
+#ifndef _NET_SEG6_HMAC_H
+#define _NET_SEG6_HMAC_H
+
+#include <net/flow.h>
+#include <net/ip6_fib.h>
+#include <net/sock.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/route.h>
+#include <net/seg6.h>
+#include <linux/seg6_hmac.h>
+#include <linux/rhashtable-types.h>
+
+#define SEG6_HMAC_MAX_DIGESTSIZE 160
+#define SEG6_HMAC_RING_SIZE 256
+
+struct seg6_hmac_info {
+ struct rhash_head node;
+ struct rcu_head rcu;
+
+ u32 hmackeyid;
+ char secret[SEG6_HMAC_SECRET_LEN];
+ u8 slen;
+ u8 alg_id;
+};
+
+struct seg6_hmac_algo {
+ u8 alg_id;
+ char name[64];
+ struct crypto_shash * __percpu *tfms;
+ struct shash_desc * __percpu *shashs;
+};
+
+extern int seg6_hmac_compute(struct seg6_hmac_info *hinfo,
+ struct ipv6_sr_hdr *hdr, struct in6_addr *saddr,
+ u8 *output);
+extern struct seg6_hmac_info *seg6_hmac_info_lookup(struct net *net, u32 key);
+extern int seg6_hmac_info_add(struct net *net, u32 key,
+ struct seg6_hmac_info *hinfo);
+extern int seg6_hmac_info_del(struct net *net, u32 key);
+extern int seg6_push_hmac(struct net *net, struct in6_addr *saddr,
+ struct ipv6_sr_hdr *srh);
+extern bool seg6_hmac_validate_skb(struct sk_buff *skb);
+extern int seg6_hmac_init(void);
+extern void seg6_hmac_exit(void);
+extern int seg6_hmac_net_init(struct net *net);
+extern void seg6_hmac_net_exit(struct net *net);
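+
+/* Usage sketch (illustrative): compute the HMAC for an SRH into a
+ * buffer sized by SEG6_HMAC_MAX_DIGESTSIZE. 'hinfo', 'srh' and 'saddr'
+ * are hypothetical objects looked up/parsed by the caller.
+ */
+static inline int example_srh_hmac(struct seg6_hmac_info *hinfo,
+				   struct ipv6_sr_hdr *srh,
+				   struct in6_addr *saddr)
+{
+	u8 digest[SEG6_HMAC_MAX_DIGESTSIZE];
+
+	return seg6_hmac_compute(hinfo, srh, saddr, digest);
+}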
+
+#endif
diff --git a/include/net/seg6_local.h b/include/net/seg6_local.h
new file mode 100644
index 000000000..3fab9dec2
--- /dev/null
+++ b/include/net/seg6_local.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SR-IPv6 implementation
+ *
+ * Authors:
+ * David Lebrun <david.lebrun@uclouvain.be>
+ * eBPF support: Mathieu Xhonneux <m.xhonneux@gmail.com>
+ */
+
+#ifndef _NET_SEG6_LOCAL_H
+#define _NET_SEG6_LOCAL_H
+
+#include <linux/percpu.h>
+#include <linux/net.h>
+#include <linux/ipv6.h>
+
+extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
+ u32 tbl_id);
+extern bool seg6_bpf_has_valid_srh(struct sk_buff *skb);
+
+struct seg6_bpf_srh_state {
+ struct ipv6_sr_hdr *srh;
+ u16 hdrlen;
+ bool valid;
+};
+
+DECLARE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);
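+
+/* Usage sketch (illustrative): a seg6local BPF helper reads the per-CPU
+ * SRH state like this; preemption is assumed to already be disabled in
+ * that context.
+ */
+static inline bool example_srh_state_valid(void)
+{
+	struct seg6_bpf_srh_state *srh_state =
+		this_cpu_ptr(&seg6_bpf_srh_states);
+
+	return srh_state->valid;
+}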
+
+#endif
diff --git a/include/net/selftests.h b/include/net/selftests.h
new file mode 100644
index 000000000..e65e8d230
--- /dev/null
+++ b/include/net/selftests.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_SELFTESTS
+#define _NET_SELFTESTS
+
+#include <linux/ethtool.h>
+
+#if IS_ENABLED(CONFIG_NET_SELFTESTS)
+
+void net_selftest(struct net_device *ndev, struct ethtool_test *etest,
+ u64 *buf);
+int net_selftest_get_count(void);
+void net_selftest_get_strings(u8 *data);
+
+#else
+
+static inline void net_selftest(struct net_device *ndev, struct ethtool_test *etest,
+ u64 *buf)
+{
+}
+
+static inline int net_selftest_get_count(void)
+{
+ return 0;
+}
+
+static inline void net_selftest_get_strings(u8 *data)
+{
+}
+
+#endif
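+
+/* Usage sketch (illustrative): an Ethernet driver could wire these
+ * helpers straight into its ethtool ops; the 'foo_' names are
+ * hypothetical.
+ */
+static inline void foo_self_test(struct net_device *ndev,
+				 struct ethtool_test *etest, u64 *buf)
+{
+	net_selftest(ndev, etest, buf);
+}
+
+static inline int foo_sset_count(struct net_device *ndev, int sset)
+{
+	return sset == ETH_SS_TEST ? net_selftest_get_count() : -EOPNOTSUPP;
+}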
+#endif /* _NET_SELFTESTS */
diff --git a/include/net/slhc_vj.h b/include/net/slhc_vj.h
new file mode 100644
index 000000000..8fcf8908a
--- /dev/null
+++ b/include/net/slhc_vj.h
@@ -0,0 +1,184 @@
+#ifndef _SLHC_H
+#define _SLHC_H
+/*
+ * Definitions for tcp compression routines.
+ *
+ * $Header: slcompress.h,v 1.10 89/12/31 08:53:02 van Exp $
+ *
+ * Copyright (c) 1989 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by the University of California, Berkeley. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989:
+ * - Initial distribution.
+ *
+ *
+ * modified for KA9Q Internet Software Package by
+ * Katie Stevens (dkstevens@ucdavis.edu)
+ * University of California, Davis
+ * Computing Services
+ * - 01-31-90 initial adaptation
+ *
+ * - Feb 1991 Bill_Simpson@um.cc.umich.edu
+ * variable number of conversation slots
+ * allow zero or one slots
+ * separate routines
+ * status display
+ */
+
+/*
+ * Compressed packet format:
+ *
+ * The first octet contains the packet type (top 3 bits), TCP
+ * 'push' bit, and flags that indicate which of the 4 TCP sequence
+ * numbers have changed (bottom 5 bits). The next octet is a
+ * conversation number that associates a saved IP/TCP header with
+ * the compressed packet. The next two octets are the TCP checksum
+ * from the original datagram. The next 0 to 15 octets are
+ * sequence number changes, one change per bit set in the header
+ * (there may be no changes and there are two special cases where
+ * the receiver implicitly knows what changed -- see below).
+ *
+ * There are 5 numbers which can change (they are always inserted
+ * in the following order): TCP urgent pointer, window,
+ * acknowledgment, sequence number and IP ID. (The urgent pointer
+ * is different from the others in that its value is sent, not the
+ * change in value.) Since typical use of SLIP links is biased
+ * toward small packets (see comments on MTU/MSS below), changes
+ * use a variable length coding with one octet for numbers in the
+ * range 1 - 255 and 3 octets (0, MSB, LSB) for numbers in the
+ * range 256 - 65535 or 0. (If the change in sequence number or
+ * ack is more than 65535, an uncompressed packet is sent.)
+ */
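+
+/*
+ * Worked example (illustrative): a delta of 5 is sent as the single
+ * octet 0x05; a delta of 300 (0x012C) is sent as the three octets
+ * 0x00 0x01 0x2C (0, MSB, LSB); a delta of 0 must also use the
+ * three-octet form 0x00 0x00 0x00, since the leading zero octet is
+ * what signals it.
+ */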
+
+/*
+ * Packet types (must not conflict with IP protocol version)
+ *
+ * The top nibble of the first octet is the packet type. There are
+ * three possible types: IP (not proto TCP or tcp with one of the
+ * control flags set); uncompressed TCP (a normal IP/TCP packet but
+ * with the 8-bit protocol field replaced by an 8-bit connection id --
+ * this type of packet syncs the sender & receiver); and compressed
+ * TCP (described above).
+ *
+ * LSB of 4-bit field is TCP "PUSH" bit (a worthless anachronism) and
+ * is logically part of the 4-bit "changes" field that follows. Top
+ * three bits are actual packet type. For backward compatibility
+ * and in the interest of conserving bits, numbers are chosen so the
+ * IP protocol version number (4) which normally appears in this nibble
+ * means "IP packet".
+ */
+
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+/* SLIP compression masks for len/vers byte */
+#define SL_TYPE_IP 0x40
+#define SL_TYPE_UNCOMPRESSED_TCP 0x70
+#define SL_TYPE_COMPRESSED_TCP 0x80
+#define SL_TYPE_ERROR 0x00
+
+/* Bits in first octet of compressed packet */
+#define NEW_C 0x40 /* flag bits for what changed in a packet */
+#define NEW_I 0x20
+#define NEW_S 0x08
+#define NEW_A 0x04
+#define NEW_W 0x02
+#define NEW_U 0x01
+
+/* reserved, special-case values of above */
+#define SPECIAL_I (NEW_S|NEW_W|NEW_U) /* echoed interactive traffic */
+#define SPECIAL_D (NEW_S|NEW_A|NEW_W|NEW_U) /* unidirectional data */
+#define SPECIALS_MASK (NEW_S|NEW_A|NEW_W|NEW_U)
+
+#define TCP_PUSH_BIT 0x10
+
+/*
+ * data type and sizes conversion assumptions:
+ *
+ * VJ code KA9Q style generic
+ * u_char byte_t unsigned char 8 bits
+ * u_short int16 unsigned short 16 bits
+ * u_int int16 unsigned short 16 bits
+ * u_long unsigned long unsigned long 32 bits
+ * int int32 long 32 bits
+ */
+
+typedef __u8 byte_t;
+typedef __u32 int32;
+
+/*
+ * "state" data for each active tcp conversation on the wire. This is
+ * basically a copy of the entire IP/TCP header from the last packet
+ * we saw from the conversation together with a small identifier
+ * the transmit & receive ends of the line use to locate the saved header.
+ */
+struct cstate {
+ byte_t cs_this; /* connection id number (xmit) */
+ bool initialized; /* true if initialized */
+ struct cstate *next; /* next in ring (xmit) */
+ struct iphdr cs_ip; /* ip/tcp hdr from most recent packet */
+ struct tcphdr cs_tcp;
+ unsigned char cs_ipopt[64];
+ unsigned char cs_tcpopt[64];
+ int cs_hsize;
+};
+#define NULLSLSTATE (struct cstate *)0
+
+/*
+ * all the state data for one serial line (we need one of these per line).
+ */
+struct slcompress {
+ struct cstate *tstate; /* transmit connection states (array)*/
+ struct cstate *rstate; /* receive connection states (array)*/
+
+	byte_t tslot_limit;	/* highest transmit slot id (0 to limit) */
+	byte_t rslot_limit;	/* highest receive slot id (0 to limit) */
+
+ byte_t xmit_oldest; /* oldest xmit in ring */
+ byte_t xmit_current; /* most recent xmit id */
+ byte_t recv_current; /* most recent rcvd id */
+
+ byte_t flags;
+#define SLF_TOSS 0x01 /* tossing rcvd frames until id received */
+
+ int32 sls_o_nontcp; /* outbound non-TCP packets */
+ int32 sls_o_tcp; /* outbound TCP packets */
+ int32 sls_o_uncompressed; /* outbound uncompressed packets */
+ int32 sls_o_compressed; /* outbound compressed packets */
+ int32 sls_o_searches; /* searches for connection state */
+ int32 sls_o_misses; /* times couldn't find conn. state */
+
+ int32 sls_i_uncompressed; /* inbound uncompressed packets */
+ int32 sls_i_compressed; /* inbound compressed packets */
+ int32 sls_i_error; /* inbound error packets */
+ int32 sls_i_tossed; /* inbound packets tossed because of error */
+
+ int32 sls_i_runt;
+ int32 sls_i_badcheck;
+};
+#define NULLSLCOMPR (struct slcompress *)0
+
+/* In slhc.c: */
+struct slcompress *slhc_init(int rslots, int tslots);
+void slhc_free(struct slcompress *comp);
+
+int slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
+ unsigned char *ocp, unsigned char **cpp, int compress_cid);
+int slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize);
+int slhc_remember(struct slcompress *comp, unsigned char *icp, int isize);
+int slhc_toss(struct slcompress *comp);
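+
+/* Usage sketch (illustrative): a SLIP-style driver allocates one
+ * compressor per line and runs each outbound IP/TCP frame through
+ * slhc_compress(). 'ip_frame', 'ip_len' and 'work_buf' are hypothetical
+ * driver buffers; 'work_buf' must be at least 'ip_len' bytes.
+ */
+static inline int example_slhc_xmit(struct slcompress *comp,
+				    unsigned char *ip_frame, int ip_len,
+				    unsigned char *work_buf)
+{
+	unsigned char *cp = work_buf;
+
+	/* returns the new length; cp points at the frame to transmit */
+	return slhc_compress(comp, ip_frame, ip_len, work_buf, &cp, 1);
+}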
+
+#endif /* _SLHC_H */
diff --git a/include/net/smc.h b/include/net/smc.h
new file mode 100644
index 000000000..c926d3313
--- /dev/null
+++ b/include/net/smc.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Definitions for the SMC module (socket related)
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+#ifndef _SMC_H
+#define _SMC_H
+
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+struct sock;
+
+#define SMC_MAX_PNETID_LEN 16 /* Max. length of PNET id */
+
+struct smc_hashinfo {
+ rwlock_t lock;
+ struct hlist_head ht;
+};
+
+int smc_hash_sk(struct sock *sk);
+void smc_unhash_sk(struct sock *sk);
+
+/* SMCD/ISM device driver interface */
+struct smcd_dmb {
+ u64 dmb_tok;
+ u64 rgid;
+ u32 dmb_len;
+ u32 sba_idx;
+ u32 vlan_valid;
+ u32 vlan_id;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+};
+
+#define ISM_EVENT_DMB 0
+#define ISM_EVENT_GID 1
+#define ISM_EVENT_SWR 2
+
+#define ISM_RESERVED_VLANID 0x1FFF
+
+#define ISM_ERROR 0xFFFF
+
+struct smcd_event {
+ u32 type;
+ u32 code;
+ u64 tok;
+ u64 time;
+ u64 info;
+};
+
+struct smcd_dev;
+
+struct smcd_ops {
+ int (*query_remote_gid)(struct smcd_dev *dev, u64 rgid, u32 vid_valid,
+ u32 vid);
+ int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
+ int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
+ int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
+ int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
+ int (*set_vlan_required)(struct smcd_dev *dev);
+ int (*reset_vlan_required)(struct smcd_dev *dev);
+ int (*signal_event)(struct smcd_dev *dev, u64 rgid, u32 trigger_irq,
+ u32 event_code, u64 info);
+ int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
+ bool sf, unsigned int offset, void *data,
+ unsigned int size);
+ u8* (*get_system_eid)(void);
+ u16 (*get_chid)(struct smcd_dev *dev);
+};
+
+struct smcd_dev {
+ const struct smcd_ops *ops;
+ struct device dev;
+ void *priv;
+ u64 local_gid;
+ struct list_head list;
+ spinlock_t lock;
+ struct smc_connection **conn;
+ struct list_head vlan;
+ struct workqueue_struct *event_wq;
+ u8 pnetid[SMC_MAX_PNETID_LEN];
+ bool pnetid_by_user;
+ struct list_head lgr_list;
+ spinlock_t lgr_lock;
+ atomic_t lgr_cnt;
+ wait_queue_head_t lgrs_deleted;
+ u8 going_away : 1;
+};
+
+struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
+ const struct smcd_ops *ops, int max_dmbs);
+int smcd_register_dev(struct smcd_dev *smcd);
+void smcd_unregister_dev(struct smcd_dev *smcd);
+void smcd_free_dev(struct smcd_dev *smcd);
+void smcd_handle_event(struct smcd_dev *dev, struct smcd_event *event);
+void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit, u16 dmbemask);
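+
+/* Usage sketch (illustrative): an ISM device driver registers with the
+ * SMC core roughly like this; 'parent', 'my_ops' and the DMB count of
+ * 1920 are hypothetical driver-side values.
+ */
+static inline struct smcd_dev *example_ism_probe(struct device *parent,
+					const struct smcd_ops *my_ops)
+{
+	struct smcd_dev *smcd = smcd_alloc_dev(parent, "ism", my_ops, 1920);
+
+	if (smcd && smcd_register_dev(smcd)) {
+		smcd_free_dev(smcd);
+		smcd = NULL;
+	}
+	return smcd;
+}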
+#endif /* _SMC_H */
diff --git a/include/net/snmp.h b/include/net/snmp.h
new file mode 100644
index 000000000..468a67836
--- /dev/null
+++ b/include/net/snmp.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *
+ * SNMP MIB entries for the IP subsystem.
+ *
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ *
+ * We choose not to implement SNMP in the kernel (this would
+ * be silly as SNMP is a pain in the backside in places). We do
+ * however need to collect the MIB statistics and export them
+ * out of /proc (eventually).
+ */
+
+#ifndef _SNMP_H
+#define _SNMP_H
+
+#include <linux/cache.h>
+#include <linux/snmp.h>
+#include <linux/smp.h>
+
+/*
+ * MIBs are stored in arrays of unsigned long.
+ */
+/*
+ * struct snmp_mib{}
+ * - one entry per item exposed by a particular API (such as
+ *   /proc/net/snmp)
+ * - maps an entry's name to its index in the MIB array.
+ */
+struct snmp_mib {
+ const char *name;
+ int entry;
+};
+
+#define SNMP_MIB_ITEM(_name,_entry) { \
+ .name = _name, \
+ .entry = _entry, \
+}
+
+#define SNMP_MIB_SENTINEL { \
+ .name = NULL, \
+ .entry = 0, \
+}
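+
+/* Illustrative example: how a /proc/net/snmp-style table is built from
+ * the two macros above. IPSTATS_MIB_INPKTS comes from linux/snmp.h;
+ * the table name is hypothetical.
+ */
+static const struct snmp_mib example_mib_list[] __maybe_unused = {
+	SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INPKTS),
+	SNMP_MIB_SENTINEL
+};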
+
+/*
+ * We use unsigned longs for most mibs but u64 for ipstats.
+ */
+#include <linux/u64_stats_sync.h>
+
+/* IPstats */
+#define IPSTATS_MIB_MAX __IPSTATS_MIB_MAX
+struct ipstats_mib {
+ /* mibs[] must be first field of struct ipstats_mib */
+ u64 mibs[IPSTATS_MIB_MAX];
+ struct u64_stats_sync syncp;
+};
+
+/* ICMP */
+#define ICMP_MIB_MAX __ICMP_MIB_MAX
+struct icmp_mib {
+ unsigned long mibs[ICMP_MIB_MAX];
+};
+
+#define ICMPMSG_MIB_MAX __ICMPMSG_MIB_MAX
+struct icmpmsg_mib {
+ atomic_long_t mibs[ICMPMSG_MIB_MAX];
+};
+
+/* ICMP6 (IPv6-ICMP) */
+#define ICMP6_MIB_MAX __ICMP6_MIB_MAX
+/* per network ns counters */
+struct icmpv6_mib {
+ unsigned long mibs[ICMP6_MIB_MAX];
+};
+/* per-device counters (shared across all CPUs) */
+struct icmpv6_mib_device {
+ atomic_long_t mibs[ICMP6_MIB_MAX];
+};
+
+#define ICMP6MSG_MIB_MAX __ICMP6MSG_MIB_MAX
+/* per network ns counters */
+struct icmpv6msg_mib {
+ atomic_long_t mibs[ICMP6MSG_MIB_MAX];
+};
+/* per-device counters (shared across all CPUs) */
+struct icmpv6msg_mib_device {
+ atomic_long_t mibs[ICMP6MSG_MIB_MAX];
+};
+
+
+/* TCP */
+#define TCP_MIB_MAX __TCP_MIB_MAX
+struct tcp_mib {
+ unsigned long mibs[TCP_MIB_MAX];
+};
+
+/* UDP */
+#define UDP_MIB_MAX __UDP_MIB_MAX
+struct udp_mib {
+ unsigned long mibs[UDP_MIB_MAX];
+};
+
+/* Linux */
+#define LINUX_MIB_MAX __LINUX_MIB_MAX
+struct linux_mib {
+ unsigned long mibs[LINUX_MIB_MAX];
+};
+
+/* Linux Xfrm */
+#define LINUX_MIB_XFRMMAX __LINUX_MIB_XFRMMAX
+struct linux_xfrm_mib {
+ unsigned long mibs[LINUX_MIB_XFRMMAX];
+};
+
+/* Linux TLS */
+#define LINUX_MIB_TLSMAX __LINUX_MIB_TLSMAX
+struct linux_tls_mib {
+ unsigned long mibs[LINUX_MIB_TLSMAX];
+};
+
+#define DEFINE_SNMP_STAT(type, name) \
+ __typeof__(type) __percpu *name
+#define DEFINE_SNMP_STAT_ATOMIC(type, name) \
+ __typeof__(type) *name
+#define DECLARE_SNMP_STAT(type, name) \
+ extern __typeof__(type) __percpu *name
+
+#define __SNMP_INC_STATS(mib, field) \
+ __this_cpu_inc(mib->mibs[field])
+
+#define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \
+ atomic_long_inc(&mib->mibs[field])
+
+#define SNMP_INC_STATS(mib, field) \
+ this_cpu_inc(mib->mibs[field])
+
+#define SNMP_DEC_STATS(mib, field) \
+ this_cpu_dec(mib->mibs[field])
+
+#define __SNMP_ADD_STATS(mib, field, addend) \
+ __this_cpu_add(mib->mibs[field], addend)
+
+#define SNMP_ADD_STATS(mib, field, addend) \
+ this_cpu_add(mib->mibs[field], addend)
+#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
+ do { \
+ __typeof__((mib->mibs) + 0) ptr = mib->mibs; \
+ this_cpu_inc(ptr[basefield##PKTS]); \
+ this_cpu_add(ptr[basefield##OCTETS], addend); \
+ } while (0)
+#define __SNMP_UPD_PO_STATS(mib, basefield, addend) \
+ do { \
+ __typeof__((mib->mibs) + 0) ptr = mib->mibs; \
+ __this_cpu_inc(ptr[basefield##PKTS]); \
+ __this_cpu_add(ptr[basefield##OCTETS], addend); \
+ } while (0)
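+
+/* Usage sketch (illustrative): protocols bump their per-cpu counters
+ * through the wrappers above; the mib pointer normally lives in the
+ * per-netns mib block (e.g. net->mib.udp_statistics).
+ */
+static inline void example_udp_in_datagram(struct udp_mib __percpu *mib)
+{
+	SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
+}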
+
+#if BITS_PER_LONG==32
+
+#define __SNMP_ADD_STATS64(mib, field, addend) \
+ do { \
+ __typeof__(*mib) *ptr = raw_cpu_ptr(mib); \
+ u64_stats_update_begin(&ptr->syncp); \
+ ptr->mibs[field] += addend; \
+ u64_stats_update_end(&ptr->syncp); \
+ } while (0)
+
+#define SNMP_ADD_STATS64(mib, field, addend) \
+ do { \
+ local_bh_disable(); \
+ __SNMP_ADD_STATS64(mib, field, addend); \
+ local_bh_enable(); \
+ } while (0)
+
+#define __SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
+#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
+#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) \
+ do { \
+ __typeof__(*mib) *ptr; \
+ ptr = raw_cpu_ptr((mib)); \
+ u64_stats_update_begin(&ptr->syncp); \
+ ptr->mibs[basefield##PKTS]++; \
+ ptr->mibs[basefield##OCTETS] += addend; \
+ u64_stats_update_end(&ptr->syncp); \
+ } while (0)
+#define SNMP_UPD_PO_STATS64(mib, basefield, addend) \
+ do { \
+ local_bh_disable(); \
+ __SNMP_UPD_PO_STATS64(mib, basefield, addend); \
+ local_bh_enable(); \
+ } while (0)
+#else
+#define __SNMP_INC_STATS64(mib, field) __SNMP_INC_STATS(mib, field)
+#define SNMP_INC_STATS64(mib, field) SNMP_INC_STATS(mib, field)
+#define SNMP_DEC_STATS64(mib, field) SNMP_DEC_STATS(mib, field)
+#define __SNMP_ADD_STATS64(mib, field, addend) __SNMP_ADD_STATS(mib, field, addend)
+#define SNMP_ADD_STATS64(mib, field, addend) SNMP_ADD_STATS(mib, field, addend)
+#define SNMP_UPD_PO_STATS64(mib, basefield, addend) SNMP_UPD_PO_STATS(mib, basefield, addend)
+#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) __SNMP_UPD_PO_STATS(mib, basefield, addend)
+#endif
+
+#endif
diff --git a/include/net/sock.h b/include/net/sock.h
new file mode 100644
index 000000000..579732d47
--- /dev/null
+++ b/include/net/sock.h
@@ -0,0 +1,3057 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the AF_INET socket handler.
+ *
+ * Version: @(#)sock.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Florian La Roche <flla@stud.uni-sb.de>
+ *
+ * Fixes:
+ * Alan Cox : Volatiles in skbuff pointers. See
+ * skbuff comments. May be overdone,
+ * better to prove they can be removed
+ * than the reverse.
+ * Alan Cox : Added a zapped field for tcp to note
+ * a socket is reset and must stay shut up
+ * Alan Cox : New fields for options
+ * Pauline Middelink : identd support
+ * Alan Cox : Eliminate low level recv/recvfrom
+ * David S. Miller : New socket lookup architecture.
+ * Steve Whitehouse: Default routines for sock_ops
+ * Arnaldo C. Melo : removed net_pinfo, tp_pinfo and made
+ * protinfo be just a void pointer, as the
+ * protocol specific parts were moved to
+ * respective headers and ipv4/v6, etc now
+ * use private slabcaches for its socks
+ * Pedro Hortas : New flags field for socket options
+ */
+#ifndef _SOCK_H
+#define _SOCK_H
+
+#include <linux/hardirq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/list_nulls.h>
+#include <linux/timer.h>
+#include <linux/cache.h>
+#include <linux/bitops.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h> /* struct sk_buff */
+#include <linux/mm.h>
+#include <linux/security.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/page_counter.h>
+#include <linux/memcontrol.h>
+#include <linux/static_key.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/cgroup-defs.h>
+#include <linux/rbtree.h>
+#include <linux/rculist_nulls.h>
+#include <linux/poll.h>
+#include <linux/sockptr.h>
+#include <linux/indirect_call_wrapper.h>
+#include <linux/atomic.h>
+#include <linux/refcount.h>
+#include <linux/llist.h>
+#include <net/dst.h>
+#include <net/checksum.h>
+#include <net/tcp_states.h>
+#include <linux/net_tstamp.h>
+#include <net/l3mdev.h>
+#include <uapi/linux/socket.h>
+
+/*
+ * This structure really needs to be cleaned up.
+ * Most of it is for TCP, and not used by any of
+ * the other protocols.
+ */
+
+/* Define this to get the SOCK_DBG debugging facility. */
+#define SOCK_DEBUGGING
+#ifdef SOCK_DEBUGGING
+#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
+ printk(KERN_DEBUG msg); } while (0)
+#else
+/* Validate arguments and do nothing */
+static inline __printf(2, 3)
+void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
+{
+}
+#endif
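+
+/* Usage sketch (illustrative): SOCK_DEBUG() only prints when
+ * SOCK_DEBUGGING is defined and SO_DEBUG is set on the socket, e.g.:
+ *
+ *	SOCK_DEBUG(sk, "connect: state %d\n", sk->sk_state);
+ */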
+
+/* This is the per-socket lock. The spinlock provides a synchronization
+ * between user contexts and software interrupt processing, whereas the
+ * mini-semaphore synchronizes multiple users amongst themselves.
+ */
+typedef struct {
+ spinlock_t slock;
+ int owned;
+ wait_queue_head_t wq;
+ /*
+ * We express the mutex-alike socket_lock semantics
+ * to the lock validator by explicitly managing
+ * the slock as a lock variant (in addition to
+ * the slock itself):
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} socket_lock_t;
+
+struct sock;
+struct proto;
+struct net;
+
+typedef __u32 __bitwise __portpair;
+typedef __u64 __bitwise __addrpair;
+
+/**
+ * struct sock_common - minimal network layer representation of sockets
+ * @skc_daddr: Foreign IPv4 addr
+ * @skc_rcv_saddr: Bound local IPv4 addr
+ * @skc_addrpair: 8-byte-aligned __u64 union of @skc_daddr & @skc_rcv_saddr
+ * @skc_hash: hash value used with various protocol lookup tables
+ * @skc_u16hashes: two u16 hash values used by UDP lookup tables
+ * @skc_dport: placeholder for inet_dport/tw_dport
+ * @skc_num: placeholder for inet_num/tw_num
+ * @skc_portpair: __u32 union of @skc_dport & @skc_num
+ * @skc_family: network address family
+ * @skc_state: Connection state
+ * @skc_reuse: %SO_REUSEADDR setting
+ * @skc_reuseport: %SO_REUSEPORT setting
+ * @skc_ipv6only: socket is IPV6 only
+ * @skc_net_refcnt: socket is using net ref counting
+ * @skc_bound_dev_if: bound device index if != 0
+ * @skc_bind_node: bind hash linkage for various protocol lookup tables
+ * @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
+ * @skc_prot: protocol handlers inside a network family
+ * @skc_net: reference to the network namespace of this socket
+ * @skc_v6_daddr: IPV6 destination address
+ * @skc_v6_rcv_saddr: IPV6 source address
+ * @skc_cookie: socket's cookie value
+ * @skc_node: main hash linkage for various protocol lookup tables
+ * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
+ * @skc_tx_queue_mapping: tx queue number for this connection
+ * @skc_rx_queue_mapping: rx queue number for this connection
+ * @skc_flags: place holder for sk_flags
+ * %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
+ * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
+ * @skc_listener: connection request listener socket (aka rsk_listener)
+ * [union with @skc_flags]
+ * @skc_tw_dr: (aka tw_dr) ptr to &struct inet_timewait_death_row
+ * [union with @skc_flags]
+ * @skc_incoming_cpu: record/match cpu processing incoming packets
+ * @skc_rcv_wnd: (aka rsk_rcv_wnd) TCP receive window size (possibly scaled)
+ * [union with @skc_incoming_cpu]
+ * @skc_tw_rcv_nxt: (aka tw_rcv_nxt) TCP window next expected seq number
+ * [union with @skc_incoming_cpu]
+ * @skc_refcnt: reference count
+ *
+ * This is the minimal network layer representation of sockets, the header
+ * for struct sock and struct inet_timewait_sock.
+ */
+struct sock_common {
+ union {
+ __addrpair skc_addrpair;
+ struct {
+ __be32 skc_daddr;
+ __be32 skc_rcv_saddr;
+ };
+ };
+ union {
+ unsigned int skc_hash;
+ __u16 skc_u16hashes[2];
+ };
+ /* skc_dport && skc_num must be grouped as well */
+ union {
+ __portpair skc_portpair;
+ struct {
+ __be16 skc_dport;
+ __u16 skc_num;
+ };
+ };
+
+ unsigned short skc_family;
+ volatile unsigned char skc_state;
+ unsigned char skc_reuse:4;
+ unsigned char skc_reuseport:1;
+ unsigned char skc_ipv6only:1;
+ unsigned char skc_net_refcnt:1;
+ int skc_bound_dev_if;
+ union {
+ struct hlist_node skc_bind_node;
+ struct hlist_node skc_portaddr_node;
+ };
+ struct proto *skc_prot;
+ possible_net_t skc_net;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr skc_v6_daddr;
+ struct in6_addr skc_v6_rcv_saddr;
+#endif
+
+ atomic64_t skc_cookie;
+
+ /* following fields are padding to force
+ * offset(struct sock, sk_refcnt) == 128 on 64bit arches
+ * assuming IPV6 is enabled. We use this padding differently
+	 * for different kinds of 'sockets'
+ */
+ union {
+ unsigned long skc_flags;
+ struct sock *skc_listener; /* request_sock */
+ struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */
+ };
+ /*
+ * fields between dontcopy_begin/dontcopy_end
+ * are not copied in sock_copy()
+ */
+ /* private: */
+ int skc_dontcopy_begin[0];
+ /* public: */
+ union {
+ struct hlist_node skc_node;
+ struct hlist_nulls_node skc_nulls_node;
+ };
+ unsigned short skc_tx_queue_mapping;
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
+ unsigned short skc_rx_queue_mapping;
+#endif
+ union {
+ int skc_incoming_cpu;
+ u32 skc_rcv_wnd;
+ u32 skc_tw_rcv_nxt; /* struct tcp_timewait_sock */
+ };
+
+ refcount_t skc_refcnt;
+ /* private: */
+ int skc_dontcopy_end[0];
+ union {
+ u32 skc_rxhash;
+ u32 skc_window_clamp;
+ u32 skc_tw_snd_nxt; /* struct tcp_timewait_sock */
+ };
+ /* public: */
+};
+
+struct bpf_local_storage;
+struct sk_filter;
+
+/**
+ * struct sock - network layer representation of sockets
+ * @__sk_common: shared layout with inet_timewait_sock
+ * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
+ * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
+ * @sk_lock: synchronizer
+ * @sk_kern_sock: True if sock is using kernel lock classes
+ * @sk_rcvbuf: size of receive buffer in bytes
+ * @sk_wq: sock wait queue and async head
+ * @sk_rx_dst: receive input route used by early demux
+ * @sk_rx_dst_ifindex: ifindex for @sk_rx_dst
+ * @sk_rx_dst_cookie: cookie for @sk_rx_dst
+ * @sk_dst_cache: destination cache
+ * @sk_dst_pending_confirm: need to confirm neighbour
+ * @sk_policy: flow policy
+ * @sk_receive_queue: incoming packets
+ * @sk_wmem_alloc: transmit queue bytes committed
+ * @sk_tsq_flags: TCP Small Queues flags
+ * @sk_write_queue: Packet sending queue
+ * @sk_omem_alloc: "o" is "option" or "other"
+ * @sk_wmem_queued: persistent queue size
+ * @sk_forward_alloc: space allocated forward
+ * @sk_reserved_mem: space reserved and non-reclaimable for the socket
+ * @sk_napi_id: id of the last napi context to receive data for sk
+ * @sk_ll_usec: usecs to busypoll when there is no data
+ * @sk_allocation: allocation mode
+ * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
+ * @sk_pacing_status: Pacing status (requested, handled by sch_fq)
+ * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
+ * @sk_sndbuf: size of send buffer in bytes
+ * @__sk_flags_offset: empty field used to determine location of bitfield
+ * @sk_padding: unused element for alignment
+ * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
+ * @sk_no_check_rx: allow zero checksum in RX packets
+ * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
+ * @sk_gso_disabled: if set, NETIF_F_GSO_MASK is forbidden.
+ * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
+ * @sk_gso_max_size: Maximum GSO segment size to build
+ * @sk_gso_max_segs: Maximum number of GSO segments
+ * @sk_pacing_shift: scaling factor for TCP Small Queues
+ * @sk_lingertime: %SO_LINGER l_linger setting
+ * @sk_backlog: always used with the per-socket spinlock held
+ * @sk_callback_lock: used with the callbacks in the end of this struct
+ * @sk_error_queue: rarely used
+ * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
+ * IPV6_ADDRFORM for instance)
+ * @sk_err: last error
+ * @sk_err_soft: errors that don't cause failure but are the cause of a
+ * persistent failure not just 'timed out'
+ * @sk_drops: raw/udp drops counter
+ * @sk_ack_backlog: current listen backlog
+ * @sk_max_ack_backlog: listen backlog set in listen()
+ * @sk_uid: user id of owner
+ * @sk_prefer_busy_poll: prefer busypolling over softirq processing
+ * @sk_busy_poll_budget: napi processing budget when busypolling
+ * @sk_priority: %SO_PRIORITY setting
+ * @sk_type: socket type (%SOCK_STREAM, etc)
+ * @sk_protocol: which protocol this socket belongs in this network family
+ * @sk_peer_lock: lock protecting @sk_peer_pid and @sk_peer_cred
+ * @sk_peer_pid: &struct pid for this socket's peer
+ * @sk_peer_cred: %SO_PEERCRED setting
+ * @sk_rcvlowat: %SO_RCVLOWAT setting
+ * @sk_rcvtimeo: %SO_RCVTIMEO setting
+ * @sk_sndtimeo: %SO_SNDTIMEO setting
+ * @sk_txhash: computed flow hash for use on transmit
+ * @sk_txrehash: enable TX hash rethink
+ * @sk_filter: socket filtering instructions
+ * @sk_timer: sock cleanup timer
+ * @sk_stamp: time stamp of last packet received
+ * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
+ * @sk_tsflags: SO_TIMESTAMPING flags
+ * @sk_bind_phc: SO_TIMESTAMPING bind PHC index of PTP virtual clock
+ * for timestamping
+ * @sk_tskey: counter to disambiguate concurrent tstamp requests
+ * @sk_zckey: counter to order MSG_ZEROCOPY notifications
+ * @sk_socket: Identd and reporting IO signals
+ * @sk_user_data: RPC layer private data. Write-protected by @sk_callback_lock.
+ * @sk_frag: cached page frag
+ * @sk_peek_off: current peek_offset value
+ * @sk_send_head: front of stuff to transmit
+ * @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
+ * @sk_security: used by security modules
+ * @sk_mark: generic packet mark
+ * @sk_cgrp_data: cgroup data for this cgroup
+ * @sk_memcg: this socket's memory cgroup association
+ * @sk_write_pending: a write to stream socket waits to start
+ * @sk_disconnects: number of disconnect operations performed on this sock
+ * @sk_state_change: callback to indicate change in the state of the sock
+ * @sk_data_ready: callback to indicate there is data to be processed
+ * @sk_write_space: callback to indicate there is buffer sending space available
+ * @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
+ * @sk_backlog_rcv: callback to process the backlog
+ * @sk_validate_xmit_skb: ptr to an optional validate function
+ * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
+ * @sk_reuseport_cb: reuseport group container
+ * @sk_bpf_storage: ptr to cache and control for bpf_sk_storage
+ * @sk_rcu: used during RCU grace period
+ * @sk_clockid: clockid used by time-based scheduling (SO_TXTIME)
+ * @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
+ * @sk_txtime_report_errors: set report errors mode for SO_TXTIME
+ * @sk_txtime_unused: unused txtime flags
+ * @ns_tracker: tracker for netns reference
+ * @sk_bind2_node: bind node in the bhash2 table
+ */
+struct sock {
+ /*
+ * Now struct inet_timewait_sock also uses sock_common, so please just
+	 * don't add anything before this first member (__sk_common)	--acme
+ */
+ struct sock_common __sk_common;
+#define sk_node __sk_common.skc_node
+#define sk_nulls_node __sk_common.skc_nulls_node
+#define sk_refcnt __sk_common.skc_refcnt
+#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
+#define sk_rx_queue_mapping __sk_common.skc_rx_queue_mapping
+#endif
+
+#define sk_dontcopy_begin __sk_common.skc_dontcopy_begin
+#define sk_dontcopy_end __sk_common.skc_dontcopy_end
+#define sk_hash __sk_common.skc_hash
+#define sk_portpair __sk_common.skc_portpair
+#define sk_num __sk_common.skc_num
+#define sk_dport __sk_common.skc_dport
+#define sk_addrpair __sk_common.skc_addrpair
+#define sk_daddr __sk_common.skc_daddr
+#define sk_rcv_saddr __sk_common.skc_rcv_saddr
+#define sk_family __sk_common.skc_family
+#define sk_state __sk_common.skc_state
+#define sk_reuse __sk_common.skc_reuse
+#define sk_reuseport __sk_common.skc_reuseport
+#define sk_ipv6only __sk_common.skc_ipv6only
+#define sk_net_refcnt __sk_common.skc_net_refcnt
+#define sk_bound_dev_if __sk_common.skc_bound_dev_if
+#define sk_bind_node __sk_common.skc_bind_node
+#define sk_prot __sk_common.skc_prot
+#define sk_net __sk_common.skc_net
+#define sk_v6_daddr __sk_common.skc_v6_daddr
+#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
+#define sk_cookie __sk_common.skc_cookie
+#define sk_incoming_cpu __sk_common.skc_incoming_cpu
+#define sk_flags __sk_common.skc_flags
+#define sk_rxhash __sk_common.skc_rxhash
+
+ /* early demux fields */
+ struct dst_entry __rcu *sk_rx_dst;
+ int sk_rx_dst_ifindex;
+ u32 sk_rx_dst_cookie;
+
+ socket_lock_t sk_lock;
+ atomic_t sk_drops;
+ int sk_rcvlowat;
+ struct sk_buff_head sk_error_queue;
+ struct sk_buff_head sk_receive_queue;
+ /*
+ * The backlog queue is special, it is always used with
+ * the per-socket spinlock held and requires low latency
+	 * access. Therefore we special case its implementation.
+	 * Note : rmem_alloc is in this structure to fill a hole
+	 * on 64bit arches, not because it's logically part of
+ * backlog.
+ */
+ struct {
+ atomic_t rmem_alloc;
+ int len;
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ } sk_backlog;
+
+#define sk_rmem_alloc sk_backlog.rmem_alloc
+
+ int sk_forward_alloc;
+ u32 sk_reserved_mem;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int sk_ll_usec;
+ /* ===== mostly read cache line ===== */
+ unsigned int sk_napi_id;
+#endif
+ int sk_rcvbuf;
+ int sk_disconnects;
+
+ struct sk_filter __rcu *sk_filter;
+ union {
+ struct socket_wq __rcu *sk_wq;
+ /* private: */
+ struct socket_wq *sk_wq_raw;
+ /* public: */
+ };
+#ifdef CONFIG_XFRM
+ struct xfrm_policy __rcu *sk_policy[2];
+#endif
+
+ struct dst_entry __rcu *sk_dst_cache;
+ atomic_t sk_omem_alloc;
+ int sk_sndbuf;
+
+ /* ===== cache line for TX ===== */
+ int sk_wmem_queued;
+ refcount_t sk_wmem_alloc;
+ unsigned long sk_tsq_flags;
+ union {
+ struct sk_buff *sk_send_head;
+ struct rb_root tcp_rtx_queue;
+ };
+ struct sk_buff_head sk_write_queue;
+ __s32 sk_peek_off;
+ int sk_write_pending;
+ __u32 sk_dst_pending_confirm;
+ u32 sk_pacing_status; /* see enum sk_pacing */
+ long sk_sndtimeo;
+ struct timer_list sk_timer;
+ __u32 sk_priority;
+ __u32 sk_mark;
+ unsigned long sk_pacing_rate; /* bytes per second */
+ unsigned long sk_max_pacing_rate;
+ struct page_frag sk_frag;
+ netdev_features_t sk_route_caps;
+ int sk_gso_type;
+ unsigned int sk_gso_max_size;
+ gfp_t sk_allocation;
+ __u32 sk_txhash;
+
+ /*
+	 * Because of non-atomicity rules, all
+ * changes are protected by socket lock.
+ */
+ u8 sk_gso_disabled : 1,
+ sk_kern_sock : 1,
+ sk_no_check_tx : 1,
+ sk_no_check_rx : 1,
+ sk_userlocks : 4;
+ u8 sk_pacing_shift;
+ u16 sk_type;
+ u16 sk_protocol;
+ u16 sk_gso_max_segs;
+ unsigned long sk_lingertime;
+ struct proto *sk_prot_creator;
+ rwlock_t sk_callback_lock;
+ int sk_err,
+ sk_err_soft;
+ u32 sk_ack_backlog;
+ u32 sk_max_ack_backlog;
+ kuid_t sk_uid;
+ u8 sk_txrehash;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ u8 sk_prefer_busy_poll;
+ u16 sk_busy_poll_budget;
+#endif
+ spinlock_t sk_peer_lock;
+ int sk_bind_phc;
+ struct pid *sk_peer_pid;
+ const struct cred *sk_peer_cred;
+
+ long sk_rcvtimeo;
+ ktime_t sk_stamp;
+#if BITS_PER_LONG==32
+ seqlock_t sk_stamp_seq;
+#endif
+ u16 sk_tsflags;
+ u8 sk_shutdown;
+ atomic_t sk_tskey;
+ atomic_t sk_zckey;
+
+ u8 sk_clockid;
+ u8 sk_txtime_deadline_mode : 1,
+ sk_txtime_report_errors : 1,
+ sk_txtime_unused : 6;
+
+ struct socket *sk_socket;
+ void *sk_user_data;
+#ifdef CONFIG_SECURITY
+ void *sk_security;
+#endif
+ struct sock_cgroup_data sk_cgrp_data;
+ struct mem_cgroup *sk_memcg;
+ void (*sk_state_change)(struct sock *sk);
+ void (*sk_data_ready)(struct sock *sk);
+ void (*sk_write_space)(struct sock *sk);
+ void (*sk_error_report)(struct sock *sk);
+ int (*sk_backlog_rcv)(struct sock *sk,
+ struct sk_buff *skb);
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
+ struct net_device *dev,
+ struct sk_buff *skb);
+#endif
+ void (*sk_destruct)(struct sock *sk);
+ struct sock_reuseport __rcu *sk_reuseport_cb;
+#ifdef CONFIG_BPF_SYSCALL
+ struct bpf_local_storage __rcu *sk_bpf_storage;
+#endif
+ struct rcu_head sk_rcu;
+ netns_tracker ns_tracker;
+ struct hlist_node sk_bind2_node;
+};
+
+enum sk_pacing {
+ SK_PACING_NONE = 0,
+ SK_PACING_NEEDED = 1,
+ SK_PACING_FQ = 2,
+};
+
+/* flag bits in sk_user_data
+ *
+ * - SK_USER_DATA_NOCOPY: Pointer stored in sk_user_data might
+ * not be suitable for copying when cloning the socket. For instance,
+ * it can point to a reference counted object. sk_user_data bottom
+ * bit is set if pointer must not be copied.
+ *
+ * - SK_USER_DATA_BPF: Mark whether sk_user_data field is
+ * managed/owned by a BPF reuseport array. This bit should be set
+ * when sk_user_data's sk is added to the bpf's reuseport_array.
+ *
+ * - SK_USER_DATA_PSOCK: Mark whether pointer stored in
+ * sk_user_data points to psock type. This bit should be set
+ * when sk_user_data is assigned to a psock object.
+ */
+#define SK_USER_DATA_NOCOPY 1UL
+#define SK_USER_DATA_BPF 2UL
+#define SK_USER_DATA_PSOCK 4UL
+#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF |\
+ SK_USER_DATA_PSOCK)
+
+/**
+ * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
+ * @sk: socket
+ */
+static inline bool sk_user_data_is_nocopy(const struct sock *sk)
+{
+ return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY);
+}
+
+#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
+
+/**
+ * __locked_read_sk_user_data_with_flags - return the pointer
+ * only if all of the argument flags have been set in sk_user_data.
+ * Otherwise return NULL.
+ *
+ * @sk: socket
+ * @flags: flag bits
+ *
+ * The caller must be holding sk->sk_callback_lock.
+ */
+static inline void *
+__locked_read_sk_user_data_with_flags(const struct sock *sk,
+ uintptr_t flags)
+{
+ uintptr_t sk_user_data =
+ (uintptr_t)rcu_dereference_check(__sk_user_data(sk),
+ lockdep_is_held(&sk->sk_callback_lock));
+
+ WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
+
+ if ((sk_user_data & flags) == flags)
+ return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
+ return NULL;
+}
+
+/**
+ * __rcu_dereference_sk_user_data_with_flags - return the pointer
+ * only if all of the argument flags have been set in sk_user_data.
+ * Otherwise return NULL.
+ *
+ * @sk: socket
+ * @flags: flag bits
+ */
+static inline void *
+__rcu_dereference_sk_user_data_with_flags(const struct sock *sk,
+ uintptr_t flags)
+{
+ uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk));
+
+ WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
+
+ if ((sk_user_data & flags) == flags)
+ return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
+ return NULL;
+}
+
+#define rcu_dereference_sk_user_data(sk) \
+ __rcu_dereference_sk_user_data_with_flags(sk, 0)
+#define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags) \
+({ \
+ uintptr_t __tmp1 = (uintptr_t)(ptr), \
+ __tmp2 = (uintptr_t)(flags); \
+ WARN_ON_ONCE(__tmp1 & ~SK_USER_DATA_PTRMASK); \
+ WARN_ON_ONCE(__tmp2 & SK_USER_DATA_PTRMASK); \
+ rcu_assign_pointer(__sk_user_data((sk)), \
+ __tmp1 | __tmp2); \
+})
+#define rcu_assign_sk_user_data(sk, ptr) \
+ __rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
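+
+/* Usage sketch (illustrative): store a pointer that must not be copied
+ * when the socket is cloned, then read it back later via
+ * __rcu_dereference_sk_user_data_with_flags(). 'my_obj' is a
+ * hypothetical reference-counted object.
+ */
+static inline void example_set_nocopy_user_data(struct sock *sk,
+						void *my_obj)
+{
+	__rcu_assign_sk_user_data_with_flags(sk, my_obj, SK_USER_DATA_NOCOPY);
+}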
+
+static inline
+struct net *sock_net(const struct sock *sk)
+{
+ return read_pnet(&sk->sk_net);
+}
+
+static inline
+void sock_net_set(struct sock *sk, struct net *net)
+{
+ write_pnet(&sk->sk_net, net);
+}
+
+/*
+ * SK_CAN_REUSE and SK_NO_REUSE on a socket indicate whether or not
+ * its port may be reused by another socket. SK_FORCE_REUSE means the
+ * socket will reuse everybody else's port without looking at the other
+ * socket's sk_reuse value.
+ */
+
+#define SK_NO_REUSE 0
+#define SK_CAN_REUSE 1
+#define SK_FORCE_REUSE 2
+
+int sk_set_peek_off(struct sock *sk, int val);
+
+static inline int sk_peek_offset(const struct sock *sk, int flags)
+{
+ if (unlikely(flags & MSG_PEEK)) {
+ return READ_ONCE(sk->sk_peek_off);
+ }
+
+ return 0;
+}
+
+static inline void sk_peek_offset_bwd(struct sock *sk, int val)
+{
+ s32 off = READ_ONCE(sk->sk_peek_off);
+
+ if (unlikely(off >= 0)) {
+ off = max_t(s32, off - val, 0);
+ WRITE_ONCE(sk->sk_peek_off, off);
+ }
+}
+
+static inline void sk_peek_offset_fwd(struct sock *sk, int val)
+{
+ sk_peek_offset_bwd(sk, -val);
+}
+
+/*
+ * Hashed lists helper routines
+ */
+static inline struct sock *sk_entry(const struct hlist_node *node)
+{
+ return hlist_entry(node, struct sock, sk_node);
+}
+
+static inline struct sock *__sk_head(const struct hlist_head *head)
+{
+ return hlist_entry(head->first, struct sock, sk_node);
+}
+
+static inline struct sock *sk_head(const struct hlist_head *head)
+{
+ return hlist_empty(head) ? NULL : __sk_head(head);
+}
+
+static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
+{
+ return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
+}
+
+static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
+{
+ return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
+}
+
+static inline struct sock *sk_next(const struct sock *sk)
+{
+ return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
+}
+
+static inline struct sock *sk_nulls_next(const struct sock *sk)
+{
+ return (!is_a_nulls(sk->sk_nulls_node.next)) ?
+ hlist_nulls_entry(sk->sk_nulls_node.next,
+ struct sock, sk_nulls_node) :
+ NULL;
+}
+
+static inline bool sk_unhashed(const struct sock *sk)
+{
+ return hlist_unhashed(&sk->sk_node);
+}
+
+static inline bool sk_hashed(const struct sock *sk)
+{
+ return !sk_unhashed(sk);
+}
+
+static inline void sk_node_init(struct hlist_node *node)
+{
+ node->pprev = NULL;
+}
+
+static inline void __sk_del_node(struct sock *sk)
+{
+ __hlist_del(&sk->sk_node);
+}
+
+/* NB: equivalent to hlist_del_init_rcu */
+static inline bool __sk_del_node_init(struct sock *sk)
+{
+ if (sk_hashed(sk)) {
+ __sk_del_node(sk);
+ sk_node_init(&sk->sk_node);
+ return true;
+ }
+ return false;
+}
+
+/* Grab a socket reference. This operation is valid only when sk has
+ * ALREADY been grabbed, e.g. it was found in a hash table or a list
+ * and the lookup is made under a lock preventing hash table
+ * modifications.
+ */
+
+static __always_inline void sock_hold(struct sock *sk)
+{
+ refcount_inc(&sk->sk_refcnt);
+}
+
+/* Release a socket reference in a context where the refcnt cannot
+ * hit zero, e.g. this holds in the context of any socketcall.
+ */
+static __always_inline void __sock_put(struct sock *sk)
+{
+ refcount_dec(&sk->sk_refcnt);
+}
+
+static inline bool sk_del_node_init(struct sock *sk)
+{
+ bool rc = __sk_del_node_init(sk);
+
+ if (rc) {
+ /* paranoid for a while -acme */
+ WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
+ __sock_put(sk);
+ }
+ return rc;
+}
+#define sk_del_node_init_rcu(sk) sk_del_node_init(sk)
+
+static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
+{
+ if (sk_hashed(sk)) {
+ hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
+ return true;
+ }
+ return false;
+}
+
+static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
+{
+ bool rc = __sk_nulls_del_node_init_rcu(sk);
+
+ if (rc) {
+ /* paranoid for a while -acme */
+ WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
+ __sock_put(sk);
+ }
+ return rc;
+}
+
+static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
+{
+ hlist_add_head(&sk->sk_node, list);
+}
+
+static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
+{
+ sock_hold(sk);
+ __sk_add_node(sk, list);
+}
+
+static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+{
+ sock_hold(sk);
+ if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+ sk->sk_family == AF_INET6)
+ hlist_add_tail_rcu(&sk->sk_node, list);
+ else
+ hlist_add_head_rcu(&sk->sk_node, list);
+}
+
+static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
+{
+ sock_hold(sk);
+ hlist_add_tail_rcu(&sk->sk_node, list);
+}
+
+static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+{
+ hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+}
+
+static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
+{
+ hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
+}
+
+static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+{
+ sock_hold(sk);
+ __sk_nulls_add_node_rcu(sk, list);
+}
+
+static inline void __sk_del_bind_node(struct sock *sk)
+{
+ __hlist_del(&sk->sk_bind_node);
+}
+
+static inline void sk_add_bind_node(struct sock *sk,
+ struct hlist_head *list)
+{
+ hlist_add_head(&sk->sk_bind_node, list);
+}
+
+static inline void __sk_del_bind2_node(struct sock *sk)
+{
+ __hlist_del(&sk->sk_bind2_node);
+}
+
+static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
+{
+ hlist_add_head(&sk->sk_bind2_node, list);
+}
+
+#define sk_for_each(__sk, list) \
+ hlist_for_each_entry(__sk, list, sk_node)
+#define sk_for_each_rcu(__sk, list) \
+ hlist_for_each_entry_rcu(__sk, list, sk_node)
+#define sk_nulls_for_each(__sk, node, list) \
+ hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
+#define sk_nulls_for_each_rcu(__sk, node, list) \
+ hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
+#define sk_for_each_from(__sk) \
+ hlist_for_each_entry_from(__sk, sk_node)
+#define sk_nulls_for_each_from(__sk, node) \
+ if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
+ hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
+#define sk_for_each_safe(__sk, tmp, list) \
+ hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
+#define sk_for_each_bound(__sk, list) \
+ hlist_for_each_entry(__sk, list, sk_bind_node)
+#define sk_for_each_bound_bhash2(__sk, list) \
+ hlist_for_each_entry(__sk, list, sk_bind2_node)
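+
+/* Usage sketch (illustrative): scan a protocol hash chain for a bound
+ * port; 'head' and 'port' are hypothetical caller state, and the
+ * appropriate lock must already be held.
+ */
+static inline struct sock *example_find_bound(struct hlist_head *head,
+					      __u16 port)
+{
+	struct sock *sk;
+
+	sk_for_each(sk, head)
+		if (sk->sk_num == port)
+			return sk;
+	return NULL;
+}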
+
+/**
+ * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_node to use as a loop cursor.
+ * @head: the head for your list.
+ * @offset: offset of hlist_node within the struct.
+ *
+ */
+#define sk_for_each_entry_offset_rcu(tpos, pos, head, offset) \
+ for (pos = rcu_dereference(hlist_first_rcu(head)); \
+ pos != NULL && \
+ ({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;}); \
+ pos = rcu_dereference(hlist_next_rcu(pos)))
+
+static inline struct user_namespace *sk_user_ns(const struct sock *sk)
+{
+	/* Careful: only use this in a context where these parameters
+	 * cannot change and must all be valid, such as recvmsg from
+ * userspace.
+ */
+ return sk->sk_socket->file->f_cred->user_ns;
+}
+
+/* Sock flags */
+enum sock_flags {
+ SOCK_DEAD,
+ SOCK_DONE,
+ SOCK_URGINLINE,
+ SOCK_KEEPOPEN,
+ SOCK_LINGER,
+ SOCK_DESTROY,
+ SOCK_BROADCAST,
+ SOCK_TIMESTAMP,
+ SOCK_ZAPPED,
+ SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
+ SOCK_DBG, /* %SO_DEBUG setting */
+ SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
+ SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
+ SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
+ SOCK_MEMALLOC, /* VM depends on this socket for swapping */
+ SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */
+ SOCK_FASYNC, /* fasync() active */
+ SOCK_RXQ_OVFL,
+ SOCK_ZEROCOPY, /* buffers from userspace */
+ SOCK_WIFI_STATUS, /* push wifi status to userspace */
+ SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
+ * Will use last 4 bytes of packet sent from
+ * user-space instead.
+ */
+ SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
+ SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
+ SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
+ SOCK_TXTIME,
+ SOCK_XDP, /* XDP is attached */
+ SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
+ SOCK_RCVMARK, /* Receive SO_MARK ancillary data with packet */
+};
+
+#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
+
+static inline void sock_copy_flags(struct sock *nsk, const struct sock *osk)
+{
+ nsk->sk_flags = osk->sk_flags;
+}
+
+static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
+{
+ __set_bit(flag, &sk->sk_flags);
+}
+
+static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
+{
+ __clear_bit(flag, &sk->sk_flags);
+}
+
+static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit,
+ int valbool)
+{
+ if (valbool)
+ sock_set_flag(sk, bit);
+ else
+ sock_reset_flag(sk, bit);
+}
+
+static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
+{
+ return test_bit(flag, &sk->sk_flags);
+}
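+
+/* Illustrative sketch (not part of the original header): a protocol's
+ * setsockopt() handler might mirror a boolean option into a flag and test
+ * it later on the fast path; allow_broadcast() is a made-up helper used
+ * only for illustration:
+ *
+ *	sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
+ *	...
+ *	if (sock_flag(sk, SOCK_BROADCAST))
+ *		allow_broadcast(skb);
+ */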
+
+#ifdef CONFIG_NET
+DECLARE_STATIC_KEY_FALSE(memalloc_socks_key);
+static inline int sk_memalloc_socks(void)
+{
+ return static_branch_unlikely(&memalloc_socks_key);
+}
+
+void __receive_sock(struct file *file);
+#else
+
+static inline int sk_memalloc_socks(void)
+{
+ return 0;
+}
+
+static inline void __receive_sock(struct file *file)
+{ }
+#endif
+
+static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
+{
+ return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
+}
+
+static inline void sk_acceptq_removed(struct sock *sk)
+{
+ WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
+}
+
+static inline void sk_acceptq_added(struct sock *sk)
+{
+ WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
+}
+
+/* Note: If you think the test should be:
+ * return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
+ * Then please take a look at commit 64a146513f8f ("[NET]: Revert incorrect accept queue backlog changes.")
+ */
+static inline bool sk_acceptq_is_full(const struct sock *sk)
+{
+ return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
+}
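+
+/* Usage sketch (illustrative): a listener typically drops a new connection
+ * request when the accept queue is full, along the lines of:
+ *
+ *	if (sk_acceptq_is_full(sk))
+ *		goto drop;
+ */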
+
+/*
+ * Compute minimal free write space needed to queue new packets.
+ */
+static inline int sk_stream_min_wspace(const struct sock *sk)
+{
+ return READ_ONCE(sk->sk_wmem_queued) >> 1;
+}
+
+static inline int sk_stream_wspace(const struct sock *sk)
+{
+ return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
+}
+
+static inline void sk_wmem_queued_add(struct sock *sk, int val)
+{
+ WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
+}
+
+static inline void sk_forward_alloc_add(struct sock *sk, int val)
+{
+ /* Paired with lockless reads of sk->sk_forward_alloc */
+ WRITE_ONCE(sk->sk_forward_alloc, sk->sk_forward_alloc + val);
+}
+
+void sk_stream_write_space(struct sock *sk);
+
+/* OOB backlog add */
+static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+ /* Don't let the skb's dst go un-refcounted; we are about to leave the RCU lock. */
+ skb_dst_force(skb);
+
+ if (!sk->sk_backlog.tail)
+ WRITE_ONCE(sk->sk_backlog.head, skb);
+ else
+ sk->sk_backlog.tail->next = skb;
+
+ WRITE_ONCE(sk->sk_backlog.tail, skb);
+ skb->next = NULL;
+}
+
+/*
+ * Take into account the sizes of the receive queue and backlog queue.
+ * Do not take this skb's truesize into account,
+ * so that even a single big packet can come through.
+ */
+static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
+{
+ unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
+
+ return qsize > limit;
+}
+
+/* The per-socket spinlock must be held here. */
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
+ unsigned int limit)
+{
+ if (sk_rcvqueues_full(sk, limit))
+ return -ENOBUFS;
+
+ /*
+ * If the skb was allocated from pfmemalloc reserves, only
+ * allow SOCK_MEMALLOC sockets to use it, as this socket is
+ * helping to free memory.
+ */
+ if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
+ return -ENOMEM;
+
+ __sk_add_backlog(sk, skb);
+ sk->sk_backlog.len += skb->truesize;
+ return 0;
+}
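+
+/* Illustrative sketch of the usual pattern (compare __sk_receive_skb()):
+ * process directly when the socket is not owned by a user, otherwise queue
+ * to the backlog; the limit shown is only an example value:
+ *
+ *	bh_lock_sock(sk);
+ *	if (!sock_owned_by_user(sk))
+ *		rc = sk_backlog_rcv(sk, skb);
+ *	else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
+ *		kfree_skb(skb);
+ *	bh_unlock_sock(sk);
+ */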
+
+int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+
+INDIRECT_CALLABLE_DECLARE(int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb));
+
+static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+ return __sk_backlog_rcv(sk, skb);
+
+ return INDIRECT_CALL_INET(sk->sk_backlog_rcv,
+ tcp_v6_do_rcv,
+ tcp_v4_do_rcv,
+ sk, skb);
+}
+
+static inline void sk_incoming_cpu_update(struct sock *sk)
+{
+ int cpu = raw_smp_processor_id();
+
+ if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
+ WRITE_ONCE(sk->sk_incoming_cpu, cpu);
+}
+
+static inline void sock_rps_record_flow_hash(__u32 hash)
+{
+#ifdef CONFIG_RPS
+ struct rps_sock_flow_table *sock_flow_table;
+
+ rcu_read_lock();
+ sock_flow_table = rcu_dereference(rps_sock_flow_table);
+ rps_record_sock_flow(sock_flow_table, hash);
+ rcu_read_unlock();
+#endif
+}
+
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ if (static_branch_unlikely(&rfs_needed)) {
+ /* Reading sk->sk_rxhash might incur an expensive cache line
+ * miss.
+ *
+ * TCP_ESTABLISHED does cover almost all states where RFS
+ * might be useful, and is cheaper [1] than testing:
+ * IPv4: inet_sk(sk)->inet_daddr
+ * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
+ * OR an additional socket flag
+ * [1] : sk_state and sk_prot are in the same cache line.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED) {
+ /* This READ_ONCE() is paired with the WRITE_ONCE()
+ * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
+ */
+ sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
+ }
+ }
+#endif
+}
+
+static inline void sock_rps_save_rxhash(struct sock *sk,
+ const struct sk_buff *skb)
+{
+#ifdef CONFIG_RPS
+ /* The following WRITE_ONCE() is paired with the READ_ONCE()
+ * here, and another one in sock_rps_record_flow().
+ */
+ if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
+ WRITE_ONCE(sk->sk_rxhash, skb->hash);
+#endif
+}
+
+static inline void sock_rps_reset_rxhash(struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ /* Paired with READ_ONCE() in sock_rps_record_flow() */
+ WRITE_ONCE(sk->sk_rxhash, 0);
+#endif
+}
+
+#define sk_wait_event(__sk, __timeo, __condition, __wait) \
+ ({ int __rc, __dis = __sk->sk_disconnects; \
+ release_sock(__sk); \
+ __rc = __condition; \
+ if (!__rc) { \
+ *(__timeo) = wait_woken(__wait, \
+ TASK_INTERRUPTIBLE, \
+ *(__timeo)); \
+ } \
+ sched_annotate_sleep(); \
+ lock_sock(__sk); \
+ __rc = __dis == __sk->sk_disconnects ? __condition : -EPIPE; \
+ __rc; \
+ })
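+
+/* Usage sketch (illustrative, modeled on sk_stream_wait_memory()): the
+ * caller holds the socket lock, registers on the wait queue and re-tests
+ * its condition around the wait:
+ *
+ *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ *
+ *	add_wait_queue(sk_sleep(sk), &wait);
+ *	while (!sk_stream_memory_free(sk) && timeo)
+ *		sk_wait_event(sk, &timeo, sk_stream_memory_free(sk), &wait);
+ *	remove_wait_queue(sk_sleep(sk), &wait);
+ */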
+
+int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
+int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
+void sk_stream_wait_close(struct sock *sk, long timeo_p);
+int sk_stream_error(struct sock *sk, int flags, int err);
+void sk_stream_kill_queues(struct sock *sk);
+void sk_set_memalloc(struct sock *sk);
+void sk_clear_memalloc(struct sock *sk);
+
+void __sk_flush_backlog(struct sock *sk);
+
+static inline bool sk_flush_backlog(struct sock *sk)
+{
+ if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
+ __sk_flush_backlog(sk);
+ return true;
+ }
+ return false;
+}
+
+int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
+
+struct request_sock_ops;
+struct timewait_sock_ops;
+struct inet_hashinfo;
+struct raw_hashinfo;
+struct smc_hashinfo;
+struct module;
+struct sk_psock;
+
+/*
+ * Caches using SLAB_TYPESAFE_BY_RCU should leave the .next pointer of
+ * nulls nodes unmodified. Special care is taken when initializing the
+ * object to zero.
+ */
+static inline void sk_prot_clear_nulls(struct sock *sk, int size)
+{
+ if (offsetof(struct sock, sk_node.next) != 0)
+ memset(sk, 0, offsetof(struct sock, sk_node.next));
+ memset(&sk->sk_node.pprev, 0,
+ size - offsetof(struct sock, sk_node.pprev));
+}
+
+/* Networking protocol blocks we attach to sockets.
+ * socket layer -> transport layer interface
+ */
+struct proto {
+ void (*close)(struct sock *sk,
+ long timeout);
+ int (*pre_connect)(struct sock *sk,
+ struct sockaddr *uaddr,
+ int addr_len);
+ int (*connect)(struct sock *sk,
+ struct sockaddr *uaddr,
+ int addr_len);
+ int (*disconnect)(struct sock *sk, int flags);
+
+ struct sock * (*accept)(struct sock *sk, int flags, int *err,
+ bool kern);
+
+ int (*ioctl)(struct sock *sk, int cmd,
+ unsigned long arg);
+ int (*init)(struct sock *sk);
+ void (*destroy)(struct sock *sk);
+ void (*shutdown)(struct sock *sk, int how);
+ int (*setsockopt)(struct sock *sk, int level,
+ int optname, sockptr_t optval,
+ unsigned int optlen);
+ int (*getsockopt)(struct sock *sk, int level,
+ int optname, char __user *optval,
+ int __user *option);
+ void (*keepalive)(struct sock *sk, int valbool);
+#ifdef CONFIG_COMPAT
+ int (*compat_ioctl)(struct sock *sk,
+ unsigned int cmd, unsigned long arg);
+#endif
+ int (*sendmsg)(struct sock *sk, struct msghdr *msg,
+ size_t len);
+ int (*recvmsg)(struct sock *sk, struct msghdr *msg,
+ size_t len, int flags, int *addr_len);
+ int (*sendpage)(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+ void (*splice_eof)(struct socket *sock);
+ int (*bind)(struct sock *sk,
+ struct sockaddr *addr, int addr_len);
+ int (*bind_add)(struct sock *sk,
+ struct sockaddr *addr, int addr_len);
+
+ int (*backlog_rcv) (struct sock *sk,
+ struct sk_buff *skb);
+ bool (*bpf_bypass_getsockopt)(int level,
+ int optname);
+
+ void (*release_cb)(struct sock *sk);
+
+ /* Methods for keeping track of sockets, looking them up, and selecting ports. */
+ int (*hash)(struct sock *sk);
+ void (*unhash)(struct sock *sk);
+ void (*rehash)(struct sock *sk);
+ int (*get_port)(struct sock *sk, unsigned short snum);
+ void (*put_port)(struct sock *sk);
+#ifdef CONFIG_BPF_SYSCALL
+ int (*psock_update_sk_prot)(struct sock *sk,
+ struct sk_psock *psock,
+ bool restore);
+#endif
+
+ /* Keeping track of sockets in use */
+#ifdef CONFIG_PROC_FS
+ unsigned int inuse_idx;
+#endif
+
+#if IS_ENABLED(CONFIG_MPTCP)
+ int (*forward_alloc_get)(const struct sock *sk);
+#endif
+
+ bool (*stream_memory_free)(const struct sock *sk, int wake);
+ bool (*sock_is_readable)(struct sock *sk);
+ /* Memory pressure */
+ void (*enter_memory_pressure)(struct sock *sk);
+ void (*leave_memory_pressure)(struct sock *sk);
+ atomic_long_t *memory_allocated; /* Current allocated memory. */
+ int __percpu *per_cpu_fw_alloc;
+ struct percpu_counter *sockets_allocated; /* Current number of sockets. */
+
+ /*
+ * Pressure flag: try to collapse.
+ * Technical note: it is used by multiple contexts non-atomically.
+ * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes.
+ * All of __sk_mem_schedule() is of this nature: accounting
+ * is strict, actions are advisory and have some latency.
+ */
+ unsigned long *memory_pressure;
+ long *sysctl_mem;
+
+ int *sysctl_wmem;
+ int *sysctl_rmem;
+ u32 sysctl_wmem_offset;
+ u32 sysctl_rmem_offset;
+
+ int max_header;
+ bool no_autobind;
+
+ struct kmem_cache *slab;
+ unsigned int obj_size;
+ slab_flags_t slab_flags;
+ unsigned int useroffset; /* Usercopy region offset */
+ unsigned int usersize; /* Usercopy region size */
+
+ unsigned int __percpu *orphan_count;
+
+ struct request_sock_ops *rsk_prot;
+ struct timewait_sock_ops *twsk_prot;
+
+ union {
+ struct inet_hashinfo *hashinfo;
+ struct udp_table *udp_table;
+ struct raw_hashinfo *raw_hash;
+ struct smc_hashinfo *smc_hash;
+ } h;
+
+ struct module *owner;
+
+ char name[32];
+
+ struct list_head node;
+#ifdef SOCK_REFCNT_DEBUG
+ atomic_t socks;
+#endif
+ int (*diag_destroy)(struct sock *sk, int err);
+} __randomize_layout;
+
+int proto_register(struct proto *prot, int alloc_slab);
+void proto_unregister(struct proto *prot);
+int sock_load_diag_module(int family, int protocol);
+
+#ifdef SOCK_REFCNT_DEBUG
+static inline void sk_refcnt_debug_inc(struct sock *sk)
+{
+ atomic_inc(&sk->sk_prot->socks);
+}
+
+static inline void sk_refcnt_debug_dec(struct sock *sk)
+{
+ atomic_dec(&sk->sk_prot->socks);
+ printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
+ sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
+}
+
+static inline void sk_refcnt_debug_release(const struct sock *sk)
+{
+ if (refcount_read(&sk->sk_refcnt) != 1)
+ printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
+ sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
+}
+#else /* SOCK_REFCNT_DEBUG */
+#define sk_refcnt_debug_inc(sk) do { } while (0)
+#define sk_refcnt_debug_dec(sk) do { } while (0)
+#define sk_refcnt_debug_release(sk) do { } while (0)
+#endif /* SOCK_REFCNT_DEBUG */
+
+INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
+
+static inline int sk_forward_alloc_get(const struct sock *sk)
+{
+#if IS_ENABLED(CONFIG_MPTCP)
+ if (sk->sk_prot->forward_alloc_get)
+ return sk->sk_prot->forward_alloc_get(sk);
+#endif
+ return READ_ONCE(sk->sk_forward_alloc);
+}
+
+static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
+{
+ if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
+ return false;
+
+ return sk->sk_prot->stream_memory_free ?
+ INDIRECT_CALL_INET_1(sk->sk_prot->stream_memory_free,
+ tcp_stream_memory_free, sk, wake) : true;
+}
+
+static inline bool sk_stream_memory_free(const struct sock *sk)
+{
+ return __sk_stream_memory_free(sk, 0);
+}
+
+static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
+{
+ return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
+ __sk_stream_memory_free(sk, wake);
+}
+
+static inline bool sk_stream_is_writeable(const struct sock *sk)
+{
+ return __sk_stream_is_writeable(sk, 0);
+}
+
+static inline int sk_under_cgroup_hierarchy(struct sock *sk,
+ struct cgroup *ancestor)
+{
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
+ ancestor);
+#else
+ return -ENOTSUPP;
+#endif
+}
+
+static inline bool sk_has_memory_pressure(const struct sock *sk)
+{
+ return sk->sk_prot->memory_pressure != NULL;
+}
+
+static inline bool sk_under_global_memory_pressure(const struct sock *sk)
+{
+ return sk->sk_prot->memory_pressure &&
+ !!READ_ONCE(*sk->sk_prot->memory_pressure);
+}
+
+static inline bool sk_under_memory_pressure(const struct sock *sk)
+{
+ if (!sk->sk_prot->memory_pressure)
+ return false;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
+ mem_cgroup_under_socket_pressure(sk->sk_memcg))
+ return true;
+
+ return !!READ_ONCE(*sk->sk_prot->memory_pressure);
+}
+
+static inline long
+proto_memory_allocated(const struct proto *prot)
+{
+ return max(0L, atomic_long_read(prot->memory_allocated));
+}
+
+static inline long
+sk_memory_allocated(const struct sock *sk)
+{
+ return proto_memory_allocated(sk->sk_prot);
+}
+
+/* 1 MB per cpu, in page units */
+#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
+
+static inline void
+sk_memory_allocated_add(struct sock *sk, int amt)
+{
+ int local_reserve;
+
+ preempt_disable();
+ local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
+ if (local_reserve >= SK_MEMORY_PCPU_RESERVE) {
+ __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
+ atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
+ }
+ preempt_enable();
+}
+
+static inline void
+sk_memory_allocated_sub(struct sock *sk, int amt)
+{
+ int local_reserve;
+
+ preempt_disable();
+ local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
+ if (local_reserve <= -SK_MEMORY_PCPU_RESERVE) {
+ __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
+ atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
+ }
+ preempt_enable();
+}
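+
+/* Example of the batching arithmetic: with 4 KiB pages,
+ * SK_MEMORY_PCPU_RESERVE is 1 MiB >> 12 = 256 pages. Per-CPU deltas are
+ * accumulated locally and folded into the shared memory_allocated counter
+ * only once they reach +/- 256 pages, keeping the common case free of
+ * atomic operations on a shared cacheline.
+ */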
+
+#define SK_ALLOC_PERCPU_COUNTER_BATCH 16
+
+static inline void sk_sockets_allocated_dec(struct sock *sk)
+{
+ percpu_counter_add_batch(sk->sk_prot->sockets_allocated, -1,
+ SK_ALLOC_PERCPU_COUNTER_BATCH);
+}
+
+static inline void sk_sockets_allocated_inc(struct sock *sk)
+{
+ percpu_counter_add_batch(sk->sk_prot->sockets_allocated, 1,
+ SK_ALLOC_PERCPU_COUNTER_BATCH);
+}
+
+static inline u64
+sk_sockets_allocated_read_positive(struct sock *sk)
+{
+ return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
+}
+
+static inline int
+proto_sockets_allocated_sum_positive(struct proto *prot)
+{
+ return percpu_counter_sum_positive(prot->sockets_allocated);
+}
+
+static inline bool
+proto_memory_pressure(struct proto *prot)
+{
+ if (!prot->memory_pressure)
+ return false;
+ return !!READ_ONCE(*prot->memory_pressure);
+}
+
+
+#ifdef CONFIG_PROC_FS
+#define PROTO_INUSE_NR 64 /* should be enough for the first time */
+struct prot_inuse {
+ int all;
+ int val[PROTO_INUSE_NR];
+};
+
+static inline void sock_prot_inuse_add(const struct net *net,
+ const struct proto *prot, int val)
+{
+ this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
+}
+
+static inline void sock_inuse_add(const struct net *net, int val)
+{
+ this_cpu_add(net->core.prot_inuse->all, val);
+}
+
+int sock_prot_inuse_get(struct net *net, struct proto *proto);
+int sock_inuse_get(struct net *net);
+#else
+static inline void sock_prot_inuse_add(const struct net *net,
+ const struct proto *prot, int val)
+{
+}
+
+static inline void sock_inuse_add(const struct net *net, int val)
+{
+}
+#endif
+
+
+/* With per-bucket locks this operation is not atomic, so this
+ * version is no worse.
+ */
+static inline int __sk_prot_rehash(struct sock *sk)
+{
+ sk->sk_prot->unhash(sk);
+ return sk->sk_prot->hash(sk);
+}
+
+/* About 10 seconds */
+#define SOCK_DESTROY_TIME (10*HZ)
+
+/* Ports 0-1023 can't be bound to unless you are superuser */
+#define PROT_SOCK 1024
+
+#define SHUTDOWN_MASK 3
+#define RCV_SHUTDOWN 1
+#define SEND_SHUTDOWN 2
+
+#define SOCK_BINDADDR_LOCK 4
+#define SOCK_BINDPORT_LOCK 8
+
+struct socket_alloc {
+ struct socket socket;
+ struct inode vfs_inode;
+};
+
+static inline struct socket *SOCKET_I(struct inode *inode)
+{
+ return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
+}
+
+static inline struct inode *SOCK_INODE(struct socket *socket)
+{
+ return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
+}
+
+/*
+ * Functions for memory accounting
+ */
+int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
+int __sk_mem_schedule(struct sock *sk, int size, int kind);
+void __sk_mem_reduce_allocated(struct sock *sk, int amount);
+void __sk_mem_reclaim(struct sock *sk, int amount);
+
+#define SK_MEM_SEND 0
+#define SK_MEM_RECV 1
+
+/* sysctl_mem values are in pages */
+static inline long sk_prot_mem_limits(const struct sock *sk, int index)
+{
+ return READ_ONCE(sk->sk_prot->sysctl_mem[index]);
+}
+
+static inline int sk_mem_pages(int amt)
+{
+ return (amt + PAGE_SIZE - 1) >> PAGE_SHIFT;
+}
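+
+/* Example: with 4 KiB pages, sk_mem_pages(6000) returns 2, because memory
+ * accounting always rounds up to whole pages.
+ */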
+
+static inline bool sk_has_account(struct sock *sk)
+{
+ /* return true if protocol supports memory accounting */
+ return !!sk->sk_prot->memory_allocated;
+}
+
+static inline bool sk_wmem_schedule(struct sock *sk, int size)
+{
+ int delta;
+
+ if (!sk_has_account(sk))
+ return true;
+ delta = size - sk->sk_forward_alloc;
+ return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_SEND);
+}
+
+static inline bool
+sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
+{
+ int delta;
+
+ if (!sk_has_account(sk))
+ return true;
+ delta = size - sk->sk_forward_alloc;
+ return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) ||
+ skb_pfmemalloc(skb);
+}
+
+static inline int sk_unused_reserved_mem(const struct sock *sk)
+{
+ int unused_mem;
+
+ if (likely(!sk->sk_reserved_mem))
+ return 0;
+
+ unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
+ atomic_read(&sk->sk_rmem_alloc);
+
+ return unused_mem > 0 ? unused_mem : 0;
+}
+
+static inline void sk_mem_reclaim(struct sock *sk)
+{
+ int reclaimable;
+
+ if (!sk_has_account(sk))
+ return;
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable >= (int)PAGE_SIZE)
+ __sk_mem_reclaim(sk, reclaimable);
+}
+
+static inline void sk_mem_reclaim_final(struct sock *sk)
+{
+ sk->sk_reserved_mem = 0;
+ sk_mem_reclaim(sk);
+}
+
+static inline void sk_mem_charge(struct sock *sk, int size)
+{
+ if (!sk_has_account(sk))
+ return;
+ sk_forward_alloc_add(sk, -size);
+}
+
+static inline void sk_mem_uncharge(struct sock *sk, int size)
+{
+ if (!sk_has_account(sk))
+ return;
+ sk_forward_alloc_add(sk, size);
+ sk_mem_reclaim(sk);
+}
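+
+/* Usage sketch (illustrative): queueing code typically schedules forward
+ * allocation first and then charges it, undoing the charge when the data
+ * leaves the socket:
+ *
+ *	if (!sk_wmem_schedule(sk, skb->truesize))
+ *		return -ENOBUFS;
+ *	sk_mem_charge(sk, skb->truesize);
+ *	...
+ *	sk_mem_uncharge(sk, skb->truesize);
+ */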
+
+/*
+ * Macro so as to not evaluate some arguments when
+ * lockdep is not enabled.
+ *
+ * Mark both the sk_lock and the sk_lock.slock as a
+ * per-address-family lock class.
+ */
+#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
+do { \
+ sk->sk_lock.owned = 0; \
+ init_waitqueue_head(&sk->sk_lock.wq); \
+ spin_lock_init(&(sk)->sk_lock.slock); \
+ debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
+ sizeof((sk)->sk_lock)); \
+ lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
+ (skey), (sname)); \
+ lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
+} while (0)
+
+static inline bool lockdep_sock_is_held(const struct sock *sk)
+{
+ return lockdep_is_held(&sk->sk_lock) ||
+ lockdep_is_held(&sk->sk_lock.slock);
+}
+
+void lock_sock_nested(struct sock *sk, int subclass);
+
+static inline void lock_sock(struct sock *sk)
+{
+ lock_sock_nested(sk, 0);
+}
+
+void __lock_sock(struct sock *sk);
+void __release_sock(struct sock *sk);
+void release_sock(struct sock *sk);
+
+/* BH context may only use the following locking interface. */
+#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
+#define bh_lock_sock_nested(__sk) \
+ spin_lock_nested(&((__sk)->sk_lock.slock), \
+ SINGLE_DEPTH_NESTING)
+#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
+
+bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
+
+/**
+ * lock_sock_fast - fast version of lock_sock
+ * @sk: socket
+ *
+ * This version should be used for very small sections, where the process
+ * won't block. It returns false if the fast path is taken:
+ *
+ *   sk_lock.slock locked, owned = 0, BH disabled
+ *
+ * and true if the slow path is taken:
+ *
+ *   sk_lock.slock unlocked, owned = 1, BH enabled
+ */
+static inline bool lock_sock_fast(struct sock *sk)
+{
+ /* The sk_lock has mutex_lock() semantics here. */
+ mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+
+ return __lock_sock_fast(sk);
+}
+
+/* fast socket lock variant for caller already holding a [different] socket lock */
+static inline bool lock_sock_fast_nested(struct sock *sk)
+{
+ mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+
+ return __lock_sock_fast(sk);
+}
+
+/**
+ * unlock_sock_fast - complement of lock_sock_fast
+ * @sk: socket
+ * @slow: slow mode
+ *
+ * Fast unlock of the socket, for user context.
+ * If slow mode is on, we call the regular release_sock().
+ */
+static inline void unlock_sock_fast(struct sock *sk, bool slow)
+ __releases(&sk->sk_lock.slock)
+{
+ if (slow) {
+ release_sock(sk);
+ __release(&sk->sk_lock.slock);
+ } else {
+ mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
+ spin_unlock_bh(&sk->sk_lock.slock);
+ }
+}
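+
+/* Usage sketch: the two helpers are always paired; the bool returned by
+ * lock_sock_fast() tells unlock_sock_fast() which path was taken:
+ *
+ *	bool slow = lock_sock_fast(sk);
+ *
+ *	... short, non-blocking critical section ...
+ *	unlock_sock_fast(sk, slow);
+ */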
+
+void sockopt_lock_sock(struct sock *sk);
+void sockopt_release_sock(struct sock *sk);
+bool sockopt_ns_capable(struct user_namespace *ns, int cap);
+bool sockopt_capable(int cap);
+
+/* Used by processes to "lock" a socket state, so that
+ * interrupts and bottom half handlers won't change it
+ * from under us. It essentially blocks any incoming
+ * packets, so that we won't get any new data or any
+ * packets that change the state of the socket.
+ *
+ * While locked, BH processing will add new packets to
+ * the backlog queue. This queue is processed by the
+ * owner of the socket lock right before it is released.
+ *
+ * Since ~2.3.5 it is also an exclusive sleep lock serializing
+ * accesses from user process context.
+ */
+
+static inline void sock_owned_by_me(const struct sock *sk)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
+#endif
+}
+
+static inline bool sock_owned_by_user(const struct sock *sk)
+{
+ sock_owned_by_me(sk);
+ return sk->sk_lock.owned;
+}
+
+static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
+{
+ return sk->sk_lock.owned;
+}
+
+static inline void sock_release_ownership(struct sock *sk)
+{
+ if (sock_owned_by_user_nocheck(sk)) {
+ sk->sk_lock.owned = 0;
+
+ /* The sk_lock has mutex_unlock() semantics: */
+ mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
+ }
+}
+
+/* no reclassification while locks are held */
+static inline bool sock_allow_reclassification(const struct sock *csk)
+{
+ struct sock *sk = (struct sock *)csk;
+
+ return !sock_owned_by_user_nocheck(sk) &&
+ !spin_is_locked(&sk->sk_lock.slock);
+}
+
+struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
+ struct proto *prot, int kern);
+void sk_free(struct sock *sk);
+void sk_destruct(struct sock *sk);
+struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
+void sk_free_unlock_clone(struct sock *sk);
+
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+ gfp_t priority);
+void __sock_wfree(struct sk_buff *skb);
+void sock_wfree(struct sk_buff *skb);
+struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
+ gfp_t priority);
+void skb_orphan_partial(struct sk_buff *skb);
+void sock_rfree(struct sk_buff *skb);
+void sock_efree(struct sk_buff *skb);
+#ifdef CONFIG_INET
+void sock_edemux(struct sk_buff *skb);
+void sock_pfree(struct sk_buff *skb);
+#else
+#define sock_edemux sock_efree
+#endif
+
+int sk_setsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
+int sock_setsockopt(struct socket *sock, int level, int op,
+ sockptr_t optval, unsigned int optlen);
+
+int sk_getsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
+int sock_getsockopt(struct socket *sock, int level, int op,
+ char __user *optval, int __user *optlen);
+int sock_gettstamp(struct socket *sock, void __user *userstamp,
+ bool timeval, bool time32);
+struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+ unsigned long data_len, int noblock,
+ int *errcode, int max_page_order);
+
+static inline struct sk_buff *sock_alloc_send_skb(struct sock *sk,
+ unsigned long size,
+ int noblock, int *errcode)
+{
+ return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
+}
+
+void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
+void sock_kfree_s(struct sock *sk, void *mem, int size);
+void sock_kzfree_s(struct sock *sk, void *mem, int size);
+void sk_send_sigurg(struct sock *sk);
+
+static inline void sock_replace_proto(struct sock *sk, struct proto *proto)
+{
+ if (sk->sk_socket)
+ clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
+ WRITE_ONCE(sk->sk_prot, proto);
+}
+
+struct sockcm_cookie {
+ u64 transmit_time;
+ u32 mark;
+ u16 tsflags;
+};
+
+static inline void sockcm_init(struct sockcm_cookie *sockc,
+ const struct sock *sk)
+{
+ *sockc = (struct sockcm_cookie) {
+ .tsflags = READ_ONCE(sk->sk_tsflags)
+ };
+}
+
+int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+ struct sockcm_cookie *sockc);
+int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
+ struct sockcm_cookie *sockc);
+
+/*
+ * Functions to fill in entries in struct proto_ops when a protocol
+ * does not implement a particular function.
+ */
+int sock_no_bind(struct socket *, struct sockaddr *, int);
+int sock_no_connect(struct socket *, struct sockaddr *, int, int);
+int sock_no_socketpair(struct socket *, struct socket *);
+int sock_no_accept(struct socket *, struct socket *, int, bool);
+int sock_no_getname(struct socket *, struct sockaddr *, int);
+int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
+int sock_no_listen(struct socket *, int);
+int sock_no_shutdown(struct socket *, int);
+int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
+int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
+int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
+int sock_no_mmap(struct file *file, struct socket *sock,
+ struct vm_area_struct *vma);
+ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
+ size_t size, int flags);
+ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+
+/*
+ * Functions to fill in entries in struct proto_ops when a protocol
+ * uses the inet style.
+ */
+int sock_common_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen);
+int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
+int sock_common_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
+
+void sk_common_release(struct sock *sk);
+
+/*
+ * Default socket callbacks and setup code
+ */
+
+/* Initialise core socket variables using an explicit uid. */
+void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid);
+
+/* Initialise core socket variables.
+ * Assumes struct socket *sock is embedded in a struct socket_alloc.
+ */
+void sock_init_data(struct socket *sock, struct sock *sk);
+
+/*
+ * Socket reference counting postulates.
+ *
+ * * Each user of a socket SHOULD hold a reference count.
+ * * Each access point to a socket (a hash table bucket, a reference from a
+ *   list, a running timer, an skb in flight) MUST hold a reference count.
+ * * When the reference count hits 0, it will never increase again.
+ * * When the reference count hits 0, it means that no references from
+ *   outside exist to this socket and the current process on the current CPU
+ *   is the last user and may/should destroy this socket.
+ * * sk_free is called from any context: process, BH, IRQ. When
+ *   it is called, the socket has no references from outside -> sk_free
+ *   may release descendant resources allocated by the socket, but
+ *   by the time it is called, the socket is NOT referenced by any
+ *   hash tables, lists etc.
+ * * Packets delivered from outside (from the network or from another
+ *   process) and enqueued on receive/error queues SHOULD NOT grab a
+ *   reference count while they sit in a queue. Otherwise, packets would
+ *   leak when a socket is looked up by one CPU while unhashing is done by
+ *   another CPU. This applies to udp/raw and netlink (which would leak via
+ *   the receive and error queues) and to tcp (which would leak via the
+ *   backlog). Packet sockets do all their processing inside
+ *   BR_NETPROTO_LOCK, so they do not have this race. UNIX sockets use a
+ *   separate SMP lock, so they are not subject to it either.
+ */
+
+/* Ungrab socket and destroy it, if it was the last reference. */
+static inline void sock_put(struct sock *sk)
+{
+ if (refcount_dec_and_test(&sk->sk_refcnt))
+ sk_free(sk);
+}
+/* Generic version of sock_put(), dealing with all sockets
+ * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
+ */
+void sock_gen_put(struct sock *sk);
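+
+/* Illustrative pairing: every sock_hold() (or refcounted lookup) must be
+ * matched by a sock_put(); code that may see timewait or request sockets
+ * should use sock_gen_put() instead:
+ *
+ *	sock_hold(sk);
+ *	...
+ *	sock_put(sk);
+ */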
+
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
+ unsigned int trim_cap, bool refcounted);
+static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+ const int nested)
+{
+ return __sk_receive_skb(sk, skb, nested, 1, true);
+}
+
+static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
+{
+ /* sk_tx_queue_mapping accepts only up to a 16-bit value */
+ if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
+ return;
+ /* Paired with READ_ONCE() in sk_tx_queue_get() and with other
+ * WRITE_ONCE() calls, because the socket lock might not be held.
+ */
+ WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
+}
+
+#define NO_QUEUE_MAPPING USHRT_MAX
+
+static inline void sk_tx_queue_clear(struct sock *sk)
+{
+ /* Paired with READ_ONCE() in sk_tx_queue_get() and with other
+ * WRITE_ONCE() calls, because the socket lock might not be held.
+ */
+ WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
+}
+
+static inline int sk_tx_queue_get(const struct sock *sk)
+{
+ if (sk) {
+ /* Paired with WRITE_ONCE() in sk_tx_queue_clear()
+ * and sk_tx_queue_set().
+ */
+ int val = READ_ONCE(sk->sk_tx_queue_mapping);
+
+ if (val != NO_QUEUE_MAPPING)
+ return val;
+ }
+ return -1;
+}
+
+static inline void __sk_rx_queue_set(struct sock *sk,
+ const struct sk_buff *skb,
+ bool force_set)
+{
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
+ if (skb_rx_queue_recorded(skb)) {
+ u16 rx_queue = skb_get_rx_queue(skb);
+
+ if (force_set ||
+ unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+ WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
+ }
+#endif
+}
+
+static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+{
+ __sk_rx_queue_set(sk, skb, true);
+}
+
+static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb)
+{
+ __sk_rx_queue_set(sk, skb, false);
+}
+
+static inline void sk_rx_queue_clear(struct sock *sk)
+{
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
+ WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING);
+#endif
+}
+
+static inline int sk_rx_queue_get(const struct sock *sk)
+{
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
+ if (sk) {
+ int res = READ_ONCE(sk->sk_rx_queue_mapping);
+
+ if (res != NO_QUEUE_MAPPING)
+ return res;
+ }
+#endif
+
+ return -1;
+}
+
+static inline void sk_set_socket(struct sock *sk, struct socket *sock)
+{
+ sk->sk_socket = sock;
+}
+
+static inline wait_queue_head_t *sk_sleep(struct sock *sk)
+{
+ BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
+ return &rcu_dereference_raw(sk->sk_wq)->wait;
+}
+/* Detach the socket from process context.
+ * Announce the socket dead, detach it from the wait queue and inode.
+ * Note that the parent inode holds a reference count on this struct sock;
+ * we do not release it in this function, because the protocol
+ * probably wants some additional cleanups or even to continue
+ * working with this socket (TCP).
+ */
+static inline void sock_orphan(struct sock *sk)
+{
+ write_lock_bh(&sk->sk_callback_lock);
+ sock_set_flag(sk, SOCK_DEAD);
+ sk_set_socket(sk, NULL);
+ sk->sk_wq = NULL;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static inline void sock_graft(struct sock *sk, struct socket *parent)
+{
+ WARN_ON(parent->sk);
+ write_lock_bh(&sk->sk_callback_lock);
+ rcu_assign_pointer(sk->sk_wq, &parent->wq);
+ parent->sk = sk;
+ sk_set_socket(sk, parent);
+ sk->sk_uid = SOCK_INODE(parent)->i_uid;
+ security_sock_graft(sk, parent);
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+kuid_t sock_i_uid(struct sock *sk);
+unsigned long __sock_i_ino(struct sock *sk);
+unsigned long sock_i_ino(struct sock *sk);
+
+static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
+{
+ return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
+}
+
+static inline u32 net_tx_rndhash(void)
+{
+ u32 v = get_random_u32();
+
+ return v ?: 1;
+}
+
+static inline void sk_set_txhash(struct sock *sk)
+{
+ /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
+ WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
+}
+
+static inline bool sk_rethink_txhash(struct sock *sk)
+{
+ if (sk->sk_txhash && sk->sk_txrehash == SOCK_TXREHASH_ENABLED) {
+ sk_set_txhash(sk);
+ return true;
+ }
+ return false;
+}
+
+static inline struct dst_entry *
+__sk_dst_get(struct sock *sk)
+{
+ return rcu_dereference_check(sk->sk_dst_cache,
+ lockdep_sock_is_held(sk));
+}
+
+static inline struct dst_entry *
+sk_dst_get(struct sock *sk)
+{
+ struct dst_entry *dst;
+
+ rcu_read_lock();
+ dst = rcu_dereference(sk->sk_dst_cache);
+ if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+ dst = NULL;
+ rcu_read_unlock();
+ return dst;
+}
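+
+/* Usage sketch: callers that are not under the socket lock take their own
+ * reference and must release it; dst_release(NULL) is a no-op, so the
+ * error path stays simple:
+ *
+ *	struct dst_entry *dst = sk_dst_get(sk);
+ *
+ *	if (dst)
+ *		mtu = dst_mtu(dst);
+ *	dst_release(dst);
+ *
+ * dst_mtu() stands in for any use of the entry while the reference is held.
+ */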
+
+static inline void __dst_negative_advice(struct sock *sk)
+{
+ struct dst_entry *ndst, *dst = __sk_dst_get(sk);
+
+ if (dst && dst->ops->negative_advice) {
+ ndst = dst->ops->negative_advice(dst);
+
+ if (ndst != dst) {
+ rcu_assign_pointer(sk->sk_dst_cache, ndst);
+ sk_tx_queue_clear(sk);
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ }
+ }
+}
+
+static inline void dst_negative_advice(struct sock *sk)
+{
+ sk_rethink_txhash(sk);
+ __dst_negative_advice(sk);
+}
+
+static inline void
+__sk_dst_set(struct sock *sk, struct dst_entry *dst)
+{
+ struct dst_entry *old_dst;
+
+ sk_tx_queue_clear(sk);
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ old_dst = rcu_dereference_protected(sk->sk_dst_cache,
+ lockdep_sock_is_held(sk));
+ rcu_assign_pointer(sk->sk_dst_cache, dst);
+ dst_release(old_dst);
+}
+
+static inline void
+sk_dst_set(struct sock *sk, struct dst_entry *dst)
+{
+ struct dst_entry *old_dst;
+
+ sk_tx_queue_clear(sk);
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+ dst_release(old_dst);
+}
+
+static inline void
+__sk_dst_reset(struct sock *sk)
+{
+ __sk_dst_set(sk, NULL);
+}
+
+static inline void
+sk_dst_reset(struct sock *sk)
+{
+ sk_dst_set(sk, NULL);
+}
+
+struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
+
+struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
+
+static inline void sk_dst_confirm(struct sock *sk)
+{
+ if (!READ_ONCE(sk->sk_dst_pending_confirm))
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
+}
+
+static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
+{
+ if (skb_get_dst_pending_confirm(skb)) {
+ struct sock *sk = skb->sk;
+
+ if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ neigh_confirm(n);
+ }
+}
+
+bool sk_mc_loop(struct sock *sk);
+
+static inline bool sk_can_gso(const struct sock *sk)
+{
+ return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
+}
+
+void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
+
+static inline void sk_gso_disable(struct sock *sk)
+{
+ sk->sk_gso_disabled = 1;
+ sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+}
+
+static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
+ struct iov_iter *from, char *to,
+ int copy, int offset)
+{
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ __wsum csum = 0;
+ if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
+ return -EFAULT;
+ skb->csum = csum_block_add(skb->csum, csum, offset);
+ } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
+ if (!copy_from_iter_full_nocache(to, copy, from))
+ return -EFAULT;
+ } else if (!copy_from_iter_full(to, copy, from))
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
+ struct iov_iter *from, int copy)
+{
+ int err, offset = skb->len;
+
+ err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
+ copy, offset);
+ if (err)
+ __skb_trim(skb, offset);
+
+ return err;
+}
+
+static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
+ struct sk_buff *skb,
+ struct page *page,
+ int off, int copy)
+{
+ int err;
+
+ err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
+ copy, skb->len);
+ if (err)
+ return err;
+
+ skb_len_add(skb, copy);
+ sk_wmem_queued_add(sk, copy);
+ sk_mem_charge(sk, copy);
+ return 0;
+}
+
+/**
+ * sk_wmem_alloc_get - returns write allocations
+ * @sk: socket
+ *
+ * Return: sk_wmem_alloc minus initial offset of one
+ */
+static inline int sk_wmem_alloc_get(const struct sock *sk)
+{
+ return refcount_read(&sk->sk_wmem_alloc) - 1;
+}
+
+/**
+ * sk_rmem_alloc_get - returns read allocations
+ * @sk: socket
+ *
+ * Return: sk_rmem_alloc
+ */
+static inline int sk_rmem_alloc_get(const struct sock *sk)
+{
+ return atomic_read(&sk->sk_rmem_alloc);
+}
+
+/**
+ * sk_has_allocations - check if allocations are outstanding
+ * @sk: socket
+ *
+ * Return: true if socket has write or read allocations
+ */
+static inline bool sk_has_allocations(const struct sock *sk)
+{
+ return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
+}
+
+/**
+ * skwq_has_sleeper - check if there are any waiting processes
+ * @wq: struct socket_wq
+ *
+ * Return: true if socket_wq has waiting processes
+ *
+ * The purpose of the skwq_has_sleeper and sock_poll_wait is to wrap the memory
+ * barrier call. They were added due to the race found within the tcp code.
+ *
+ * Consider following tcp code paths::
+ *
+ * CPU1 CPU2
+ * sys_select receive packet
+ * ... ...
+ * __add_wait_queue update tp->rcv_nxt
+ * ... ...
+ * tp->rcv_nxt check sock_def_readable
+ * ... {
+ * schedule rcu_read_lock();
+ * wq = rcu_dereference(sk->sk_wq);
+ * if (wq && waitqueue_active(&wq->wait))
+ * wake_up_interruptible(&wq->wait)
+ * ...
+ * }
+ *
+ * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
+ * in its cache, and so does the tp->rcv_nxt update on the CPU2 side. CPU1
+ * could then end up calling schedule and sleeping forever if there is no
+ * more data on the socket.
+ */
+static inline bool skwq_has_sleeper(struct socket_wq *wq)
+{
+ return wq && wq_has_sleeper(&wq->wait);
+}
+
+/**
+ * sock_poll_wait - place memory barrier behind the poll_wait call.
+ * @filp: file
+ * @sock: socket to wait on
+ * @p: poll_table
+ *
+ * See the comments in the wq_has_sleeper function.
+ */
+static inline void sock_poll_wait(struct file *filp, struct socket *sock,
+ poll_table *p)
+{
+ if (!poll_does_not_wait(p)) {
+ poll_wait(filp, &sock->wq.wait, p);
+ /* We need to be sure we are in sync with the
+ * socket flags modification.
+ *
+ * This memory barrier is paired with the one in wq_has_sleeper.
+ */
+ smp_mb();
+ }
+}
+
+static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
+{
+ /* This pairs with WRITE_ONCE() in sk_set_txhash() */
+ u32 txhash = READ_ONCE(sk->sk_txhash);
+
+ if (txhash) {
+ skb->l4_hash = 1;
+ skb->hash = txhash;
+ }
+}
+
+void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
+
+/*
+ * Queue a received datagram if it will fit. Stream and sequenced
+ * protocols can't normally use this, as they need to fit buffers in
+ * and play with them.
+ *
+ * Inlined as it's very short and called for pretty much every
+ * packet ever received.
+ */
+static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+{
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sock_rfree;
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+ sk_mem_charge(sk, skb->truesize);
+}
+
+static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
+{
+ if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
+ skb_orphan(skb);
+ skb->destructor = sock_efree;
+ skb->sk = sk;
+ return true;
+ }
+ return false;
+}
+
+static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk)
+{
+ skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
+ if (skb) {
+ if (sk_rmem_schedule(sk, skb, skb->truesize)) {
+ skb_set_owner_r(skb, sk);
+ return skb;
+ }
+ __kfree_skb(skb);
+ }
+ return NULL;
+}
+
+static inline void skb_prepare_for_gro(struct sk_buff *skb)
+{
+ if (skb->destructor != sock_wfree) {
+ skb_orphan(skb);
+ return;
+ }
+ skb->slow_gro = 1;
+}
+
+void sk_reset_timer(struct sock *sk, struct timer_list *timer,
+ unsigned long expires);
+
+void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+
+void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
+
+int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
+ struct sk_buff *skb, unsigned int flags,
+ void (*destructor)(struct sock *sk,
+ struct sk_buff *skb));
+int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+
+int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *reason);
+
+static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ return sock_queue_rcv_skb_reason(sk, skb, NULL);
+}
+
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
+struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
+
+/*
+ * Recover an error report and clear atomically
+ */
+
+static inline int sock_error(struct sock *sk)
+{
+ int err;
+
+ /* Avoid an atomic operation for the common case.
+ * This is racy since another cpu/thread can change sk_err under us.
+ */
+ if (likely(data_race(!sk->sk_err)))
+ return 0;
+
+ err = xchg(&sk->sk_err, 0);
+ return -err;
+}
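+
+/* Usage sketch: blocking paths commonly bail out with the pending error,
+ * which sock_error() returns already negated (e.g. -ECONNRESET):
+ *
+ *	err = sock_error(sk);
+ *	if (err)
+ *		goto out_err;
+ */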
+
+void sk_error_report(struct sock *sk);
+
+static inline unsigned long sock_wspace(struct sock *sk)
+{
+ int amt = 0;
+
+ if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
+ amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
+ if (amt < 0)
+ amt = 0;
+ }
+ return amt;
+}
+
+/* Note:
+ * We use sk->sk_wq_raw from contexts that know this
+ * pointer is not NULL and cannot disappear or change.
+ */
+static inline void sk_set_bit(int nr, struct sock *sk)
+{
+ if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
+ !sock_flag(sk, SOCK_FASYNC))
+ return;
+
+ set_bit(nr, &sk->sk_wq_raw->flags);
+}
+
+static inline void sk_clear_bit(int nr, struct sock *sk)
+{
+ if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
+ !sock_flag(sk, SOCK_FASYNC))
+ return;
+
+ clear_bit(nr, &sk->sk_wq_raw->flags);
+}
+
+static inline void sk_wake_async(const struct sock *sk, int how, int band)
+{
+ if (sock_flag(sk, SOCK_FASYNC)) {
+ rcu_read_lock();
+ sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
+ rcu_read_unlock();
+ }
+}
+
+/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
+ * need sizeof(sk_buff) + MTU + padding, unless the net driver performs
+ * copybreak.
+ * Note: for send buffers, TCP works better if we can build two skbs at
+ * minimum.
+ */
+#define TCP_SKB_MIN_TRUESIZE (2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))
+
+#define SOCK_MIN_SNDBUF (TCP_SKB_MIN_TRUESIZE * 2)
+#define SOCK_MIN_RCVBUF TCP_SKB_MIN_TRUESIZE
+
+static inline void sk_stream_moderate_sndbuf(struct sock *sk)
+{
+ u32 val;
+
+ if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+ return;
+
+ val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+ val = max_t(u32, val, sk_unused_reserved_mem(sk));
+
+ WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
+}
+
+/**
+ * sk_page_frag - return an appropriate page_frag
+ * @sk: socket
+ *
+ * Use the per task page_frag instead of the per socket one for
+ * optimization when we know that we're in process context and own
+ * everything that's associated with %current.
+ *
+ * Both direct reclaim and page faults can nest inside other
+ * socket operations and end up recursing into sk_page_frag()
+ * while it's already in use: explicitly avoid task page_frag
+ * usage if the caller is potentially doing any of them.
+ * This assumes that page fault handlers use the GFP_NOFS flags.
+ *
+ * Return: a per task page_frag if context allows that,
+ * otherwise a per socket one.
+ */
+static inline struct page_frag *sk_page_frag(struct sock *sk)
+{
+ if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) ==
+ (__GFP_DIRECT_RECLAIM | __GFP_FS))
+ return &current->task_frag;
+
+ return &sk->sk_frag;
+}
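+
+/* Usage sketch (modeled on typical sendmsg paths): pick the appropriate
+ * page_frag and make sure it has usable space before copying into it:
+ *
+ *	struct page_frag *pfrag = sk_page_frag(sk);
+ *
+ *	if (!sk_page_frag_refill(sk, pfrag))
+ *		goto wait_for_memory;
+ */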
+
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
+
+/*
+ * Default write policy as shown to user space via poll/select/SIGIO
+ */
+static inline bool sock_writeable(const struct sock *sk)
+{
+ return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
+}
+
+static inline gfp_t gfp_any(void)
+{
+ return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
+}
+
+static inline gfp_t gfp_memcg_charge(void)
+{
+ return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
+}
+
+static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
+{
+ return noblock ? 0 : sk->sk_rcvtimeo;
+}
+
+static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
+{
+ return noblock ? 0 : sk->sk_sndtimeo;
+}
+
+static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
+{
+ int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
+
+ return v ?: 1;
+}
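+
+/* Example: with SO_RCVLOWAT set to 100 and len == 50 this returns 50;
+ * with SO_RCVLOWAT at 0 it returns 1, so a reader always waits for at
+ * least one byte unless waitall forces the full len.
+ */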
+
+/* Alas, socket operations with a timeout are not restartable.
+ * Compare this to poll().
+ */
+static inline int sock_intr_errno(long timeo)
+{
+ return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
+}
+
+struct sock_skb_cb {
+ u32 dropcount;
+};
+
+/* Store sock_skb_cb at the end of skb->cb[] so protocol families
+ * using skb->cb[] can keep using it directly and utilize its
+ * alignment guarantee.
+ */
+#define SOCK_SKB_CB_OFFSET ((sizeof_field(struct sk_buff, cb) - \
+ sizeof(struct sock_skb_cb)))
+
+#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
+ SOCK_SKB_CB_OFFSET))
+
+#define sock_skb_cb_check_size(size) \
+ BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
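+
+/* Usage sketch: a protocol that places its own control block in front of
+ * sock_skb_cb can assert at build time that both fit into skb->cb[];
+ * struct my_proto_skb_cb is hypothetical, for illustration only:
+ *
+ *	sock_skb_cb_check_size(sizeof(struct my_proto_skb_cb));
+ */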
+
+static inline void
+sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
+{
+ SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
+ atomic_read(&sk->sk_drops) : 0;
+}
+
+static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
+{
+ int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+
+ atomic_add(segs, &sk->sk_drops);
+}
+
+static inline ktime_t sock_read_timestamp(struct sock *sk)
+{
+#if BITS_PER_LONG==32
+ unsigned int seq;
+ ktime_t kt;
+
+ do {
+ seq = read_seqbegin(&sk->sk_stamp_seq);
+ kt = sk->sk_stamp;
+ } while (read_seqretry(&sk->sk_stamp_seq, seq));
+
+ return kt;
+#else
+ return READ_ONCE(sk->sk_stamp);
+#endif
+}
+
+static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
+{
+#if BITS_PER_LONG==32
+ write_seqlock(&sk->sk_stamp_seq);
+ sk->sk_stamp = kt;
+ write_sequnlock(&sk->sk_stamp_seq);
+#else
+ WRITE_ONCE(sk->sk_stamp, kt);
+#endif
+}
+
+void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
+void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
+
+static inline void
+sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ u32 tsflags = READ_ONCE(sk->sk_tsflags);
+ ktime_t kt = skb->tstamp;
+ /*
+ * generate control messages if
+ * - receive time stamping in software requested
+ * - software time stamp available and wanted
+ * - hardware time stamps available and wanted
+ */
+ if (sock_flag(sk, SOCK_RCVTSTAMP) ||
+ (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
+ (kt && tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
+ (hwtstamps->hwtstamp &&
+ (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
+ __sock_recv_timestamp(msg, sk, skb);
+ else
+ sock_write_timestamp(sk, kt);
+
+ if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
+ __sock_recv_wifi_status(msg, sk, skb);
+}
+
+void __sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
+
+#define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
+static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb)
+{
+#define FLAGS_RECV_CMSGS ((1UL << SOCK_RXQ_OVFL) | \
+ (1UL << SOCK_RCVTSTAMP) | \
+ (1UL << SOCK_RCVMARK))
+#define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \
+ SOF_TIMESTAMPING_RAW_HARDWARE)
+
+ if (sk->sk_flags & FLAGS_RECV_CMSGS ||
+ READ_ONCE(sk->sk_tsflags) & TSFLAGS_ANY)
+ __sock_recv_cmsgs(msg, sk, skb);
+ else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
+ sock_write_timestamp(sk, skb->tstamp);
+ else if (unlikely(sock_read_timestamp(sk) == SK_DEFAULT_STAMP))
+ sock_write_timestamp(sk, 0);
+}
+
+void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
+
+/**
+ * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
+ * @sk: socket sending this packet
+ * @tsflags: timestamping flags to use
+ * @tx_flags: completed with instructions for time stamping
+ * @tskey: filled in with next sk_tskey (not for TCP, which uses seqno)
+ *
+ * Note: callers should take care of initial ``*tx_flags`` value (usually 0)
+ */
+static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+ __u8 *tx_flags, __u32 *tskey)
+{
+ if (unlikely(tsflags)) {
+ __sock_tx_timestamp(tsflags, tx_flags);
+ if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
+ tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
+ *tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+ }
+ if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
+ *tx_flags |= SKBTX_WIFI_STATUS;
+}
+
+static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+ __u8 *tx_flags)
+{
+ _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
+}
+
+static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
+{
+ _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
+ &skb_shinfo(skb)->tskey);
+}
+
+static inline bool sk_is_inet(const struct sock *sk)
+{
+ int family = READ_ONCE(sk->sk_family);
+
+ return family == AF_INET || family == AF_INET6;
+}
+
+static inline bool sk_is_tcp(const struct sock *sk)
+{
+ return sk_is_inet(sk) &&
+ sk->sk_type == SOCK_STREAM &&
+ sk->sk_protocol == IPPROTO_TCP;
+}
+
+static inline bool sk_is_udp(const struct sock *sk)
+{
+ return sk_is_inet(sk) &&
+ sk->sk_type == SOCK_DGRAM &&
+ sk->sk_protocol == IPPROTO_UDP;
+}
+
+static inline bool sk_is_stream_unix(const struct sock *sk)
+{
+ return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM;
+}
+
+/**
+ * sk_eat_skb - Release an skb if it is no longer needed
+ * @sk: socket to eat this skb from
+ * @skb: socket buffer to eat
+ *
+ * This routine must be called with interrupts disabled or with the socket
+ * locked so that the sk_buff queue operation is safe.
+ */
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
+{
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ __kfree_skb(skb);
+}
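+
+/* Usage sketch: a recvmsg() implementation holding the socket lock might
+ * consume fully-copied skbs from the head of the receive queue;
+ * fully_copied() stands in for protocol-specific bookkeeping:
+ *
+ *	skb = skb_peek(&sk->sk_receive_queue);
+ *	if (skb && fully_copied(skb))
+ *		sk_eat_skb(sk, skb);
+ */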
+
+static inline bool
+skb_sk_is_prefetched(struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ return skb->destructor == sock_pfree;
+#else
+ return false;
+#endif /* CONFIG_INET */
+}
+
+/* This helper checks if a socket is a full socket,
+ * i.e. _not_ a timewait or request socket.
+ */
+static inline bool sk_fullsock(const struct sock *sk)
+{
+ return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
+}
+
+static inline bool
+sk_is_refcounted(struct sock *sk)
+{
+ /* Only full sockets have sk->sk_flags. */
+ return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
+}
+
+/**
+ * skb_steal_sock - steal a socket from an sk_buff
+ * @skb: sk_buff to steal the socket from
+ * @refcounted: is set to true if the socket is reference-counted
+ */
+static inline struct sock *
+skb_steal_sock(struct sk_buff *skb, bool *refcounted)
+{
+ if (skb->sk) {
+ struct sock *sk = skb->sk;
+
+ *refcounted = true;
+ if (skb_sk_is_prefetched(skb))
+ *refcounted = sk_is_refcounted(sk);
+ skb->destructor = NULL;
+ skb->sk = NULL;
+ return sk;
+ }
+ *refcounted = false;
+ return NULL;
+}
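+
+/* Usage sketch: early-demux code that steals a prefetched socket must
+ * remember whether a reference was transferred and drop it when done:
+ *
+ *	bool refcounted;
+ *	struct sock *sk = skb_steal_sock(skb, &refcounted);
+ *
+ *	...
+ *	if (sk && refcounted)
+ *		sock_put(sk);
+ */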
+
+/* Checks if this SKB belongs to an HW offloaded socket
+ * and whether any SW fallbacks are required based on dev.
+ * Check decrypted mark in case skb_orphan() cleared socket.
+ */
+static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
+ struct net_device *dev)
+{
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ struct sock *sk = skb->sk;
+
+ if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
+ skb = sk->sk_validate_xmit_skb(sk, dev, skb);
+#ifdef CONFIG_TLS_DEVICE
+ } else if (unlikely(skb->decrypted)) {
+ pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
+ kfree_skb(skb);
+ skb = NULL;
+#endif
+ }
+#endif
+
+ return skb;
+}
+
+/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV socket.
+ * SYNACK messages can be attached to either one (depending on SYNCOOKIE).
+ */
+static inline bool sk_listener(const struct sock *sk)
+{
+ return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
+}
+
+void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
+int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
+ int type);
+
+bool sk_ns_capable(const struct sock *sk,
+ struct user_namespace *user_ns, int cap);
+bool sk_capable(const struct sock *sk, int cap);
+bool sk_net_capable(const struct sock *sk, int cap);
+
+void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
+
+/* Take into consideration the size of the struct sk_buff overhead in the
+ * determination of these values, since that is non-constant across
+ * platforms. This makes socket queueing behavior and performance
+ * not depend upon such differences.
+ */
+#define _SK_MEM_PACKETS 256
+#define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
+#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
+#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
+
+extern __u32 sysctl_wmem_max;
+extern __u32 sysctl_rmem_max;
+
+extern int sysctl_tstamp_allow_data;
+extern int sysctl_optmem_max;
+
+extern __u32 sysctl_wmem_default;
+extern __u32 sysctl_rmem_default;
+
+#define SKB_FRAG_PAGE_ORDER get_order(32768)
+DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
+
+static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
+{
+ /* Does this proto have per netns sysctl_wmem ? */
+ if (proto->sysctl_wmem_offset)
+ return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset));
+
+ return READ_ONCE(*proto->sysctl_wmem);
+}
+
+static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
+{
+ /* Does this proto have per netns sysctl_rmem ? */
+ if (proto->sysctl_rmem_offset)
+ return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset));
+
+ return READ_ONCE(*proto->sysctl_rmem);
+}
+
+/* The default TCP Small Queues budget is ~1 ms of data (1 sec >> 10).
+ * Some wifi drivers need to tweak it to get more chunks.
+ * They can use this helper from their ndo_start_xmit().
+ */
+static inline void sk_pacing_shift_update(struct sock *sk, int val)
+{
+ if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val)
+ return;
+ WRITE_ONCE(sk->sk_pacing_shift, val);
+}
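+
+/* Illustrative sketch (not part of the original header): a wifi driver
+ * requesting a bigger TSQ budget from its xmit path; val == 8 yields
+ * ~4 ms of data (1 sec >> 8). my_wifi_xmit is a hypothetical
+ * ndo_start_xmit handler.
+ */
+static netdev_tx_t my_wifi_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ if (skb->sk)
+ sk_pacing_shift_update(skb->sk, 8);
+ /* ... hand the skb to the hardware queue ... */
+ return NETDEV_TX_OK;
+}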
+
+/* if a socket is bound to a device, check that the given device
+ * index is either the same or that the socket is bound to an L3
+ * master device and the given device index is also enslaved to
+ * that L3 master
+ */
+static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
+{
+ int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
+ int mdif;
+
+ if (!bound_dev_if || bound_dev_if == dif)
+ return true;
+
+ mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
+ if (mdif && mdif == bound_dev_if)
+ return true;
+
+ return false;
+}
+
+void sock_def_readable(struct sock *sk);
+
+int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
+void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
+int sock_set_timestamping(struct sock *sk, int optname,
+ struct so_timestamping timestamping);
+
+void sock_enable_timestamps(struct sock *sk);
+void sock_no_linger(struct sock *sk);
+void sock_set_keepalive(struct sock *sk);
+void sock_set_priority(struct sock *sk, u32 priority);
+void sock_set_rcvbuf(struct sock *sk, int val);
+void sock_set_mark(struct sock *sk, u32 val);
+void sock_set_reuseaddr(struct sock *sk);
+void sock_set_reuseport(struct sock *sk);
+void sock_set_sndtimeo(struct sock *sk, s64 secs);
+
+int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
+
+int sock_get_timeout(long timeo, void *optval, bool old_timeval);
+int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
+ sockptr_t optval, int optlen, bool old_timeval);
+
+static inline bool sk_is_readable(struct sock *sk)
+{
+ if (sk->sk_prot->sock_is_readable)
+ return sk->sk_prot->sock_is_readable(sk);
+ return false;
+}
+#endif /* _SOCK_H */
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
new file mode 100644
index 000000000..6ec140b0a
--- /dev/null
+++ b/include/net/sock_reuseport.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SOCK_REUSEPORT_H
+#define _SOCK_REUSEPORT_H
+
+#include <linux/filter.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <net/sock.h>
+
+extern spinlock_t reuseport_lock;
+
+struct sock_reuseport {
+ struct rcu_head rcu;
+
+ u16 max_socks; /* length of socks */
+ u16 num_socks; /* elements in socks */
+ u16 num_closed_socks; /* closed elements in socks */
+ u16 incoming_cpu;
+ /* The last synq overflow event timestamp of this
+ * reuse->socks[] group.
+ */
+ unsigned int synq_overflow_ts;
+ /* ID stays the same even after the size of socks[] grows. */
+ unsigned int reuseport_id;
+ unsigned int bind_inany:1;
+ unsigned int has_conns:1;
+ struct bpf_prog __rcu *prog; /* optional BPF sock selector */
+ struct sock *socks[]; /* array of sock pointers */
+};
+
+extern int reuseport_alloc(struct sock *sk, bool bind_inany);
+extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
+ bool bind_inany);
+extern void reuseport_detach_sock(struct sock *sk);
+void reuseport_stop_listen_sock(struct sock *sk);
+extern struct sock *reuseport_select_sock(struct sock *sk,
+ u32 hash,
+ struct sk_buff *skb,
+ int hdr_len);
+struct sock *reuseport_migrate_sock(struct sock *sk,
+ struct sock *migrating_sk,
+ struct sk_buff *skb);
+extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
+extern int reuseport_detach_prog(struct sock *sk);
+
+static inline bool reuseport_has_conns(struct sock *sk)
+{
+ struct sock_reuseport *reuse;
+ bool ret = false;
+
+ rcu_read_lock();
+ reuse = rcu_dereference(sk->sk_reuseport_cb);
+ if (reuse && reuse->has_conns)
+ ret = true;
+ rcu_read_unlock();
+
+ return ret;
+}
+
+void reuseport_has_conns_set(struct sock *sk);
+void reuseport_update_incoming_cpu(struct sock *sk, int val);
+
+#endif /* _SOCK_REUSEPORT_H */
diff --git a/include/net/stp.h b/include/net/stp.h
new file mode 100644
index 000000000..528103fce
--- /dev/null
+++ b/include/net/stp.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_STP_H
+#define _NET_STP_H
+
+#include <linux/if_ether.h>
+
+struct stp_proto {
+ unsigned char group_address[ETH_ALEN];
+ void (*rcv)(const struct stp_proto *, struct sk_buff *,
+ struct net_device *);
+ void *data;
+};
+
+int stp_proto_register(const struct stp_proto *proto);
+void stp_proto_unregister(const struct stp_proto *proto);
+
+#endif /* _NET_STP_H */
diff --git a/include/net/strparser.h b/include/net/strparser.h
new file mode 100644
index 000000000..41e2ce9e9
--- /dev/null
+++ b/include/net/strparser.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Stream Parser
+ *
+ * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
+ */
+
+#ifndef __NET_STRPARSER_H_
+#define __NET_STRPARSER_H_
+
+#include <linux/skbuff.h>
+#include <net/sock.h>
+
+#define STRP_STATS_ADD(stat, count) ((stat) += (count))
+#define STRP_STATS_INCR(stat) ((stat)++)
+
+struct strp_stats {
+ unsigned long long msgs;
+ unsigned long long bytes;
+ unsigned int mem_fail;
+ unsigned int need_more_hdr;
+ unsigned int msg_too_big;
+ unsigned int msg_timeouts;
+ unsigned int bad_hdr_len;
+};
+
+struct strp_aggr_stats {
+ unsigned long long msgs;
+ unsigned long long bytes;
+ unsigned int mem_fail;
+ unsigned int need_more_hdr;
+ unsigned int msg_too_big;
+ unsigned int msg_timeouts;
+ unsigned int bad_hdr_len;
+ unsigned int aborts;
+ unsigned int interrupted;
+ unsigned int unrecov_intr;
+};
+
+struct strparser;
+
+/* Callbacks are called with lock held for the attached socket */
+struct strp_callbacks {
+ int (*parse_msg)(struct strparser *strp, struct sk_buff *skb);
+ void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb);
+ int (*read_sock_done)(struct strparser *strp, int err);
+ void (*abort_parser)(struct strparser *strp, int err);
+ void (*lock)(struct strparser *strp);
+ void (*unlock)(struct strparser *strp);
+};
+
+struct strp_msg {
+ int full_len;
+ int offset;
+};
+
+struct _strp_msg {
+ /* Internal cb structure. struct strp_msg must be first for passing
+ * to upper layer.
+ */
+ struct strp_msg strp;
+ int accum_len;
+};
+
+struct sk_skb_cb {
+#define SK_SKB_CB_PRIV_LEN 20
+ unsigned char data[SK_SKB_CB_PRIV_LEN];
+ /* align strp on cache line boundary within skb->cb[] */
+ unsigned char pad[4];
+ struct _strp_msg strp;
+
+ /* strp users' data follows */
+ struct tls_msg {
+ u8 control;
+ } tls;
+ /* temp_reg is a temporary register used for bpf_convert_data_end_access
+ * when dst_reg == src_reg.
+ */
+ u64 temp_reg;
+};
+
+static inline struct strp_msg *strp_msg(struct sk_buff *skb)
+{
+ return (struct strp_msg *)((void *)skb->cb +
+ offsetof(struct sk_skb_cb, strp));
+}
+
+/* Structure for an attached lower socket */
+struct strparser {
+ struct sock *sk;
+
+ u32 stopped : 1;
+ u32 paused : 1;
+ u32 aborted : 1;
+ u32 interrupted : 1;
+ u32 unrecov_intr : 1;
+
+ struct sk_buff **skb_nextp;
+ struct sk_buff *skb_head;
+ unsigned int need_bytes;
+ struct delayed_work msg_timer_work;
+ struct work_struct work;
+ struct strp_stats stats;
+ struct strp_callbacks cb;
+};
+
+/* Must be called with lock held for attached socket */
+static inline void strp_pause(struct strparser *strp)
+{
+ strp->paused = 1;
+}
+
+/* May be called without holding lock for attached socket */
+void strp_unpause(struct strparser *strp);
+/* Must be called with process lock held (lock_sock) */
+void __strp_unpause(struct strparser *strp);
+
+static inline void save_strp_stats(struct strparser *strp,
+ struct strp_aggr_stats *agg_stats)
+{
+ /* Save psock statistics in the mux when the psock is being detached. */
+
+#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += \
+ strp->stats._stat)
+ SAVE_PSOCK_STATS(msgs);
+ SAVE_PSOCK_STATS(bytes);
+ SAVE_PSOCK_STATS(mem_fail);
+ SAVE_PSOCK_STATS(need_more_hdr);
+ SAVE_PSOCK_STATS(msg_too_big);
+ SAVE_PSOCK_STATS(msg_timeouts);
+ SAVE_PSOCK_STATS(bad_hdr_len);
+#undef SAVE_PSOCK_STATS
+
+ if (strp->aborted)
+ agg_stats->aborts++;
+ if (strp->interrupted)
+ agg_stats->interrupted++;
+ if (strp->unrecov_intr)
+ agg_stats->unrecov_intr++;
+}
+
+static inline void aggregate_strp_stats(struct strp_aggr_stats *stats,
+ struct strp_aggr_stats *agg_stats)
+{
+#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat)
+ SAVE_PSOCK_STATS(msgs);
+ SAVE_PSOCK_STATS(bytes);
+ SAVE_PSOCK_STATS(mem_fail);
+ SAVE_PSOCK_STATS(need_more_hdr);
+ SAVE_PSOCK_STATS(msg_too_big);
+ SAVE_PSOCK_STATS(msg_timeouts);
+ SAVE_PSOCK_STATS(bad_hdr_len);
+ SAVE_PSOCK_STATS(aborts);
+ SAVE_PSOCK_STATS(interrupted);
+ SAVE_PSOCK_STATS(unrecov_intr);
+#undef SAVE_PSOCK_STATS
+}
+
+void strp_done(struct strparser *strp);
+void strp_stop(struct strparser *strp);
+void strp_check_rcv(struct strparser *strp);
+int strp_init(struct strparser *strp, struct sock *sk,
+ const struct strp_callbacks *cb);
+void strp_data_ready(struct strparser *strp);
+int strp_process(struct strparser *strp, struct sk_buff *orig_skb,
+ unsigned int orig_offset, size_t orig_len,
+ size_t max_msg_size, long timeo);
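+
+/* Illustrative sketch (not part of the original header): a minimal
+ * strparser user framing messages with a hypothetical 4-byte big-endian
+ * length header. All my_* names are made up for the example.
+ */
+static int my_parse_msg(struct strparser *strp, struct sk_buff *skb)
+{
+ struct strp_msg *msg = strp_msg(skb);
+ __be32 hdr;
+
+ if (skb_copy_bits(skb, msg->offset, &hdr, sizeof(hdr)))
+ return 0; /* header not complete yet; ask for more data */
+ return sizeof(hdr) + ntohl(hdr); /* total message length */
+}
+
+static void my_rcv_msg(struct strparser *strp, struct sk_buff *skb)
+{
+ struct strp_msg *msg = strp_msg(skb);
+
+ pr_debug("msg of %d bytes at offset %d\n", msg->full_len, msg->offset);
+ kfree_skb(skb); /* rcv_msg owns the skb carrying the complete message */
+}
+
+static int my_attach(struct strparser *strp, struct sock *sk)
+{
+ static const struct strp_callbacks cb = {
+ .parse_msg = my_parse_msg,
+ .rcv_msg = my_rcv_msg,
+ };
+
+ return strp_init(strp, sk, &cb);
+}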
+
+#endif /* __NET_STRPARSER_H_ */
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
new file mode 100644
index 000000000..7dcdc97c0
--- /dev/null
+++ b/include/net/switchdev.h
@@ -0,0 +1,524 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/net/switchdev.h - Switch device API
+ * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
+ */
+#ifndef _LINUX_SWITCHDEV_H_
+#define _LINUX_SWITCHDEV_H_
+
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/list.h>
+#include <net/ip_fib.h>
+
+#define SWITCHDEV_F_NO_RECURSE BIT(0)
+#define SWITCHDEV_F_SKIP_EOPNOTSUPP BIT(1)
+#define SWITCHDEV_F_DEFER BIT(2)
+
+enum switchdev_attr_id {
+ SWITCHDEV_ATTR_ID_UNDEFINED,
+ SWITCHDEV_ATTR_ID_PORT_STP_STATE,
+ SWITCHDEV_ATTR_ID_PORT_MST_STATE,
+ SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
+ SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS,
+ SWITCHDEV_ATTR_ID_PORT_MROUTER,
+ SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
+ SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
+ SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
+ SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
+ SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
+ SWITCHDEV_ATTR_ID_BRIDGE_MST,
+ SWITCHDEV_ATTR_ID_MRP_PORT_ROLE,
+ SWITCHDEV_ATTR_ID_VLAN_MSTI,
+};
+
+struct switchdev_mst_state {
+ u16 msti;
+ u8 state;
+};
+
+struct switchdev_brport_flags {
+ unsigned long val;
+ unsigned long mask;
+};
+
+struct switchdev_vlan_msti {
+ u16 vid;
+ u16 msti;
+};
+
+struct switchdev_attr {
+ struct net_device *orig_dev;
+ enum switchdev_attr_id id;
+ u32 flags;
+ void *complete_priv;
+ void (*complete)(struct net_device *dev, int err, void *priv);
+ union {
+ u8 stp_state; /* PORT_STP_STATE */
+ struct switchdev_mst_state mst_state; /* PORT_MST_STATE */
+ struct switchdev_brport_flags brport_flags; /* PORT_BRIDGE_FLAGS */
+ bool mrouter; /* PORT_MROUTER */
+ clock_t ageing_time; /* BRIDGE_AGEING_TIME */
+ bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */
+ u16 vlan_protocol; /* BRIDGE_VLAN_PROTOCOL */
+ bool mst; /* BRIDGE_MST */
+ bool mc_disabled; /* MC_DISABLED */
+ u8 mrp_port_role; /* MRP_PORT_ROLE */
+ struct switchdev_vlan_msti vlan_msti; /* VLAN_MSTI */
+ } u;
+};
+
+enum switchdev_obj_id {
+ SWITCHDEV_OBJ_ID_UNDEFINED,
+ SWITCHDEV_OBJ_ID_PORT_VLAN,
+ SWITCHDEV_OBJ_ID_PORT_MDB,
+ SWITCHDEV_OBJ_ID_HOST_MDB,
+ SWITCHDEV_OBJ_ID_MRP,
+ SWITCHDEV_OBJ_ID_RING_TEST_MRP,
+ SWITCHDEV_OBJ_ID_RING_ROLE_MRP,
+ SWITCHDEV_OBJ_ID_RING_STATE_MRP,
+ SWITCHDEV_OBJ_ID_IN_TEST_MRP,
+ SWITCHDEV_OBJ_ID_IN_ROLE_MRP,
+ SWITCHDEV_OBJ_ID_IN_STATE_MRP,
+};
+
+struct switchdev_obj {
+ struct list_head list;
+ struct net_device *orig_dev;
+ enum switchdev_obj_id id;
+ u32 flags;
+ void *complete_priv;
+ void (*complete)(struct net_device *dev, int err, void *priv);
+};
+
+/* SWITCHDEV_OBJ_ID_PORT_VLAN */
+struct switchdev_obj_port_vlan {
+ struct switchdev_obj obj;
+ u16 flags;
+ u16 vid;
+ /* If set, the notifier signifies a change of one of the following
+ * flags for a VLAN that already exists:
+ * - BRIDGE_VLAN_INFO_PVID
+ * - BRIDGE_VLAN_INFO_UNTAGGED
+ * Entries with BRIDGE_VLAN_INFO_BRENTRY unset are not notified at all.
+ */
+ bool changed;
+};
+
+#define SWITCHDEV_OBJ_PORT_VLAN(OBJ) \
+ container_of((OBJ), struct switchdev_obj_port_vlan, obj)
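+
+/* Illustrative sketch (not part of the original header): how a driver's
+ * object-add callback downcasts the generic object with the helper
+ * above. my_port_obj_add is hypothetical.
+ */
+static inline int my_port_obj_add(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ const struct switchdev_obj_port_vlan *vlan;
+
+ if (obj->id != SWITCHDEV_OBJ_ID_PORT_VLAN)
+ return -EOPNOTSUPP;
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ /* ... program vlan->vid with vlan->flags into the switch ... */
+ return 0;
+}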
+
+/* SWITCHDEV_OBJ_ID_PORT_MDB */
+struct switchdev_obj_port_mdb {
+ struct switchdev_obj obj;
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+};
+
+#define SWITCHDEV_OBJ_PORT_MDB(OBJ) \
+ container_of((OBJ), struct switchdev_obj_port_mdb, obj)
+
+/* SWITCHDEV_OBJ_ID_MRP */
+struct switchdev_obj_mrp {
+ struct switchdev_obj obj;
+ struct net_device *p_port;
+ struct net_device *s_port;
+ u32 ring_id;
+ u16 prio;
+};
+
+#define SWITCHDEV_OBJ_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_mrp, obj)
+
+/* SWITCHDEV_OBJ_ID_RING_TEST_MRP */
+struct switchdev_obj_ring_test_mrp {
+ struct switchdev_obj obj;
+ /* The interval is in microseconds; a value of 0 means stop */
+ u32 interval;
+ u8 max_miss;
+ u32 ring_id;
+ u32 period;
+ bool monitor;
+};
+
+#define SWITCHDEV_OBJ_RING_TEST_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_ring_test_mrp, obj)
+
+/* SWITCHDEV_OBJ_ID_RING_ROLE_MRP */
+struct switchdev_obj_ring_role_mrp {
+ struct switchdev_obj obj;
+ u8 ring_role;
+ u32 ring_id;
+ u8 sw_backup;
+};
+
+#define SWITCHDEV_OBJ_RING_ROLE_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_ring_role_mrp, obj)
+
+struct switchdev_obj_ring_state_mrp {
+ struct switchdev_obj obj;
+ u8 ring_state;
+ u32 ring_id;
+};
+
+#define SWITCHDEV_OBJ_RING_STATE_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_ring_state_mrp, obj)
+
+/* SWITCHDEV_OBJ_ID_IN_TEST_MRP */
+struct switchdev_obj_in_test_mrp {
+ struct switchdev_obj obj;
+ /* The interval is in microseconds; a value of 0 means stop */
+ u32 interval;
+ u32 in_id;
+ u32 period;
+ u8 max_miss;
+};
+
+#define SWITCHDEV_OBJ_IN_TEST_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_in_test_mrp, obj)
+
+/* SWITCHDEV_OBJ_ID_IN_ROLE_MRP */
+struct switchdev_obj_in_role_mrp {
+ struct switchdev_obj obj;
+ struct net_device *i_port;
+ u32 ring_id;
+ u16 in_id;
+ u8 in_role;
+ u8 sw_backup;
+};
+
+#define SWITCHDEV_OBJ_IN_ROLE_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_in_role_mrp, obj)
+
+struct switchdev_obj_in_state_mrp {
+ struct switchdev_obj obj;
+ u32 in_id;
+ u8 in_state;
+};
+
+#define SWITCHDEV_OBJ_IN_STATE_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_in_state_mrp, obj)
+
+typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
+
+struct switchdev_brport {
+ struct net_device *dev;
+ const void *ctx;
+ struct notifier_block *atomic_nb;
+ struct notifier_block *blocking_nb;
+ bool tx_fwd_offload;
+};
+
+enum switchdev_notifier_type {
+ SWITCHDEV_FDB_ADD_TO_BRIDGE = 1,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ SWITCHDEV_FDB_ADD_TO_DEVICE,
+ SWITCHDEV_FDB_DEL_TO_DEVICE,
+ SWITCHDEV_FDB_OFFLOADED,
+ SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
+
+ SWITCHDEV_PORT_OBJ_ADD, /* Blocking. */
+ SWITCHDEV_PORT_OBJ_DEL, /* Blocking. */
+ SWITCHDEV_PORT_ATTR_SET, /* May be blocking. */
+
+ SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE,
+ SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE,
+ SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE,
+ SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE,
+ SWITCHDEV_VXLAN_FDB_OFFLOADED,
+
+ SWITCHDEV_BRPORT_OFFLOADED,
+ SWITCHDEV_BRPORT_UNOFFLOADED,
+};
+
+struct switchdev_notifier_info {
+ struct net_device *dev;
+ struct netlink_ext_ack *extack;
+ const void *ctx;
+};
+
+/* Remember to update br_switchdev_fdb_populate() when adding
+ * new members to this structure
+ */
+struct switchdev_notifier_fdb_info {
+ struct switchdev_notifier_info info; /* must be first */
+ const unsigned char *addr;
+ u16 vid;
+ u8 added_by_user:1,
+ is_local:1,
+ offloaded:1;
+};
+
+struct switchdev_notifier_port_obj_info {
+ struct switchdev_notifier_info info; /* must be first */
+ const struct switchdev_obj *obj;
+ bool handled;
+};
+
+struct switchdev_notifier_port_attr_info {
+ struct switchdev_notifier_info info; /* must be first */
+ const struct switchdev_attr *attr;
+ bool handled;
+};
+
+struct switchdev_notifier_brport_info {
+ struct switchdev_notifier_info info; /* must be first */
+ const struct switchdev_brport brport;
+};
+
+static inline struct net_device *
+switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
+{
+ return info->dev;
+}
+
+static inline struct netlink_ext_ack *
+switchdev_notifier_info_to_extack(const struct switchdev_notifier_info *info)
+{
+ return info->extack;
+}
+
+static inline bool
+switchdev_fdb_is_dynamically_learned(const struct switchdev_notifier_fdb_info *fdb_info)
+{
+ return !fdb_info->added_by_user && !fdb_info->is_local;
+}
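+
+/* Illustrative sketch (not part of the original header): the shape of a
+ * driver's atomic switchdev notifier using the accessors above.
+ * my_switchdev_event is hypothetical.
+ */
+static int my_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ const struct switchdev_notifier_fdb_info *fdb_info = ptr;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (switchdev_fdb_is_dynamically_learned(fdb_info))
+ netdev_dbg(dev, "learned %pM vid %u\n",
+ fdb_info->addr, fdb_info->vid);
+ break;
+ }
+ return NOTIFY_DONE;
+}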
+
+#ifdef CONFIG_NET_SWITCHDEV
+
+int switchdev_bridge_port_offload(struct net_device *brport_dev,
+ struct net_device *dev, const void *ctx,
+ struct notifier_block *atomic_nb,
+ struct notifier_block *blocking_nb,
+ bool tx_fwd_offload,
+ struct netlink_ext_ack *extack);
+void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
+ const void *ctx,
+ struct notifier_block *atomic_nb,
+ struct notifier_block *blocking_nb);
+
+void switchdev_deferred_process(void);
+int switchdev_port_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack);
+int switchdev_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack);
+int switchdev_port_obj_del(struct net_device *dev,
+ const struct switchdev_obj *obj);
+
+int register_switchdev_notifier(struct notifier_block *nb);
+int unregister_switchdev_notifier(struct notifier_block *nb);
+int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack);
+
+int register_switchdev_blocking_notifier(struct notifier_block *nb);
+int unregister_switchdev_blocking_notifier(struct notifier_block *nb);
+int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack);
+
+void switchdev_port_fwd_mark_set(struct net_device *dev,
+ struct net_device *group_dev,
+ bool joining);
+
+int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
+ const struct switchdev_notifier_fdb_info *fdb_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info));
+
+int switchdev_handle_port_obj_add(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*add_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack));
+int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*add_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack));
+int switchdev_handle_port_obj_del(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*del_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj));
+int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*del_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj));
+
+int switchdev_handle_port_attr_set(struct net_device *dev,
+ struct switchdev_notifier_port_attr_info *port_attr_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*set_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack));
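+
+/* Illustrative sketch (not part of the original header): a blocking
+ * notifier dispatching object adds through the helper above. The
+ * my_netdev_check and my_port_obj_add callbacks are hypothetical
+ * (the add callback has the shape shown in the PORT_VLAN sketch).
+ */
+static bool my_netdev_check(const struct net_device *dev);
+static int my_port_obj_add(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack);
+
+static int my_switchdev_blocking_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr, my_netdev_check,
+ my_port_obj_add);
+ return notifier_from_errno(err);
+ }
+ return NOTIFY_DONE;
+}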
+#else
+
+static inline int
+switchdev_bridge_port_offload(struct net_device *brport_dev,
+ struct net_device *dev, const void *ctx,
+ struct notifier_block *atomic_nb,
+ struct notifier_block *blocking_nb,
+ bool tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+switchdev_bridge_port_unoffload(struct net_device *brport_dev,
+ const void *ctx,
+ struct notifier_block *atomic_nb,
+ struct notifier_block *blocking_nb)
+{
+}
+
+static inline void switchdev_deferred_process(void)
+{
+}
+
+static inline int switchdev_port_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_obj_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int register_switchdev_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int unregister_switchdev_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int call_switchdev_notifiers(unsigned long val,
+ struct net_device *dev,
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack)
+{
+ return NOTIFY_DONE;
+}
+
+static inline int
+register_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int
+unregister_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int
+call_switchdev_blocking_notifiers(unsigned long val,
+ struct net_device *dev,
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack)
+{
+ return NOTIFY_DONE;
+}
+
+static inline int
+switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
+ const struct switchdev_notifier_fdb_info *fdb_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info))
+{
+ return 0;
+}
+
+static inline int
+switchdev_handle_port_obj_add(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*add_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack))
+{
+ return 0;
+}
+
+static inline int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*add_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack))
+{
+ return 0;
+}
+
+static inline int
+switchdev_handle_port_obj_del(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*del_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj))
+{
+ return 0;
+}
+
+static inline int
+switchdev_handle_port_obj_del_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*del_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj))
+{
+ return 0;
+}
+
+static inline int
+switchdev_handle_port_attr_set(struct net_device *dev,
+ struct switchdev_notifier_port_attr_info *port_attr_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*set_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack))
+{
+ return 0;
+}
+#endif
+
+#endif /* _LINUX_SWITCHDEV_H_ */
diff --git a/include/net/tc_act/tc_bpf.h b/include/net/tc_act/tc_bpf.h
new file mode 100644
index 000000000..5897162c6
--- /dev/null
+++ b/include/net/tc_act/tc_bpf.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
+ */
+
+#ifndef __NET_TC_BPF_H
+#define __NET_TC_BPF_H
+
+#include <linux/filter.h>
+#include <net/act_api.h>
+
+struct tcf_bpf {
+ struct tc_action common;
+ struct bpf_prog __rcu *filter;
+ union {
+ u32 bpf_fd;
+ u16 bpf_num_ops;
+ };
+ struct sock_filter *bpf_ops;
+ const char *bpf_name;
+};
+#define to_bpf(a) ((struct tcf_bpf *)a)
+
+#endif /* __NET_TC_BPF_H */
diff --git a/include/net/tc_act/tc_connmark.h b/include/net/tc_act/tc_connmark.h
new file mode 100644
index 000000000..1f4cb477b
--- /dev/null
+++ b/include/net/tc_act/tc_connmark.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_CONNMARK_H
+#define __NET_TC_CONNMARK_H
+
+#include <net/act_api.h>
+
+struct tcf_connmark_info {
+ struct tc_action common;
+ struct net *net;
+ u16 zone;
+};
+
+#define to_connmark(a) ((struct tcf_connmark_info *)a)
+
+#endif /* __NET_TC_CONNMARK_H */
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
new file mode 100644
index 000000000..68269e458
--- /dev/null
+++ b/include/net/tc_act/tc_csum.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_CSUM_H
+#define __NET_TC_CSUM_H
+
+#include <linux/types.h>
+#include <net/act_api.h>
+#include <linux/tc_act/tc_csum.h>
+
+struct tcf_csum_params {
+ u32 update_flags;
+ struct rcu_head rcu;
+};
+
+struct tcf_csum {
+ struct tc_action common;
+
+ struct tcf_csum_params __rcu *params;
+};
+#define to_tcf_csum(a) ((struct tcf_csum *)a)
+
+static inline bool is_tcf_csum(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->id == TCA_ID_CSUM)
+ return true;
+#endif
+ return false;
+}
+
+static inline u32 tcf_csum_update_flags(const struct tc_action *a)
+{
+ u32 update_flags;
+
+ rcu_read_lock();
+ update_flags = rcu_dereference(to_tcf_csum(a)->params)->update_flags;
+ rcu_read_unlock();
+
+ return update_flags;
+}
+
+#endif /* __NET_TC_CSUM_H */
diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
new file mode 100644
index 000000000..8250d6f0a
--- /dev/null
+++ b/include/net/tc_act/tc_ct.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_CT_H
+#define __NET_TC_CT_H
+
+#include <net/act_api.h>
+#include <uapi/linux/tc_act/tc_ct.h>
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_labels.h>
+
+struct tcf_ct_params {
+ struct nf_conn *tmpl;
+ u16 zone;
+
+ u32 mark;
+ u32 mark_mask;
+
+ u32 labels[NF_CT_LABELS_MAX_SIZE / sizeof(u32)];
+ u32 labels_mask[NF_CT_LABELS_MAX_SIZE / sizeof(u32)];
+
+ struct nf_nat_range2 range;
+ bool ipv4_range;
+
+ u16 ct_action;
+
+ struct rcu_head rcu;
+
+ struct tcf_ct_flow_table *ct_ft;
+ struct nf_flowtable *nf_ft;
+};
+
+struct tcf_ct {
+ struct tc_action common;
+ struct tcf_ct_params __rcu *params;
+};
+
+#define to_ct(a) ((struct tcf_ct *)a)
+#define to_ct_params(a) \
+ ((struct tcf_ct_params *) \
+ rcu_dereference_protected(to_ct(a)->params, \
+ lockdep_is_held(&a->tcfa_lock)))
+
+static inline uint16_t tcf_ct_zone(const struct tc_action *a)
+{
+ return to_ct_params(a)->zone;
+}
+
+static inline int tcf_ct_action(const struct tc_action *a)
+{
+ return to_ct_params(a)->ct_action;
+}
+
+static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+{
+ return to_ct_params(a)->nf_ft;
+}
+
+#else
+static inline uint16_t tcf_ct_zone(const struct tc_action *a) { return 0; }
+static inline int tcf_ct_action(const struct tc_action *a) { return 0; }
+static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+{
+ return NULL;
+}
+#endif /* CONFIG_NF_CONNTRACK */
+
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+static inline void
+tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie)
+{
+ enum ip_conntrack_info ctinfo = cookie & NFCT_INFOMASK;
+ struct nf_conn *ct;
+
+ ct = (struct nf_conn *)(cookie & NFCT_PTRMASK);
+ nf_conntrack_get(&ct->ct_general);
+ nf_ct_set(skb, ct, ctinfo);
+}
+#else
+static inline void
+tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie) { }
+#endif
+
+static inline bool is_tcf_ct(const struct tc_action *a)
+{
+#if defined(CONFIG_NET_CLS_ACT) && IS_ENABLED(CONFIG_NF_CONNTRACK)
+ if (a->ops && a->ops->id == TCA_ID_CT)
+ return true;
+#endif
+ return false;
+}
+
+#endif /* __NET_TC_CT_H */
diff --git a/include/net/tc_act/tc_ctinfo.h b/include/net/tc_act/tc_ctinfo.h
new file mode 100644
index 000000000..f071c1d70
--- /dev/null
+++ b/include/net/tc_act/tc_ctinfo.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_CTINFO_H
+#define __NET_TC_CTINFO_H
+
+#include <net/act_api.h>
+
+struct tcf_ctinfo_params {
+ struct rcu_head rcu;
+ struct net *net;
+ u32 dscpmask;
+ u32 dscpstatemask;
+ u32 cpmarkmask;
+ u16 zone;
+ u8 mode;
+ u8 dscpmaskshift;
+};
+
+struct tcf_ctinfo {
+ struct tc_action common;
+ struct tcf_ctinfo_params __rcu *params;
+ u64 stats_dscp_set;
+ u64 stats_dscp_error;
+ u64 stats_cpmark_set;
+};
+
+enum {
+ CTINFO_MODE_DSCP = BIT(0),
+ CTINFO_MODE_CPMARK = BIT(1)
+};
+
+#define to_ctinfo(a) ((struct tcf_ctinfo *)a)
+
+#endif /* __NET_TC_CTINFO_H */
diff --git a/include/net/tc_act/tc_defact.h b/include/net/tc_act/tc_defact.h
new file mode 100644
index 000000000..d7ba0402a
--- /dev/null
+++ b/include/net/tc_act/tc_defact.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_DEF_H
+#define __NET_TC_DEF_H
+
+#include <net/act_api.h>
+
+struct tcf_defact {
+ struct tc_action common;
+ u32 tcfd_datalen;
+ void *tcfd_defdata;
+};
+#define to_defact(a) ((struct tcf_defact *)a)
+
+#endif /* __NET_TC_DEF_H */
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
new file mode 100644
index 000000000..832efd40e
--- /dev/null
+++ b/include/net/tc_act/tc_gact.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_GACT_H
+#define __NET_TC_GACT_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_gact.h>
+
+struct tcf_gact {
+ struct tc_action common;
+#ifdef CONFIG_GACT_PROB
+ u16 tcfg_ptype;
+ u16 tcfg_pval;
+ int tcfg_paction;
+ atomic_t packets;
+#endif
+};
+#define to_gact(a) ((struct tcf_gact *)a)
+
+static inline bool __is_tcf_gact_act(const struct tc_action *a, int act,
+ bool is_ext)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_gact *gact;
+
+ if (a->ops && a->ops->id != TCA_ID_GACT)
+ return false;
+
+ gact = to_gact(a);
+ if ((!is_ext && gact->tcf_action == act) ||
+ (is_ext && TC_ACT_EXT_CMP(gact->tcf_action, act)))
+ return true;
+
+#endif
+ return false;
+}
+
+static inline bool is_tcf_gact_ok(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_OK, false);
+}
+
+static inline bool is_tcf_gact_shot(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_SHOT, false);
+}
+
+static inline bool is_tcf_gact_trap(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_TRAP, false);
+}
+
+static inline bool is_tcf_gact_goto_chain(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_GOTO_CHAIN, true);
+}
+
+static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a)
+{
+ return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK;
+}
+
+static inline bool is_tcf_gact_continue(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_UNSPEC, false);
+}
+
+static inline bool is_tcf_gact_reclassify(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_RECLASSIFY, false);
+}
+
+static inline bool is_tcf_gact_pipe(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_PIPE, false);
+}
+
+#endif /* __NET_TC_GACT_H */
diff --git a/include/net/tc_act/tc_gate.h b/include/net/tc_act/tc_gate.h
new file mode 100644
index 000000000..c8fa11ebb
--- /dev/null
+++ b/include/net/tc_act/tc_gate.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright 2020 NXP */
+
+#ifndef __NET_TC_GATE_H
+#define __NET_TC_GATE_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_gate.h>
+
+struct action_gate_entry {
+ u8 gate_state;
+ u32 interval;
+ s32 ipv;
+ s32 maxoctets;
+};
+
+struct tcfg_gate_entry {
+ int index;
+ u8 gate_state;
+ u32 interval;
+ s32 ipv;
+ s32 maxoctets;
+ struct list_head list;
+};
+
+struct tcf_gate_params {
+ s32 tcfg_priority;
+ u64 tcfg_basetime;
+ u64 tcfg_cycletime;
+ u64 tcfg_cycletime_ext;
+ u32 tcfg_flags;
+ s32 tcfg_clockid;
+ size_t num_entries;
+ struct list_head entries;
+};
+
+#define GATE_ACT_GATE_OPEN BIT(0)
+#define GATE_ACT_PENDING BIT(1)
+
+struct tcf_gate {
+ struct tc_action common;
+ struct tcf_gate_params param;
+ u8 current_gate_status;
+ ktime_t current_close_time;
+ u32 current_entry_octets;
+ s32 current_max_octets;
+ struct tcfg_gate_entry *next_entry;
+ struct hrtimer hitimer;
+ enum tk_offsets tk_offset;
+};
+
+#define to_gate(a) ((struct tcf_gate *)a)
+
+static inline bool is_tcf_gate(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->id == TCA_ID_GATE)
+ return true;
+#endif
+ return false;
+}
+
+static inline s32 tcf_gate_prio(const struct tc_action *a)
+{
+ s32 tcfg_prio;
+
+ tcfg_prio = to_gate(a)->param.tcfg_priority;
+
+ return tcfg_prio;
+}
+
+static inline u64 tcf_gate_basetime(const struct tc_action *a)
+{
+ u64 tcfg_basetime;
+
+ tcfg_basetime = to_gate(a)->param.tcfg_basetime;
+
+ return tcfg_basetime;
+}
+
+static inline u64 tcf_gate_cycletime(const struct tc_action *a)
+{
+ u64 tcfg_cycletime;
+
+ tcfg_cycletime = to_gate(a)->param.tcfg_cycletime;
+
+ return tcfg_cycletime;
+}
+
+static inline u64 tcf_gate_cycletimeext(const struct tc_action *a)
+{
+ u64 tcfg_cycletimeext;
+
+ tcfg_cycletimeext = to_gate(a)->param.tcfg_cycletime_ext;
+
+ return tcfg_cycletimeext;
+}
+
+static inline u32 tcf_gate_num_entries(const struct tc_action *a)
+{
+ u32 num_entries;
+
+ num_entries = to_gate(a)->param.num_entries;
+
+ return num_entries;
+}
+
+static inline struct action_gate_entry
+ *tcf_gate_get_list(const struct tc_action *a)
+{
+ struct action_gate_entry *oe;
+ struct tcf_gate_params *p;
+ struct tcfg_gate_entry *entry;
+ u32 num_entries;
+ int i = 0;
+
+ p = &to_gate(a)->param;
+ num_entries = p->num_entries;
+
+ list_for_each_entry(entry, &p->entries, list)
+ i++;
+
+ if (i != num_entries)
+ return NULL;
+
+ oe = kcalloc(num_entries, sizeof(*oe), GFP_ATOMIC);
+ if (!oe)
+ return NULL;
+
+ i = 0;
+ list_for_each_entry(entry, &p->entries, list) {
+ oe[i].gate_state = entry->gate_state;
+ oe[i].interval = entry->interval;
+ oe[i].ipv = entry->ipv;
+ oe[i].maxoctets = entry->maxoctets;
+ i++;
+ }
+
+ return oe;
+}
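+
+/* Illustrative sketch (not part of the original header): consuming the
+ * snapshot returned by tcf_gate_get_list(); the array is allocated with
+ * GFP_ATOMIC, is owned by the caller, and must be kfree()d.
+ * my_program_gate is hypothetical.
+ */
+static inline int my_program_gate(const struct tc_action *a)
+{
+ struct action_gate_entry *entries = tcf_gate_get_list(a);
+ u32 i, n = tcf_gate_num_entries(a);
+
+ if (!entries)
+ return -EINVAL;
+ for (i = 0; i < n; i++)
+ pr_debug("entry %u: state %u for %u us\n",
+ i, entries[i].gate_state, entries[i].interval);
+ kfree(entries);
+ return 0;
+}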
+#endif
diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
new file mode 100644
index 000000000..c7f24a2da
--- /dev/null
+++ b/include/net/tc_act/tc_ife.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_IFE_H
+#define __NET_TC_IFE_H
+
+#include <net/act_api.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+
+struct module;
+
+struct tcf_ife_params {
+ u8 eth_dst[ETH_ALEN];
+ u8 eth_src[ETH_ALEN];
+ u16 eth_type;
+ u16 flags;
+
+ struct rcu_head rcu;
+};
+
+struct tcf_ife_info {
+ struct tc_action common;
+ struct tcf_ife_params __rcu *params;
+ /* list of metaids allowed */
+ struct list_head metalist;
+};
+#define to_ife(a) ((struct tcf_ife_info *)a)
+
+struct tcf_meta_info {
+ const struct tcf_meta_ops *ops;
+ void *metaval;
+ u16 metaid;
+ struct list_head metalist;
+};
+
+struct tcf_meta_ops {
+ u16 metaid; /* Maintainer-provided ID */
+ u16 metatype; /* netlink attribute type (see net/netlink.h) */
+ const char *name;
+ const char *synopsis;
+ struct list_head list;
+ int (*check_presence)(struct sk_buff *, struct tcf_meta_info *);
+ int (*encode)(struct sk_buff *, void *, struct tcf_meta_info *);
+ int (*decode)(struct sk_buff *, void *, u16 len);
+ int (*get)(struct sk_buff *skb, struct tcf_meta_info *mi);
+ int (*alloc)(struct tcf_meta_info *, void *, gfp_t);
+ void (*release)(struct tcf_meta_info *);
+ int (*validate)(void *val, int len);
+ struct module *owner;
+};
+
+#define MODULE_ALIAS_IFE_META(metan) MODULE_ALIAS("ife-meta-" metan)
+
+int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi);
+int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi);
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
+int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
+int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi);
+int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
+int ife_validate_meta_u32(void *val, int len);
+int ife_validate_meta_u16(void *val, int len);
+int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi);
+void ife_release_meta_gen(struct tcf_meta_info *mi);
+int register_ife_op(struct tcf_meta_ops *mops);
+int unregister_ife_op(struct tcf_meta_ops *mops);
+
+#endif /* __NET_TC_IFE_H */
diff --git a/include/net/tc_act/tc_ipt.h b/include/net/tc_act/tc_ipt.h
new file mode 100644
index 000000000..4225fcb1c
--- /dev/null
+++ b/include/net/tc_act/tc_ipt.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_IPT_H
+#define __NET_TC_IPT_H
+
+#include <net/act_api.h>
+
+struct xt_entry_target;
+
+struct tcf_ipt {
+ struct tc_action common;
+ u32 tcfi_hook;
+ char *tcfi_tname;
+ struct xt_entry_target *tcfi_t;
+};
+#define to_ipt(a) ((struct tcf_ipt *)a)
+
+#endif /* __NET_TC_IPT_H */
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
new file mode 100644
index 000000000..32ce8ea36
--- /dev/null
+++ b/include/net/tc_act/tc_mirred.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_MIR_H
+#define __NET_TC_MIR_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_mirred.h>
+
+struct tcf_mirred {
+ struct tc_action common;
+ int tcfm_eaction;
+ bool tcfm_mac_header_xmit;
+ struct net_device __rcu *tcfm_dev;
+ netdevice_tracker tcfm_dev_tracker;
+ struct list_head tcfm_list;
+};
+#define to_mirred(a) ((struct tcf_mirred *)a)
+
+static inline bool is_tcf_mirred_egress_redirect(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->id == TCA_ID_MIRRED)
+ return to_mirred(a)->tcfm_eaction == TCA_EGRESS_REDIR;
+#endif
+ return false;
+}
+
+static inline bool is_tcf_mirred_egress_mirror(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->id == TCA_ID_MIRRED)
+ return to_mirred(a)->tcfm_eaction == TCA_EGRESS_MIRROR;
+#endif
+ return false;
+}
+
+static inline bool is_tcf_mirred_ingress_redirect(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->id == TCA_ID_MIRRED)
+ return to_mirred(a)->tcfm_eaction == TCA_INGRESS_REDIR;
+#endif
+ return false;
+}
+
+static inline bool is_tcf_mirred_ingress_mirror(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->id == TCA_ID_MIRRED)
+ return to_mirred(a)->tcfm_eaction == TCA_INGRESS_MIRROR;
+#endif
+ return false;
+}
+
+static inline struct net_device *tcf_mirred_dev(const struct tc_action *a)
+{
+ return rtnl_dereference(to_mirred(a)->tcfm_dev);
+}
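+
+/* Illustrative sketch (not part of the original header): resolving the
+ * target device of an egress-redirect mirred action; tcf_mirred_dev()
+ * uses rtnl_dereference() and so must run under RTNL.
+ * my_offload_redirect is hypothetical.
+ */
+static inline int my_offload_redirect(const struct tc_action *a)
+{
+ struct net_device *to_dev;
+
+ if (!is_tcf_mirred_egress_redirect(a))
+ return -EOPNOTSUPP;
+ to_dev = tcf_mirred_dev(a); /* RTNL must be held */
+ if (!to_dev)
+ return -ENODEV;
+ /* ... set up hardware forwarding towards to_dev ... */
+ return 0;
+}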
+
+#endif /* __NET_TC_MIR_H */
diff --git a/include/net/tc_act/tc_mpls.h b/include/net/tc_act/tc_mpls.h
new file mode 100644
index 000000000..721de4f57
--- /dev/null
+++ b/include/net/tc_act/tc_mpls.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef __NET_TC_MPLS_H
+#define __NET_TC_MPLS_H
+
+#include <linux/tc_act/tc_mpls.h>
+#include <net/act_api.h>
+
+struct tcf_mpls_params {
+ int tcfm_action;
+ u32 tcfm_label;
+ u8 tcfm_tc;
+ u8 tcfm_ttl;
+ u8 tcfm_bos;
+ __be16 tcfm_proto;
+ struct rcu_head rcu;
+};
+
+#define ACT_MPLS_TC_NOT_SET 0xff
+#define ACT_MPLS_BOS_NOT_SET 0xff
+#define ACT_MPLS_LABEL_NOT_SET 0xffffffff
+
+struct tcf_mpls {
+ struct tc_action common;
+ struct tcf_mpls_params __rcu *mpls_p;
+};
+#define to_mpls(a) ((struct tcf_mpls *)a)
+
+static inline bool is_tcf_mpls(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->id == TCA_ID_MPLS)
+ return true;
+#endif
+ return false;
+}
+
+static inline u32 tcf_mpls_action(const struct tc_action *a)
+{
+ u32 tcfm_action;
+
+ rcu_read_lock();
+ tcfm_action = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_action;
+ rcu_read_unlock();
+
+ return tcfm_action;
+}
+
+static inline __be16 tcf_mpls_proto(const struct tc_action *a)
+{
+ __be16 tcfm_proto;
+
+ rcu_read_lock();
+ tcfm_proto = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_proto;
+ rcu_read_unlock();
+
+ return tcfm_proto;
+}
+
+static inline u32 tcf_mpls_label(const struct tc_action *a)
+{
+ u32 tcfm_label;
+
+ rcu_read_lock();
+ tcfm_label = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_label;
+ rcu_read_unlock();
+
+ return tcfm_label;
+}
+
+static inline u8 tcf_mpls_tc(const struct tc_action *a)
+{
+ u8 tcfm_tc;
+
+ rcu_read_lock();
+ tcfm_tc = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_tc;
+ rcu_read_unlock();
+
+ return tcfm_tc;
+}
+
+static inline u8 tcf_mpls_bos(const struct tc_action *a)
+{
+ u8 tcfm_bos;
+
+ rcu_read_lock();
+ tcfm_bos = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_bos;
+ rcu_read_unlock();
+
+ return tcfm_bos;
+}
+
+static inline u8 tcf_mpls_ttl(const struct tc_action *a)
+{
+ u8 tcfm_ttl;
+
+ rcu_read_lock();
+ tcfm_ttl = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_ttl;
+ rcu_read_unlock();
+
+ return tcfm_ttl;
+}
+
+#endif /* __NET_TC_MPLS_H */
diff --git a/include/net/tc_act/tc_nat.h b/include/net/tc_act/tc_nat.h
new file mode 100644
index 000000000..c14407160
--- /dev/null
+++ b/include/net/tc_act/tc_nat.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_NAT_H
+#define __NET_TC_NAT_H
+
+#include <linux/types.h>
+#include <net/act_api.h>
+
+struct tcf_nat {
+ struct tc_action common;
+
+ __be32 old_addr;
+ __be32 new_addr;
+ __be32 mask;
+ u32 flags;
+};
+
+#define to_tcf_nat(a) ((struct tcf_nat *)a)
+
+#endif /* __NET_TC_NAT_H */
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
new file mode 100644
index 000000000..83fe39931
--- /dev/null
+++ b/include/net/tc_act/tc_pedit.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_PED_H
+#define __NET_TC_PED_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_pedit.h>
+#include <linux/types.h>
+
+struct tcf_pedit_key_ex {
+ enum pedit_header_type htype;
+ enum pedit_cmd cmd;
+};
+
+struct tcf_pedit_parms {
+ struct tc_pedit_key *tcfp_keys;
+ struct tcf_pedit_key_ex *tcfp_keys_ex;
+ u32 tcfp_off_max_hint;
+ unsigned char tcfp_nkeys;
+ unsigned char tcfp_flags;
+ struct rcu_head rcu;
+};
+
+struct tcf_pedit {
+ struct tc_action common;
+ struct tcf_pedit_parms __rcu *parms;
+};
+
+#define to_pedit(a) ((struct tcf_pedit *)a)
+#define to_pedit_parms(a) (rcu_dereference(to_pedit(a)->parms))
+
+static inline bool is_tcf_pedit(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->id == TCA_ID_PEDIT)
+ return true;
+#endif
+ return false;
+}
+
+static inline int tcf_pedit_nkeys(const struct tc_action *a)
+{
+ struct tcf_pedit_parms *parms;
+ int nkeys;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ nkeys = parms->tcfp_nkeys;
+ rcu_read_unlock();
+
+ return nkeys;
+}
+
+static inline u32 tcf_pedit_htype(const struct tc_action *a, int index)
+{
+ u32 htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+ struct tcf_pedit_parms *parms;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ if (parms->tcfp_keys_ex)
+ htype = parms->tcfp_keys_ex[index].htype;
+ rcu_read_unlock();
+
+ return htype;
+}
+
+static inline u32 tcf_pedit_cmd(const struct tc_action *a, int index)
+{
+ struct tcf_pedit_parms *parms;
+ u32 cmd = __PEDIT_CMD_MAX;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ if (parms->tcfp_keys_ex)
+ cmd = parms->tcfp_keys_ex[index].cmd;
+ rcu_read_unlock();
+
+ return cmd;
+}
+
+static inline u32 tcf_pedit_mask(const struct tc_action *a, int index)
+{
+ struct tcf_pedit_parms *parms;
+ u32 mask;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ mask = parms->tcfp_keys[index].mask;
+ rcu_read_unlock();
+
+ return mask;
+}
+
+static inline u32 tcf_pedit_val(const struct tc_action *a, int index)
+{
+ struct tcf_pedit_parms *parms;
+ u32 val;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ val = parms->tcfp_keys[index].val;
+ rcu_read_unlock();
+
+ return val;
+}
+
+static inline u32 tcf_pedit_offset(const struct tc_action *a, int index)
+{
+ struct tcf_pedit_parms *parms;
+ u32 off;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ off = parms->tcfp_keys[index].off;
+ rcu_read_unlock();
+
+ return off;
+}
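+
+/* Illustrative sketch (not part of the original header): walking every
+ * pedit key of an action, as an offload parser would. my_parse_pedit is
+ * hypothetical.
+ */
+static inline void my_parse_pedit(const struct tc_action *a)
+{
+ int i, nkeys = tcf_pedit_nkeys(a);
+
+ for (i = 0; i < nkeys; i++)
+ pr_debug("key %d: htype %u off %u val %08x mask %08x\n",
+ i, tcf_pedit_htype(a, i), tcf_pedit_offset(a, i),
+ tcf_pedit_val(a, i), tcf_pedit_mask(a, i));
+}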
+#endif /* __NET_TC_PED_H */
diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h
new file mode 100644
index 000000000..283bde711
--- /dev/null
+++ b/include/net/tc_act/tc_police.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_POLICE_H
+#define __NET_TC_POLICE_H
+
+#include <net/act_api.h>
+
+struct tcf_police_params {
+ int tcfp_result;
+ u32 tcfp_ewma_rate;
+ s64 tcfp_burst;
+ u32 tcfp_mtu;
+ s64 tcfp_mtu_ptoks;
+ s64 tcfp_pkt_burst;
+ struct psched_ratecfg rate;
+ bool rate_present;
+ struct psched_ratecfg peak;
+ bool peak_present;
+ struct psched_pktrate ppsrate;
+ bool pps_present;
+ struct rcu_head rcu;
+};
+
+struct tcf_police {
+ struct tc_action common;
+ struct tcf_police_params __rcu *params;
+
+ spinlock_t tcfp_lock ____cacheline_aligned_in_smp;
+ s64 tcfp_toks;
+ s64 tcfp_ptoks;
+ s64 tcfp_pkttoks;
+ s64 tcfp_t_c;
+};
+
+#define to_police(pc) ((struct tcf_police *)pc)
+
+/* old policer structure from before tc actions */
+struct tc_police_compat {
+ u32 index;
+ int action;
+ u32 limit;
+ u32 burst;
+ u32 mtu;
+ struct tc_ratespec rate;
+ struct tc_ratespec peakrate;
+};
+
+static inline bool is_tcf_police(const struct tc_action *act)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (act->ops && act->ops->id == TCA_ID_POLICE)
+ return true;
+#endif
+ return false;
+}
+
+static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->rate.rate_bytes_ps;
+}
+
+static inline u32 tcf_police_burst(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+ u32 burst;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+
+ /*
+ * "rate" bytes "burst" nanoseconds
+ * ------------ * -------------------
+ * 1 second 2^6 ticks
+ *
+ * ------------------------------------
+ * NSEC_PER_SEC nanoseconds
+ * ------------------------
+ * 2^6 ticks
+ *
+ * "rate" bytes "burst" nanoseconds 2^6 ticks
+ * = ------------ * ------------------- * ------------------------
+ * 1 second 2^6 ticks NSEC_PER_SEC nanoseconds
+ *
+ * "rate" * "burst"
+ * = ---------------- bytes/nanosecond
+ * NSEC_PER_SEC^2
+ *
+ *
+ * "rate" * "burst"
+ * = ---------------- bytes/second
+ * NSEC_PER_SEC
+ */
+ burst = div_u64(params->tcfp_burst * params->rate.rate_bytes_ps,
+ NSEC_PER_SEC);
+
+ return burst;
+}
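+
+/* Worked example (illustrative, not from the original header): with
+ * rate.rate_bytes_ps = 125000000 (1 Gbit/s) and tcfp_burst = 80000 ns,
+ * the conversion above gives 80000 * 125000000 / NSEC_PER_SEC = 10000
+ * bytes of burst.
+ */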
+
+static inline u64 tcf_police_rate_pkt_ps(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->ppsrate.rate_pkts_ps;
+}
+
+static inline u32 tcf_police_burst_pkt(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+ u32 burst;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+
+ /*
+ * "rate" pkts "burst" nanoseconds
+ * ------------ * -------------------
+ * 1 second 2^6 ticks
+ *
+ * ------------------------------------
+ * NSEC_PER_SEC nanoseconds
+ * ------------------------
+ * 2^6 ticks
+ *
+ * "rate" pkts "burst" nanoseconds 2^6 ticks
+ * = ------------ * ------------------- * ------------------------
+ * 1 second 2^6 ticks NSEC_PER_SEC nanoseconds
+ *
+ * "rate" * "burst"
+ * = ---------------- pkts/nanosecond
+ * NSEC_PER_SEC^2
+ *
+ *
+ * "rate" * "burst"
+ * = ---------------- pkts/second
+ * NSEC_PER_SEC
+ */
+ burst = div_u64(params->tcfp_pkt_burst * params->ppsrate.rate_pkts_ps,
+ NSEC_PER_SEC);
+
+ return burst;
+}
+
+static inline u32 tcf_police_tcfp_mtu(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->tcfp_mtu;
+}
+
+static inline u64 tcf_police_peakrate_bytes_ps(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->peak.rate_bytes_ps;
+}
+
+static inline u32 tcf_police_tcfp_ewma_rate(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->tcfp_ewma_rate;
+}
+
+static inline u16 tcf_police_rate_overhead(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->rate.overhead;
+}
+
+#endif /* __NET_TC_POLICE_H */
diff --git a/include/net/tc_act/tc_sample.h b/include/net/tc_act/tc_sample.h
new file mode 100644
index 000000000..b5d76305e
--- /dev/null
+++ b/include/net/tc_act/tc_sample.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_SAMPLE_H
+#define __NET_TC_SAMPLE_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_sample.h>
+#include <net/psample.h>
+
+struct tcf_sample {
+ struct tc_action common;
+ u32 rate;
+ bool truncate;
+ u32 trunc_size;
+ struct psample_group __rcu *psample_group;
+ u32 psample_group_num;
+ struct list_head tcfm_list;
+};
+#define to_sample(a) ((struct tcf_sample *)a)
+
+static inline bool is_tcf_sample(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return a->ops && a->ops->id == TCA_ID_SAMPLE;
+#else
+ return false;
+#endif
+}
+
+static inline __u32 tcf_sample_rate(const struct tc_action *a)
+{
+ return to_sample(a)->rate;
+}
+
+static inline bool tcf_sample_truncate(const struct tc_action *a)
+{
+ return to_sample(a)->truncate;
+}
+
+static inline int tcf_sample_trunc_size(const struct tc_action *a)
+{
+ return to_sample(a)->trunc_size;
+}
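+
+/* Illustrative sketch (not part of the original header): reading the
+ * parameters of a sample action for offload. my_parse_sample is
+ * hypothetical.
+ */
+static inline void my_parse_sample(const struct tc_action *a)
+{
+ if (!is_tcf_sample(a))
+ return;
+ pr_debug("sample 1/%u, trunc to %d bytes: %d\n",
+ tcf_sample_rate(a), tcf_sample_trunc_size(a),
+ tcf_sample_truncate(a));
+}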
+
+#endif /* __NET_TC_SAMPLE_H */
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
new file mode 100644
index 000000000..dc1079f28
--- /dev/null
+++ b/include/net/tc_act/tc_skbedit.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2008, Intel Corporation.
+ *
+ * Author: Alexander Duyck <alexander.h.duyck@intel.com>
+ */
+
+#ifndef __NET_TC_SKBEDIT_H
+#define __NET_TC_SKBEDIT_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_skbedit.h>
+
+struct tcf_skbedit_params {
+ u32 flags;
+ u32 priority;
+ u32 mark;
+ u32 mask;
+ u16 queue_mapping;
+ u16 mapping_mod;
+ u16 ptype;
+ struct rcu_head rcu;
+};
+
+struct tcf_skbedit {
+ struct tc_action common;
+ struct tcf_skbedit_params __rcu *params;
+};
+#define to_skbedit(a) ((struct tcf_skbedit *)a)
+
+/* Return true iff action is the one identified by FLAG. */
+static inline bool is_tcf_skbedit_with_flag(const struct tc_action *a, u32 flag)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ u32 flags;
+
+ if (a->ops && a->ops->id == TCA_ID_SKBEDIT) {
+ rcu_read_lock();
+ flags = rcu_dereference(to_skbedit(a)->params)->flags;
+ rcu_read_unlock();
+ return flags == flag;
+ }
+#endif
+ return false;
+}
+
+/* Return true iff action is mark */
+static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
+{
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_MARK);
+}
+
+static inline u32 tcf_skbedit_mark(const struct tc_action *a)
+{
+ u32 mark;
+
+ rcu_read_lock();
+ mark = rcu_dereference(to_skbedit(a)->params)->mark;
+ rcu_read_unlock();
+
+ return mark;
+}
+
+/* Return true iff action is ptype */
+static inline bool is_tcf_skbedit_ptype(const struct tc_action *a)
+{
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_PTYPE);
+}
+
+static inline u32 tcf_skbedit_ptype(const struct tc_action *a)
+{
+ u16 ptype;
+
+ rcu_read_lock();
+ ptype = rcu_dereference(to_skbedit(a)->params)->ptype;
+ rcu_read_unlock();
+
+ return ptype;
+}
+
+/* Return true iff action is priority */
+static inline bool is_tcf_skbedit_priority(const struct tc_action *a)
+{
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_PRIORITY);
+}
+
+static inline u32 tcf_skbedit_priority(const struct tc_action *a)
+{
+ u32 priority;
+
+ rcu_read_lock();
+ priority = rcu_dereference(to_skbedit(a)->params)->priority;
+ rcu_read_unlock();
+
+ return priority;
+}
+
+/* Return true iff action is queue_mapping */
+static inline bool is_tcf_skbedit_queue_mapping(const struct tc_action *a)
+{
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_QUEUE_MAPPING);
+}
+
+/* Return true iff action is inheritdsfield */
+static inline bool is_tcf_skbedit_inheritdsfield(const struct tc_action *a)
+{
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_INHERITDSFIELD);
+}
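+
+/* Illustrative sketch (not part of the original header): the usual
+ * pattern for skbedit offload, pairing an is_* check with its getter.
+ * my_parse_skbedit_mark is hypothetical.
+ */
+static inline int my_parse_skbedit_mark(const struct tc_action *a, u32 *mark)
+{
+ if (!is_tcf_skbedit_mark(a))
+ return -EOPNOTSUPP;
+ *mark = tcf_skbedit_mark(a);
+ return 0;
+}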
+
+#endif /* __NET_TC_SKBEDIT_H */
diff --git a/include/net/tc_act/tc_skbmod.h b/include/net/tc_act/tc_skbmod.h
new file mode 100644
index 000000000..7c240d2fe
--- /dev/null
+++ b/include/net/tc_act/tc_skbmod.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2016, Jamal Hadi Salim
+ */
+
+#ifndef __NET_TC_SKBMOD_H
+#define __NET_TC_SKBMOD_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_skbmod.h>
+
+struct tcf_skbmod_params {
+ struct rcu_head rcu;
+ u64 flags; /* up to 64 types of operations; extend if needed */
+ u8 eth_dst[ETH_ALEN];
+ u16 eth_type;
+ u8 eth_src[ETH_ALEN];
+};
+
+struct tcf_skbmod {
+ struct tc_action common;
+ struct tcf_skbmod_params __rcu *skbmod_p;
+};
+#define to_skbmod(a) ((struct tcf_skbmod *)a)
+
+#endif /* __NET_TC_SKBMOD_H */
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
new file mode 100644
index 000000000..879fe8cff
--- /dev/null
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef __NET_TC_TUNNEL_KEY_H
+#define __NET_TC_TUNNEL_KEY_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_tunnel_key.h>
+#include <net/dst_metadata.h>
+
+struct tcf_tunnel_key_params {
+ struct rcu_head rcu;
+ int tcft_action;
+ struct metadata_dst *tcft_enc_metadata;
+};
+
+struct tcf_tunnel_key {
+ struct tc_action common;
+ struct tcf_tunnel_key_params __rcu *params;
+};
+
+#define to_tunnel_key(a) ((struct tcf_tunnel_key *)a)
+
+static inline bool is_tcf_tunnel_set(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_tunnel_key *t = to_tunnel_key(a);
+ struct tcf_tunnel_key_params *params;
+
+ params = rcu_dereference_protected(t->params,
+ lockdep_is_held(&a->tcfa_lock));
+ if (a->ops && a->ops->id == TCA_ID_TUNNEL_KEY)
+ return params->tcft_action == TCA_TUNNEL_KEY_ACT_SET;
+#endif
+ return false;
+}
+
+static inline bool is_tcf_tunnel_release(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_tunnel_key *t = to_tunnel_key(a);
+ struct tcf_tunnel_key_params *params;
+
+ params = rcu_dereference_protected(t->params,
+ lockdep_is_held(&a->tcfa_lock));
+ if (a->ops && a->ops->id == TCA_ID_TUNNEL_KEY)
+ return params->tcft_action == TCA_TUNNEL_KEY_ACT_RELEASE;
+#endif
+ return false;
+}
+
+static inline struct ip_tunnel_info *tcf_tunnel_info(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_tunnel_key *t = to_tunnel_key(a);
+ struct tcf_tunnel_key_params *params;
+
+ params = rcu_dereference_protected(t->params,
+ lockdep_is_held(&a->tcfa_lock));
+
+ return &params->tcft_enc_metadata->u.tun_info;
+#else
+ return NULL;
+#endif
+}
+
+static inline struct ip_tunnel_info *
+tcf_tunnel_info_copy(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ struct ip_tunnel_info *tun = tcf_tunnel_info(a);
+
+ if (tun) {
+ size_t tun_size = sizeof(*tun) + tun->options_len;
+ struct ip_tunnel_info *tun_copy = kmemdup(tun, tun_size,
+ GFP_ATOMIC);
+
+ return tun_copy;
+ }
+#endif
+ return NULL;
+}
+#endif /* __NET_TC_TUNNEL_KEY_H */
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
new file mode 100644
index 000000000..904eddfc1
--- /dev/null
+++ b/include/net/tc_act/tc_vlan.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ */
+
+#ifndef __NET_TC_VLAN_H
+#define __NET_TC_VLAN_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_vlan.h>
+
+struct tcf_vlan_params {
+ int tcfv_action;
+ unsigned char tcfv_push_dst[ETH_ALEN];
+ unsigned char tcfv_push_src[ETH_ALEN];
+ u16 tcfv_push_vid;
+ __be16 tcfv_push_proto;
+ u8 tcfv_push_prio;
+ bool tcfv_push_prio_exists;
+ struct rcu_head rcu;
+};
+
+struct tcf_vlan {
+ struct tc_action common;
+ struct tcf_vlan_params __rcu *vlan_p;
+};
+#define to_vlan(a) ((struct tcf_vlan *)a)
+
+static inline bool is_tcf_vlan(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->id == TCA_ID_VLAN)
+ return true;
+#endif
+ return false;
+}
+
+static inline u32 tcf_vlan_action(const struct tc_action *a)
+{
+ u32 tcfv_action;
+
+ rcu_read_lock();
+ tcfv_action = rcu_dereference(to_vlan(a)->vlan_p)->tcfv_action;
+ rcu_read_unlock();
+
+ return tcfv_action;
+}
+
+static inline u16 tcf_vlan_push_vid(const struct tc_action *a)
+{
+ u16 tcfv_push_vid;
+
+ rcu_read_lock();
+ tcfv_push_vid = rcu_dereference(to_vlan(a)->vlan_p)->tcfv_push_vid;
+ rcu_read_unlock();
+
+ return tcfv_push_vid;
+}
+
+static inline __be16 tcf_vlan_push_proto(const struct tc_action *a)
+{
+ __be16 tcfv_push_proto;
+
+ rcu_read_lock();
+ tcfv_push_proto = rcu_dereference(to_vlan(a)->vlan_p)->tcfv_push_proto;
+ rcu_read_unlock();
+
+ return tcfv_push_proto;
+}
+
+static inline u8 tcf_vlan_push_prio(const struct tc_action *a)
+{
+ u8 tcfv_push_prio;
+
+ rcu_read_lock();
+ tcfv_push_prio = rcu_dereference(to_vlan(a)->vlan_p)->tcfv_push_prio;
+ rcu_read_unlock();
+
+ return tcfv_push_prio;
+}
+
+static inline void tcf_vlan_push_eth(unsigned char *src, unsigned char *dest,
+ const struct tc_action *a)
+{
+ rcu_read_lock();
+ memcpy(dest, rcu_dereference(to_vlan(a)->vlan_p)->tcfv_push_dst, ETH_ALEN);
+ memcpy(src, rcu_dereference(to_vlan(a)->vlan_p)->tcfv_push_src, ETH_ALEN);
+ rcu_read_unlock();
+}
+
+#endif /* __NET_TC_VLAN_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
new file mode 100644
index 000000000..4c838f729
--- /dev/null
+++ b/include/net/tcp.h
@@ -0,0 +1,2488 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the TCP module.
+ *
+ * Version: @(#)tcp.h 1.0.5 05/23/93
+ *
+ * Authors: Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ */
+#ifndef _TCP_H
+#define _TCP_H
+
+#define FASTRETRANS_DEBUG 1
+
+#include <linux/list.h>
+#include <linux/tcp.h>
+#include <linux/bug.h>
+#include <linux/slab.h>
+#include <linux/cache.h>
+#include <linux/percpu.h>
+#include <linux/skbuff.h>
+#include <linux/kref.h>
+#include <linux/ktime.h>
+#include <linux/indirect_call_wrapper.h>
+
+#include <net/inet_connection_sock.h>
+#include <net/inet_timewait_sock.h>
+#include <net/inet_hashtables.h>
+#include <net/checksum.h>
+#include <net/request_sock.h>
+#include <net/sock_reuseport.h>
+#include <net/sock.h>
+#include <net/snmp.h>
+#include <net/ip.h>
+#include <net/tcp_states.h>
+#include <net/inet_ecn.h>
+#include <net/dst.h>
+#include <net/mptcp.h>
+
+#include <linux/seq_file.h>
+#include <linux/memcontrol.h>
+#include <linux/bpf-cgroup.h>
+#include <linux/siphash.h>
+
+extern struct inet_hashinfo tcp_hashinfo;
+
+DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
+int tcp_orphan_count_sum(void);
+
+void tcp_time_wait(struct sock *sk, int state, int timeo);
+
+#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
+#define MAX_TCP_OPTION_SPACE 40
+#define TCP_MIN_SND_MSS 48
+#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
+
+/*
+ * Never offer a window over 32767 without using window scaling. Some
+ * poor stacks do signed 16bit maths!
+ */
+#define MAX_TCP_WINDOW 32767U
+
+/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
+#define TCP_MIN_MSS 88U
+
+/* The initial MTU to use for probing */
+#define TCP_BASE_MSS 1024
+
+/* probing interval, default to 10 minutes as per RFC4821 */
+#define TCP_PROBE_INTERVAL 600
+
+/* Specify interval when tcp mtu probing will stop */
+#define TCP_PROBE_THRESHOLD 8
+
+/* After receiving this amount of duplicate ACKs fast retransmit starts. */
+#define TCP_FASTRETRANS_THRESH 3
+
+/* Maximal number of ACKs sent quickly to accelerate slow-start. */
+#define TCP_MAX_QUICKACKS 16U
+
+/* Maximal number of window scale according to RFC1323 */
+#define TCP_MAX_WSCALE 14U
+
+/* urg_data states */
+#define TCP_URG_VALID 0x0100
+#define TCP_URG_NOTYET 0x0200
+#define TCP_URG_READ 0x0400
+
+#define TCP_RETR1 3 /*
+ * This is how many retries it does before it
+ * tries to figure out if the gateway is
+ * down. Minimal RFC value is 3; it corresponds
+ * to ~3sec-8min depending on RTO.
+ */
+
+#define TCP_RETR2 15 /*
+ * This should take at least
+ * 90 minutes to time out.
+ * RFC1122 says that the limit is 100 sec.
+ * 15 is ~13-30min depending on RTO.
+ */
+
+#define TCP_SYN_RETRIES 6 /* This is how many retries are done
+ * when active opening a connection.
+ * RFC1122 says the minimum retry MUST
+ * be at least 180secs. Nevertheless
+ * this value is corresponding to
+ * 63secs of retransmission with the
+ * current initial RTO.
+ */
+
+#define TCP_SYNACK_RETRIES 5 /* This is how many retries are done
+ * when passive opening a connection.
+ * This is corresponding to 31secs of
+ * retransmission with the current
+ * initial RTO.
+ */
+
+#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
+ * state, about 60 seconds */
+#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
+ /* BSD style FIN_WAIT2 deadlock breaker.
+ * It used to be 3min, new value is 60sec,
+ * to combine FIN-WAIT-2 timeout with
+ * TIME-WAIT timer.
+ */
+#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
+
+#define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
+#if HZ >= 100
+#define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
+#define TCP_ATO_MIN ((unsigned)(HZ/25))
+#else
+#define TCP_DELACK_MIN 4U
+#define TCP_ATO_MIN 4U
+#endif
+#define TCP_RTO_MAX ((unsigned)(120*HZ))
+#define TCP_RTO_MIN ((unsigned)(HZ/5))
+#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */
+
+#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
+
+#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
+#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
+ * used as a fallback RTO for the
+ * initial data transmission if no
+ * valid RTT sample has been acquired,
+ * most likely due to retrans in 3WHS.
+ */
+
+#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
+ * for local resources.
+ */
+#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
+#define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
+#define TCP_KEEPALIVE_INTVL (75*HZ)
+
+#define MAX_TCP_KEEPIDLE 32767
+#define MAX_TCP_KEEPINTVL 32767
+#define MAX_TCP_KEEPCNT 127
+#define MAX_TCP_SYNCNT 127
+
+#define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */
+
+#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
+#define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
+ * after this time. It should be equal
+ * (or greater than) TCP_TIMEWAIT_LEN
+ * to provide reliability equal to one
+ * provided by timewait state.
+ */
+#define TCP_PAWS_WINDOW 1 /* Replay window for per-host
+ * timestamps. It must be less than
+ * minimal timewait lifetime.
+ */
+/*
+ * TCP option
+ */
+
+#define TCPOPT_NOP 1 /* Padding */
+#define TCPOPT_EOL 0 /* End of options */
+#define TCPOPT_MSS 2 /* Segment size negotiating */
+#define TCPOPT_WINDOW 3 /* Window scaling */
+#define TCPOPT_SACK_PERM 4 /* SACK Permitted */
+#define TCPOPT_SACK 5 /* SACK Block */
+#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
+#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
+#define TCPOPT_MPTCP 30 /* Multipath TCP (RFC6824) */
+#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
+#define TCPOPT_EXP 254 /* Experimental */
+/* Magic number that follows the option value when sharing TCP
+ * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
+ */
+#define TCPOPT_FASTOPEN_MAGIC 0xF989
+#define TCPOPT_SMC_MAGIC 0xE2D4C3D9
+
+/*
+ * TCP option lengths
+ */
+
+#define TCPOLEN_MSS 4
+#define TCPOLEN_WINDOW 3
+#define TCPOLEN_SACK_PERM 2
+#define TCPOLEN_TIMESTAMP 10
+#define TCPOLEN_MD5SIG 18
+#define TCPOLEN_FASTOPEN_BASE 2
+#define TCPOLEN_EXP_FASTOPEN_BASE 4
+#define TCPOLEN_EXP_SMC_BASE 6
+
+/* But this is what stacks really send out. */
+#define TCPOLEN_TSTAMP_ALIGNED 12
+#define TCPOLEN_WSCALE_ALIGNED 4
+#define TCPOLEN_SACKPERM_ALIGNED 4
+#define TCPOLEN_SACK_BASE 2
+#define TCPOLEN_SACK_BASE_ALIGNED 4
+#define TCPOLEN_SACK_PERBLOCK 8
+#define TCPOLEN_MD5SIG_ALIGNED 20
+#define TCPOLEN_MSS_ALIGNED 4
+#define TCPOLEN_EXP_SMC_BASE_ALIGNED 8
+
+/* Flags in tp->nonagle */
+#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
+#define TCP_NAGLE_CORK 2 /* Socket is corked */
+#define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */
+
+/* TCP thin-stream limits */
+#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */
+
+/* TCP initial congestion window as per rfc6928 */
+#define TCP_INIT_CWND 10
+
+/* Bit Flags for sysctl_tcp_fastopen */
+#define TFO_CLIENT_ENABLE 1
+#define TFO_SERVER_ENABLE 2
+#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */
+
+/* Accept SYN data w/o any cookie option */
+#define TFO_SERVER_COOKIE_NOT_REQD 0x200
+
+/* Force enable TFO on all listeners, i.e., not requiring the
+ * TCP_FASTOPEN socket option.
+ */
+#define TFO_SERVER_WO_SOCKOPT1 0x400
+
+
+/* sysctl variables for tcp */
+extern int sysctl_tcp_max_orphans;
+extern long sysctl_tcp_mem[3];
+
+#define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */
+#define TCP_RACK_STATIC_REO_WND 0x2 /* Use static RACK reo wnd */
+#define TCP_RACK_NO_DUPTHRESH 0x4 /* Do not use DUPACK threshold in RACK */
+
+extern atomic_long_t tcp_memory_allocated;
+DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
+
+extern struct percpu_counter tcp_sockets_allocated;
+extern unsigned long tcp_memory_pressure;
+
+/* optimized version of sk_under_memory_pressure() for TCP sockets */
+static inline bool tcp_under_memory_pressure(const struct sock *sk)
+{
+ if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
+ mem_cgroup_under_socket_pressure(sk->sk_memcg))
+ return true;
+
+ return READ_ONCE(tcp_memory_pressure);
+}
+/*
+ * The next routines deal with comparing 32 bit unsigned ints
+ * and worry about wraparound (automatic with unsigned arithmetic).
+ */
+
+static inline bool before(__u32 seq1, __u32 seq2)
+{
+ return (__s32)(seq1-seq2) < 0;
+}
+#define after(seq2, seq1) before(seq1, seq2)
+
+/* is s2<=s1<=s3 ? */
+static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
+{
+ return seq3 - seq2 >= seq1 - seq2;
+}
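+
+/* For illustration (hypothetical values): with seq1 = 0xfffffff0 and
+ * seq2 = 0x10, before(seq1, seq2) is true because the unsigned
+ * difference 0xffffffe0 reinterpreted as s32 is negative, i.e. seq1
+ * precedes seq2 across the 32-bit wrap. Likewise
+ * between(0x5, 0xfffffff0, 0x10) is true: seq3 - seq2 = 0x20 and
+ * seq1 - seq2 = 0x15, and 0x20 >= 0x15.
+ */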
+
+static inline bool tcp_out_of_memory(struct sock *sk)
+{
+ if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
+ sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
+ return true;
+ return false;
+}
+
+static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
+{
+ sk_wmem_queued_add(sk, -skb->truesize);
+ if (!skb_zcopy_pure(skb))
+ sk_mem_uncharge(sk, skb->truesize);
+ else
+ sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
+ __kfree_skb(skb);
+}
+
+void sk_forced_mem_schedule(struct sock *sk, int size);
+
+bool tcp_check_oom(struct sock *sk, int shift);
+
+
+extern struct proto tcp_prot;
+
+#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
+#define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field)
+#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
+#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
+
+void tcp_tasklet_init(void);
+
+int tcp_v4_err(struct sk_buff *skb, u32);
+
+void tcp_shutdown(struct sock *sk, int how);
+
+int tcp_v4_early_demux(struct sk_buff *skb);
+int tcp_v4_rcv(struct sk_buff *skb);
+
+void tcp_remove_empty_skb(struct sock *sk);
+int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
+int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
+int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
+ size_t size, struct ubuf_info *uarg);
+void tcp_splice_eof(struct socket *sock);
+int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
+ int flags);
+int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags);
+ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags);
+int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
+void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
+ int size_goal);
+void tcp_release_cb(struct sock *sk);
+void tcp_wfree(struct sk_buff *skb);
+void tcp_write_timer_handler(struct sock *sk);
+void tcp_delack_timer_handler(struct sock *sk);
+int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
+void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
+void tcp_rcv_space_adjust(struct sock *sk);
+int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
+void tcp_twsk_destructor(struct sock *sk);
+void tcp_twsk_purge(struct list_head *net_exit_list, int family);
+ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+ bool force_schedule);
+
+static inline void tcp_dec_quickack_mode(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (icsk->icsk_ack.quick) {
+ /* How many ACKs S/ACKing new data have we sent? */
+ const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
+
+ if (pkts >= icsk->icsk_ack.quick) {
+ icsk->icsk_ack.quick = 0;
+ /* Leaving quickack mode we deflate ATO. */
+ icsk->icsk_ack.ato = TCP_ATO_MIN;
+ } else
+ icsk->icsk_ack.quick -= pkts;
+ }
+}
+
+#define TCP_ECN_OK 1
+#define TCP_ECN_QUEUE_CWR 2
+#define TCP_ECN_DEMAND_CWR 4
+#define TCP_ECN_SEEN 8
+
+enum tcp_tw_status {
+ TCP_TW_SUCCESS = 0,
+ TCP_TW_RST = 1,
+ TCP_TW_ACK = 2,
+ TCP_TW_SYN = 3
+};
+
+
+enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
+ struct sk_buff *skb,
+ const struct tcphdr *th);
+struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req, bool fastopen,
+ bool *lost_race);
+int tcp_child_process(struct sock *parent, struct sock *child,
+ struct sk_buff *skb);
+void tcp_enter_loss(struct sock *sk);
+void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
+void tcp_clear_retrans(struct tcp_sock *tp);
+void tcp_update_metrics(struct sock *sk);
+void tcp_init_metrics(struct sock *sk);
+void tcp_metrics_init(void);
+bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
+void __tcp_close(struct sock *sk, long timeout);
+void tcp_close(struct sock *sk, long timeout);
+void tcp_init_sock(struct sock *sk);
+void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
+__poll_t tcp_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
+int do_tcp_getsockopt(struct sock *sk, int level,
+ int optname, sockptr_t optval, sockptr_t optlen);
+int tcp_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+bool tcp_bpf_bypass_getsockopt(int level, int optname);
+int do_tcp_setsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
+int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+ unsigned int optlen);
+void tcp_set_keepalive(struct sock *sk, int val);
+void tcp_syn_ack_timeout(const struct request_sock *req);
+int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int flags, int *addr_len);
+int tcp_set_rcvlowat(struct sock *sk, int val);
+int tcp_set_window_clamp(struct sock *sk, int val);
+void tcp_update_recv_tstamps(struct sk_buff *skb,
+ struct scm_timestamping_internal *tss);
+void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
+ struct scm_timestamping_internal *tss);
+void tcp_data_ready(struct sock *sk);
+#ifdef CONFIG_MMU
+int tcp_mmap(struct file *file, struct socket *sock,
+ struct vm_area_struct *vma);
+#endif
+void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
+ struct tcp_options_received *opt_rx,
+ int estab, struct tcp_fastopen_cookie *foc);
+const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
+
+/*
+ * BPF SKB-less helpers
+ */
+u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
+ struct tcphdr *th, u32 *cookie);
+u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
+ struct tcphdr *th, u32 *cookie);
+u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
+u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
+ const struct tcp_request_sock_ops *af_ops,
+ struct sock *sk, struct tcphdr *th);
+/*
+ * TCP v4 functions exported for the inet6 API
+ */
+
+void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
+void tcp_v4_mtu_reduced(struct sock *sk);
+void tcp_req_err(struct sock *sk, u32 seq, bool abort);
+void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
+int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
+struct sock *tcp_create_openreq_child(const struct sock *sk,
+ struct request_sock *req,
+ struct sk_buff *skb);
+void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
+struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct dst_entry *dst,
+ struct request_sock *req_unhash,
+ bool *own_req);
+int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
+int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+int tcp_connect(struct sock *sk);
+enum tcp_synack_type {
+ TCP_SYNACK_NORMAL,
+ TCP_SYNACK_FASTOPEN,
+ TCP_SYNACK_COOKIE,
+};
+struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
+ struct request_sock *req,
+ struct tcp_fastopen_cookie *foc,
+ enum tcp_synack_type synack_type,
+ struct sk_buff *syn_skb);
+int tcp_disconnect(struct sock *sk, int flags);
+
+void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
+int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
+void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
+
+/* From syncookies.c */
+struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct dst_entry *dst, u32 tsoff);
+int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
+ u32 cookie);
+struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
+struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
+ const struct tcp_request_sock_ops *af_ops,
+ struct sock *sk, struct sk_buff *skb);
+#ifdef CONFIG_SYN_COOKIES
+
+/* Syncookies use a monotonic timer which increments every 60 seconds.
+ * This counter is used both as a hash input and partially encoded into
+ * the cookie value. A cookie is only validated further if the delta
+ * between the current counter value and the encoded one is less than this,
+ * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
+ * the counter advances immediately after a cookie is generated).
+ */
+#define MAX_SYNCOOKIE_AGE 2
+#define TCP_SYNCOOKIE_PERIOD (60 * HZ)
+#define TCP_SYNCOOKIE_VALID (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
+
+/* syncookies: remember the time of the last synqueue overflow,
+ * but do not dirty this field too often (once per second is enough).
+ * It is racy as we do not hold a lock, but the race is very minor.
+ */
+static inline void tcp_synq_overflow(const struct sock *sk)
+{
+ unsigned int last_overflow;
+ unsigned int now = jiffies;
+
+ if (sk->sk_reuseport) {
+ struct sock_reuseport *reuse;
+
+ reuse = rcu_dereference(sk->sk_reuseport_cb);
+ if (likely(reuse)) {
+ last_overflow = READ_ONCE(reuse->synq_overflow_ts);
+ if (!time_between32(now, last_overflow,
+ last_overflow + HZ))
+ WRITE_ONCE(reuse->synq_overflow_ts, now);
+ return;
+ }
+ }
+
+ last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
+ if (!time_between32(now, last_overflow, last_overflow + HZ))
+ WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
+}
+
+/* syncookies: no recent synqueue overflow on this listening socket? */
+static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
+{
+ unsigned int last_overflow;
+ unsigned int now = jiffies;
+
+ if (sk->sk_reuseport) {
+ struct sock_reuseport *reuse;
+
+ reuse = rcu_dereference(sk->sk_reuseport_cb);
+ if (likely(reuse)) {
+ last_overflow = READ_ONCE(reuse->synq_overflow_ts);
+ return !time_between32(now, last_overflow - HZ,
+ last_overflow +
+ TCP_SYNCOOKIE_VALID);
+ }
+ }
+
+ last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
+
+ /* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
+ * then we're under synflood. However, we have to use
+ * 'last_overflow - HZ' as lower bound. That's because a concurrent
+ * tcp_synq_overflow() could update .ts_recent_stamp after we read
+ * jiffies but before we store .ts_recent_stamp into last_overflow,
+ * which could lead to rejecting a valid syncookie.
+ */
+ return !time_between32(now, last_overflow - HZ,
+ last_overflow + TCP_SYNCOOKIE_VALID);
+}
+
+static inline u32 tcp_cookie_time(void)
+{
+ u64 val = get_jiffies_64();
+
+ do_div(val, TCP_SYNCOOKIE_PERIOD);
+ return val;
+}
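+
+/* For illustration (hypothetical numbers): with HZ = 1000 and
+ * get_jiffies_64() = 120000, tcp_cookie_time() returns
+ * 120000 / (60 * 1000) = 2, i.e. the counter has advanced twice since
+ * boot, once per TCP_SYNCOOKIE_PERIOD.
+ */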
+
+u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
+ u16 *mssp);
+__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
+u64 cookie_init_timestamp(struct request_sock *req, u64 now);
+bool cookie_timestamp_decode(const struct net *net,
+ struct tcp_options_received *opt);
+bool cookie_ecn_ok(const struct tcp_options_received *opt,
+ const struct net *net, const struct dst_entry *dst);
+
+/* From net/ipv6/syncookies.c */
+int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
+ u32 cookie);
+struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
+
+u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
+ const struct tcphdr *th, u16 *mssp);
+__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
+#endif
+/* tcp_output.c */
+
+void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
+void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
+void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+ int nonagle);
+int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
+int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
+void tcp_retransmit_timer(struct sock *sk);
+void tcp_xmit_retransmit_queue(struct sock *);
+void tcp_simple_retransmit(struct sock *);
+void tcp_enter_recovery(struct sock *sk, bool ece_ack);
+int tcp_trim_head(struct sock *, struct sk_buff *, u32);
+enum tcp_queue {
+ TCP_FRAG_IN_WRITE_QUEUE,
+ TCP_FRAG_IN_RTX_QUEUE,
+};
+int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
+ struct sk_buff *skb, u32 len,
+ unsigned int mss_now, gfp_t gfp);
+
+void tcp_send_probe0(struct sock *);
+void tcp_send_partial(struct sock *);
+int tcp_write_wakeup(struct sock *, int mib);
+void tcp_send_fin(struct sock *sk);
+void tcp_send_active_reset(struct sock *sk, gfp_t priority);
+int tcp_send_synack(struct sock *);
+void tcp_push_one(struct sock *, unsigned int mss_now);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
+void tcp_send_ack(struct sock *sk);
+void tcp_send_delayed_ack(struct sock *sk);
+void tcp_send_loss_probe(struct sock *sk);
+bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+ const struct sk_buff *next_skb);
+
+/* tcp_input.c */
+void tcp_rearm_rto(struct sock *sk);
+void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
+void tcp_reset(struct sock *sk, struct sk_buff *skb);
+void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
+void tcp_fin(struct sock *sk);
+void tcp_check_space(struct sock *sk);
+void tcp_sack_compress_send_ack(struct sock *sk);
+
+/* tcp_timer.c */
+void tcp_init_xmit_timers(struct sock *);
+static inline void tcp_clear_xmit_timers(struct sock *sk)
+{
+ if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
+ __sock_put(sk);
+
+ if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
+ __sock_put(sk);
+
+ inet_csk_clear_xmit_timers(sk);
+}
+
+unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
+unsigned int tcp_current_mss(struct sock *sk);
+u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
+
+/* Bound MSS / TSO packet size with half of the window */
+static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
+{
+ int cutoff;
+
+ /* When peer uses tiny windows, there is no use in packetizing
+ * to sub-MSS pieces for the sake of SWS or making sure there
+ * are enough packets in the pipe for fast recovery.
+ *
+ * On the other hand, for extremely large MSS devices, handling
+ * smaller than MSS windows in this way does make sense.
+ */
+ if (tp->max_window > TCP_MSS_DEFAULT)
+ cutoff = (tp->max_window >> 1);
+ else
+ cutoff = tp->max_window;
+
+ if (cutoff && pktsize > cutoff)
+ return max_t(int, cutoff, 68U - tp->tcp_header_len);
+ else
+ return pktsize;
+}
+
+/* tcp.c */
+void tcp_get_info(struct sock *, struct tcp_info *);
+
+/* Read 'sendfile()'-style from a TCP socket */
+int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor);
+int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
+struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
+void tcp_read_done(struct sock *sk, size_t len);
+
+void tcp_initialize_rcv_mss(struct sock *sk);
+
+int tcp_mtu_to_mss(struct sock *sk, int pmtu);
+int tcp_mss_to_mtu(struct sock *sk, int mss);
+void tcp_mtup_init(struct sock *sk);
+
+static inline void tcp_bound_rto(const struct sock *sk)
+{
+ if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
+ inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
+}
+
+static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
+{
+ return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
+}
+
+static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
+{
+ /* mptcp hooks are only on the slow path */
+ if (sk_is_mptcp((struct sock *)tp))
+ return;
+
+ tp->pred_flags = htonl((tp->tcp_header_len << 26) |
+ ntohl(TCP_FLAG_ACK) |
+ snd_wnd);
+}
+
+static inline void tcp_fast_path_on(struct tcp_sock *tp)
+{
+ __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
+}
+
+static inline void tcp_fast_path_check(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
+ tp->rcv_wnd &&
+ atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+ !tp->urg_data)
+ tcp_fast_path_on(tp);
+}
+
+/* Compute the actual rto_min value */
+static inline u32 tcp_rto_min(struct sock *sk)
+{
+ const struct dst_entry *dst = __sk_dst_get(sk);
+ u32 rto_min = inet_csk(sk)->icsk_rto_min;
+
+ if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
+ rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
+ return rto_min;
+}
+
+static inline u32 tcp_rto_min_us(struct sock *sk)
+{
+ return jiffies_to_usecs(tcp_rto_min(sk));
+}
+
+static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
+{
+ return dst_metric_locked(dst, RTAX_CC_ALGO);
+}
+
+/* Minimum RTT in usec. ~0 means not available. */
+static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
+{
+ return minmax_get(&tp->rtt_min);
+}
+
+/* Compute the actual receive window we are currently advertising.
+ * Rcv_nxt can be after the window if our peer pushes more data
+ * than the offered window.
+ */
+static inline u32 tcp_receive_window(const struct tcp_sock *tp)
+{
+ s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
+
+ if (win < 0)
+ win = 0;
+ return (u32) win;
+}
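+
+/* For illustration (hypothetical values): with rcv_wup = 1000,
+ * rcv_wnd = 4000 and rcv_nxt = 4500, 500 bytes of the advertised
+ * window remain; had the peer pushed past the offer (say
+ * rcv_nxt = 5200), the s32 intermediate would go negative and the
+ * result is clamped to 0.
+ */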
+
+/* Choose a new window, without checks for shrinking, and without
+ * scaling applied to the result. The caller does these things
+ * if necessary. This is a "raw" window selection.
+ */
+u32 __tcp_select_window(struct sock *sk);
+
+void tcp_send_window_probe(struct sock *sk);
+
+/* TCP uses 32bit jiffies to save some space.
+ * Note that this is different from tcp_time_stamp, which was
+ * historically the same value until linux-4.13.
+ */
+#define tcp_jiffies32 ((u32)jiffies)
+
+/*
+ * Deliver a 32bit value for TCP timestamp option (RFC 7323)
+ * It is no longer tied to jiffies, but to a 1 ms clock.
+ * Note: double check if you want to use tcp_jiffies32 instead of this.
+ */
+#define TCP_TS_HZ 1000
+
+static inline u64 tcp_clock_ns(void)
+{
+ return ktime_get_ns();
+}
+
+static inline u64 tcp_clock_us(void)
+{
+ return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
+}
+
+/* This should only be used in contexts where tp->tcp_mstamp is up to date */
+static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
+{
+ return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
+}
+
+/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
+static inline u64 tcp_ns_to_ts(u64 ns)
+{
+ return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
+}
+
+/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
+static inline u32 tcp_time_stamp_raw(void)
+{
+ return tcp_ns_to_ts(tcp_clock_ns());
+}
+
+void tcp_mstamp_refresh(struct tcp_sock *tp);
+
+static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
+{
+ return max_t(s64, t1 - t0, 0);
+}
+
+static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
+{
+ return tcp_ns_to_ts(skb->skb_mstamp_ns);
+}
+
+/* provide the departure time in us unit */
+static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
+{
+ return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
+}
+
+
+#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
+
+#define TCPHDR_FIN 0x01
+#define TCPHDR_SYN 0x02
+#define TCPHDR_RST 0x04
+#define TCPHDR_PSH 0x08
+#define TCPHDR_ACK 0x10
+#define TCPHDR_URG 0x20
+#define TCPHDR_ECE 0x40
+#define TCPHDR_CWR 0x80
+
+#define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
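+
+/* For illustration: an ECN-setup SYN (RFC 3168) sets SYN, ECE and CWR
+ * together, so, given a valid struct tcphdr *th, the check
+ * (tcp_flag_byte(th) & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN
+ * recognizes it.
+ */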
+
+/* This is what the send packet queuing engine uses to pass
+ * TCP per-packet control information to the transmission code.
+ * We also store the host-order sequence numbers here.
+ * This is 44 bytes if IPV6 is enabled.
+ * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
+ */
+struct tcp_skb_cb {
+ __u32 seq; /* Starting sequence number */
+ __u32 end_seq; /* SEQ + FIN + SYN + datalen */
+ union {
+ /* Note : tcp_tw_isn is used in input path only
+ * (isn chosen by tcp_timewait_state_process())
+ *
+ * tcp_gso_segs/size are used in write queue only,
+ * cf tcp_skb_pcount()/tcp_skb_mss()
+ */
+ __u32 tcp_tw_isn;
+ struct {
+ u16 tcp_gso_segs;
+ u16 tcp_gso_size;
+ };
+ };
+ __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
+
+ __u8 sacked; /* State flags for SACK. */
+#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
+#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
+#define TCPCB_LOST 0x04 /* SKB is lost */
+#define TCPCB_TAGBITS 0x07 /* All tag bits */
+#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp_ns) */
+#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
+#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
+ TCPCB_REPAIRED)
+
+ __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
+ __u8 txstamp_ack:1, /* Record TX timestamp for ack? */
+ eor:1, /* Is skb MSG_EOR marked? */
+ has_rxtstamp:1, /* SKB has a RX timestamp */
+ unused:5;
+ __u32 ack_seq; /* Sequence number ACK'd */
+ union {
+ struct {
+#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
+ /* There is space for up to 24 bytes */
+ __u32 is_app_limited:1, /* cwnd not fully used? */
+ delivered_ce:20,
+ unused:11;
+ /* pkts S/ACKed so far upon tx of skb, incl retrans: */
+ __u32 delivered;
+ /* start of send pipeline phase */
+ u64 first_tx_mstamp;
+ /* when we reached the "delivered" count */
+ u64 delivered_mstamp;
+ } tx; /* only used for outgoing skbs */
+ union {
+ struct inet_skb_parm h4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_skb_parm h6;
+#endif
+ } header; /* For incoming skbs */
+ };
+};
+
+#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
+
+extern const struct inet_connection_sock_af_ops ipv4_specific;
+
+#if IS_ENABLED(CONFIG_IPV6)
+/* This is the variant of inet6_iif() that must be used by TCP,
+ * as TCP moves IP6CB into a different location in skb->cb[]
+ */
+static inline int tcp_v6_iif(const struct sk_buff *skb)
+{
+ return TCP_SKB_CB(skb)->header.h6.iif;
+}
+
+static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
+{
+ bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
+
+ return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
+}
+
+/* TCP_SKB_CB reference means this cannot be used from early demux */
+static inline int tcp_v6_sdif(const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
+ return TCP_SKB_CB(skb)->header.h6.iif;
+#endif
+ return 0;
+}
+
+extern const struct inet_connection_sock_af_ops ipv6_specific;
+
+INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
+void tcp_v6_early_demux(struct sk_buff *skb);
+
+#endif
+
+/* TCP_SKB_CB reference means this cannot be used from early demux */
+static inline int tcp_v4_sdif(struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
+ return TCP_SKB_CB(skb)->header.h4.iif;
+#endif
+ return 0;
+}
+
+/* Due to TSO, an SKB can be composed of multiple actual
+ * packets. To keep these tracked properly, we use this.
+ */
+static inline int tcp_skb_pcount(const struct sk_buff *skb)
+{
+ return TCP_SKB_CB(skb)->tcp_gso_segs;
+}
+
+static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
+{
+ TCP_SKB_CB(skb)->tcp_gso_segs = segs;
+}
+
+static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
+{
+ TCP_SKB_CB(skb)->tcp_gso_segs += segs;
+}
+
+/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
+static inline int tcp_skb_mss(const struct sk_buff *skb)
+{
+ return TCP_SKB_CB(skb)->tcp_gso_size;
+}
+
+static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
+{
+ return likely(!TCP_SKB_CB(skb)->eor);
+}
+
+static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
+ const struct sk_buff *from)
+{
+ return likely(tcp_skb_can_collapse_to(to) &&
+ mptcp_skb_can_collapse(to, from) &&
+ skb_pure_zcopy_same(to, from));
+}
+
+/* Events passed to congestion control interface */
+enum tcp_ca_event {
+ CA_EVENT_TX_START, /* first transmit when no packets in flight */
+ CA_EVENT_CWND_RESTART, /* congestion window restart */
+ CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */
+ CA_EVENT_LOSS, /* loss timeout */
+ CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
+ CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
+};
+
+/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
+enum tcp_ca_ack_event_flags {
+ CA_ACK_SLOWPATH = (1 << 0), /* In slow path processing */
+ CA_ACK_WIN_UPDATE = (1 << 1), /* ACK updated window */
+ CA_ACK_ECE = (1 << 2), /* ECE bit is set on ack */
+};
+
+/*
+ * Interface for adding new TCP congestion control handlers
+ */
+#define TCP_CA_NAME_MAX 16
+#define TCP_CA_MAX 128
+#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
+
+#define TCP_CA_UNSPEC 0
+
+/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
+#define TCP_CONG_NON_RESTRICTED 0x1
+/* Requires ECN/ECT set on all packets */
+#define TCP_CONG_NEEDS_ECN 0x2
+#define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
+
+union tcp_cc_info;
+
+struct ack_sample {
+ u32 pkts_acked;
+ s32 rtt_us;
+ u32 in_flight;
+};
+
+/* A rate sample measures the number of (original/retransmitted) data
+ * packets delivered "delivered" over an interval of time "interval_us".
+ * The tcp_rate.c code fills in the rate sample, and congestion
+ * control modules that define a cong_control function to run at the end
+ * of ACK processing can optionally choose to consult this sample when
+ * setting cwnd and pacing rate.
+ * A sample is invalid if "delivered" or "interval_us" is negative.
+ */
+struct rate_sample {
+ u64 prior_mstamp; /* starting timestamp for interval */
+ u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
+ u32 prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
+ s32 delivered; /* number of packets delivered over interval */
+ s32 delivered_ce; /* number of packets delivered w/ CE marks*/
+ long interval_us; /* time for tp->delivered to incr "delivered" */
+ u32 snd_interval_us; /* snd interval for delivered packets */
+ u32 rcv_interval_us; /* rcv interval for delivered packets */
+ long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
+ int losses; /* number of packets marked lost upon ACK */
+ u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
+ u32 prior_in_flight; /* in flight before this ACK */
+ u32 last_end_seq; /* end_seq of most recently ACKed packet */
+ bool is_app_limited; /* is sample from packet with bubble in pipe? */
+ bool is_retrans; /* is sample from retransmission? */
+ bool is_ack_delayed; /* is this (likely) a delayed ACK? */
+};
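+
+/* For illustration (hypothetical numbers): a sample with
+ * delivered = 10 segments of 1448 bytes over interval_us = 10000
+ * implies a delivery rate of 14480 bytes / 0.01 s ~= 1.45 MB/s, which a
+ * cong_control hook may consult when setting cwnd and the pacing rate.
+ */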
+
+struct tcp_congestion_ops {
+/* fast path fields are put first to fill one cache line */
+
+ /* return slow start threshold (required) */
+ u32 (*ssthresh)(struct sock *sk);
+
+ /* do new cwnd calculation (required) */
+ void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
+
+ /* call before changing ca_state (optional) */
+ void (*set_state)(struct sock *sk, u8 new_state);
+
+ /* call when cwnd event occurs (optional) */
+ void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
+
+ /* call when ack arrives (optional) */
+ void (*in_ack_event)(struct sock *sk, u32 flags);
+
+ /* hook for packet ack accounting (optional) */
+ void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
+
+ /* override sysctl_tcp_min_tso_segs */
+ u32 (*min_tso_segs)(struct sock *sk);
+
+ /* call when packets are delivered to update cwnd and pacing rate,
+ * after all the ca_state processing. (optional)
+ */
+ void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
+
+
+ /* new value of cwnd after loss (required) */
+ u32 (*undo_cwnd)(struct sock *sk);
+ /* returns the multiplier used in tcp_sndbuf_expand (optional) */
+ u32 (*sndbuf_expand)(struct sock *sk);
+
+/* control/slow paths put last */
+ /* get info for inet_diag (optional) */
+ size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info);
+
+ char name[TCP_CA_NAME_MAX];
+ struct module *owner;
+ struct list_head list;
+ u32 key;
+ u32 flags;
+
+ /* initialize private data (optional) */
+ void (*init)(struct sock *sk);
+ /* cleanup private data (optional) */
+ void (*release)(struct sock *sk);
+} ____cacheline_aligned_in_smp;
+
+int tcp_register_congestion_control(struct tcp_congestion_ops *type);
+void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
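+
+/* A minimal sketch of a congestion control module built on this
+ * interface (the name "toy_reno" is illustrative, not an in-tree
+ * algorithm): only .ssthresh, .cong_avoid and .undo_cwnd are required,
+ * all other hooks may be left NULL.
+ *
+ *	static struct tcp_congestion_ops toy_reno __read_mostly = {
+ *		.ssthresh	= tcp_reno_ssthresh,
+ *		.cong_avoid	= tcp_reno_cong_avoid,
+ *		.undo_cwnd	= tcp_reno_undo_cwnd,
+ *		.name		= "toy_reno",
+ *		.owner		= THIS_MODULE,
+ *	};
+ *
+ *	static int __init toy_reno_register(void)
+ *	{
+ *		return tcp_register_congestion_control(&toy_reno);
+ *	}
+ */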
+
+void tcp_assign_congestion_control(struct sock *sk);
+void tcp_init_congestion_control(struct sock *sk);
+void tcp_cleanup_congestion_control(struct sock *sk);
+int tcp_set_default_congestion_control(struct net *net, const char *name);
+void tcp_get_default_congestion_control(struct net *net, char *name);
+void tcp_get_available_congestion_control(char *buf, size_t len);
+void tcp_get_allowed_congestion_control(char *buf, size_t len);
+int tcp_set_allowed_congestion_control(char *allowed);
+int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
+ bool cap_net_admin);
+u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
+
+u32 tcp_reno_ssthresh(struct sock *sk);
+u32 tcp_reno_undo_cwnd(struct sock *sk);
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
+extern struct tcp_congestion_ops tcp_reno;
+
+struct tcp_congestion_ops *tcp_ca_find(const char *name);
+struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
+u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
+#ifdef CONFIG_INET
+char *tcp_ca_get_name_by_key(u32 key, char *buffer);
+#else
+static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
+{
+ return NULL;
+}
+#endif
+
+static inline bool tcp_ca_needs_ecn(const struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+
+ return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
+}
+
+static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (icsk->icsk_ca_ops->cwnd_event)
+ icsk->icsk_ca_ops->cwnd_event(sk, event);
+}
+
+/* From tcp_cong.c */
+void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
+
+/* From tcp_rate.c */
+void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
+void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
+ struct rate_sample *rs);
+void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
+ bool is_sack_reneg, struct rate_sample *rs);
+void tcp_rate_check_app_limited(struct sock *sk);
+
+static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
+{
+ return t1 > t2 || (t1 == t2 && after(seq1, seq2));
+}
+
+/* These functions determine how the current flow behaves with respect to SACK
+ * handling. SACK is negotiated with the peer, and therefore it can vary
+ * between different flows.
+ *
+ * tcp_is_sack - SACK enabled
+ * tcp_is_reno - No SACK
+ */
+static inline int tcp_is_sack(const struct tcp_sock *tp)
+{
+ return likely(tp->rx_opt.sack_ok);
+}
+
+static inline bool tcp_is_reno(const struct tcp_sock *tp)
+{
+ return !tcp_is_sack(tp);
+}
+
+static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
+{
+ return tp->sacked_out + tp->lost_out;
+}
+
+/* This determines how many packets are "in the network" to the best
+ * of our knowledge. In many cases it is conservative, but where
+ * detailed information is available from the receiver (via SACK
+ * blocks etc.) we can make more aggressive calculations.
+ *
+ * Use this for decisions involving congestion control, use just
+ * tp->packets_out to determine if the send queue is empty or not.
+ *
+ * Read this equation as:
+ *
+ * "Packets sent once on transmission queue" MINUS
+ * "Packets left network, but not honestly ACKed yet" PLUS
+ * "Packets fast retransmitted"
+ */
+static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
+{
+ return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
+}
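+
+/* For illustration (hypothetical numbers): with packets_out = 20,
+ * sacked_out = 4, lost_out = 3 and retrans_out = 2, tcp_left_out() is
+ * 4 + 3 = 7 and tcp_packets_in_flight() is 20 - 7 + 2 = 15.
+ */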
+
+#define TCP_INFINITE_SSTHRESH 0x7fffffff
+
+static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
+{
+ return tp->snd_cwnd;
+}
+
+static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
+{
+ WARN_ON_ONCE((int)val <= 0);
+ tp->snd_cwnd = val;
+}
+
+static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
+{
+ return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
+}
+
+static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
+{
+ return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
+}
+
+static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
+{
+ return (TCPF_CA_CWR | TCPF_CA_Recovery) &
+ (1 << inet_csk(sk)->icsk_ca_state);
+}
+
+/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
+ * The exception is cwnd reduction phase, when cwnd is decreasing towards
+ * ssthresh.
+ */
+static inline __u32 tcp_current_ssthresh(const struct sock *sk)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+ if (tcp_in_cwnd_reduction(sk))
+ return tp->snd_ssthresh;
+ else
+ return max(tp->snd_ssthresh,
+ ((tcp_snd_cwnd(tp) >> 1) +
+ (tcp_snd_cwnd(tp) >> 2)));
+}
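+
+/* For illustration: outside cwnd reduction, with tcp_snd_cwnd(tp) = 40
+ * the result is max(snd_ssthresh, 20 + 10), i.e. ssthresh is raised to
+ * at least three quarters of the current cwnd.
+ */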
+
+/* Use a define here intentionally to get the WARN_ON location shown at the caller */
+#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
+
+void tcp_enter_cwr(struct sock *sk);
+__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
+
+/* The maximum number of MSS of available cwnd for which TSO defers
+ * sending if not using sysctl_tcp_tso_win_divisor.
+ */
+static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
+{
+ return 3;
+}
+
+/* Returns end sequence number of the receiver's advertised window */
+static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
+{
+ return tp->snd_una + tp->snd_wnd;
+}
+
+/* We follow the spirit of RFC2861 to validate cwnd but implement a more
+ * flexible approach. The RFC suggests cwnd should not be raised unless
+ * it was fully used previously. And that's exactly what we do in
+ * congestion avoidance mode. But in slow start we allow cwnd to grow
+ * as long as the application has used half the cwnd.
+ * Example :
+ * cwnd is 10 (IW10), but application sends 9 frames.
+ * We allow cwnd to reach 18 when all frames are ACKed.
+ * This check is safe because it's as aggressive as slow start which already
+ * risks 100% overshoot. The advantage is that we discourage applications
+ * from sending filler packets or extra data just to artificially blow up
+ * cwnd usage, and allow an application-limited process to probe bandwidth
+ * more aggressively.
+ */
+static inline bool tcp_is_cwnd_limited(const struct sock *sk)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+ if (tp->is_cwnd_limited)
+ return true;
+
+ /* If in slow start, ensure cwnd grows to twice what was ACKed. */
+ if (tcp_in_slow_start(tp))
+ return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
+
+ return false;
+}
+
+/* BBR congestion control needs pacing.
+ * Same remark for SO_MAX_PACING_RATE.
+ * sch_fq packet scheduler is efficiently handling pacing,
+ * but is not always installed/used.
+ * Return true if TCP stack should pace packets itself.
+ */
+static inline bool tcp_needs_internal_pacing(const struct sock *sk)
+{
+ return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
+}
+
+/* Estimate in how many jiffies the next packet for this flow can be sent.
+ * Scheduling a retransmit timer too early would be silly.
+ */
+static inline unsigned long tcp_pacing_delay(const struct sock *sk)
+{
+ s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
+
+ return delay > 0 ? nsecs_to_jiffies(delay) : 0;
+}
+
+static inline void tcp_reset_xmit_timer(struct sock *sk,
+ const int what,
+ unsigned long when,
+ const unsigned long max_when)
+{
+ inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
+ max_when);
+}
+
+/* Something is really bad: we could not queue an additional packet
+ * because the qdisc is full, the receiver sent a zero window, or we are paced.
+ * We do not want to add fuel to the fire, or abort too early,
+ * so make sure the timer we arm now is at least 200ms in the future,
+ * regardless of current icsk_rto value (as it could be ~2ms)
+ */
+static inline unsigned long tcp_probe0_base(const struct sock *sk)
+{
+ return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
+}
+
+/* Variant of inet_csk_rto_backoff() used for zero window probes */
+static inline unsigned long tcp_probe0_when(const struct sock *sk,
+ unsigned long max_when)
+{
+ u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
+ inet_csk(sk)->icsk_backoff);
+ u64 when = (u64)tcp_probe0_base(sk) << backoff;
+
+ return (unsigned long)min_t(u64, when, max_when);
+}
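+
+/* For illustration: with the defaults above, TCP_RTO_MAX / TCP_RTO_MIN
+ * is 600, so the backoff exponent is capped at ilog2(600) + 1 = 10; a
+ * 200 ms probe0 base can thus grow to at most ~205 s before the
+ * max_when clamp applies.
+ */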
+
+static inline void tcp_check_probe_timer(struct sock *sk)
+{
+ if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+ tcp_probe0_base(sk), TCP_RTO_MAX);
+}
+
+static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
+{
+ tp->snd_wl1 = seq;
+}
+
+static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
+{
+ tp->snd_wl1 = seq;
+}
+
+/*
+ * Calculate(/check) TCP checksum
+ */
+static inline __sum16 tcp_v4_check(int len, __be32 saddr,
+ __be32 daddr, __wsum base)
+{
+ return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
+}
+
+static inline bool tcp_checksum_complete(struct sk_buff *skb)
+{
+ return !skb_csum_unnecessary(skb) &&
+ __skb_checksum_complete(skb);
+}
+
+bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *reason);
+
+
+int tcp_filter(struct sock *sk, struct sk_buff *skb);
+void tcp_set_state(struct sock *sk, int state);
+void tcp_done(struct sock *sk);
+int tcp_abort(struct sock *sk, int err);
+
+static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
+{
+ rx_opt->dsack = 0;
+ rx_opt->num_sacks = 0;
+}
+
+void tcp_cwnd_restart(struct sock *sk, s32 delta);
+
+static inline void tcp_slow_start_after_idle_check(struct sock *sk)
+{
+ const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
+ struct tcp_sock *tp = tcp_sk(sk);
+ s32 delta;
+
+ if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
+ tp->packets_out || ca_ops->cong_control)
+ return;
+ delta = tcp_jiffies32 - tp->lsndtime;
+ if (delta > inet_csk(sk)->icsk_rto)
+ tcp_cwnd_restart(sk, delta);
+}
+
+/* Determine a window scaling and initial window to offer. */
+void tcp_select_initial_window(const struct sock *sk, int __space,
+ __u32 mss, __u32 *rcv_wnd,
+ __u32 *window_clamp, int wscale_ok,
+ __u8 *rcv_wscale, __u32 init_rcv_wnd);
+
+static inline int tcp_win_from_space(const struct sock *sk, int space)
+{
+ int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
+
+ return tcp_adv_win_scale <= 0 ?
+ (space>>(-tcp_adv_win_scale)) :
+ space - (space>>tcp_adv_win_scale);
+}
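+
+/* For illustration: with sysctl_tcp_adv_win_scale = 2 (a hypothetical
+ * setting) and space = 65536 bytes, the usable window is
+ * 65536 - (65536 >> 2) = 49152, i.e. one quarter of the space is held
+ * back as overhead; a negative scale would instead keep only the
+ * shifted fraction.
+ */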
+
+/* Note: caller must be prepared to deal with negative returns */
+static inline int tcp_space(const struct sock *sk)
+{
+ return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
+ READ_ONCE(sk->sk_backlog.len) -
+ atomic_read(&sk->sk_rmem_alloc));
+}
+
+static inline int tcp_full_space(const struct sock *sk)
+{
+ return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
+}
+
+static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
+{
+ int unused_mem = sk_unused_reserved_mem(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
+ if (unused_mem)
+ tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
+ tcp_win_from_space(sk, unused_mem));
+}
+
+static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+{
+ __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
+}
+
+void tcp_cleanup_rbuf(struct sock *sk, int copied);
+void __tcp_cleanup_rbuf(struct sock *sk, int copied);
+
+
+/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
+ * If 87.5% (7/8) of the space has been consumed, we want to override
+ * SO_RCVLOWAT constraint, since we are receiving skbs with too small
+ * len/truesize ratio.
+ */
+static inline bool tcp_rmem_pressure(const struct sock *sk)
+{
+ int rcvbuf, threshold;
+
+ if (tcp_under_memory_pressure(sk))
+ return true;
+
+ rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+ threshold = rcvbuf - (rcvbuf >> 3);
+
+ return atomic_read(&sk->sk_rmem_alloc) > threshold;
+}
+
+static inline bool tcp_epollin_ready(const struct sock *sk, int target)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
+
+ if (avail <= 0)
+ return false;
+
+ return (avail >= target) || tcp_rmem_pressure(sk) ||
+ (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
+}
+
+extern void tcp_openreq_init_rwin(struct request_sock *req,
+ const struct sock *sk_listener,
+ const struct dst_entry *dst);
+
+void tcp_enter_memory_pressure(struct sock *sk);
+void tcp_leave_memory_pressure(struct sock *sk);
+
+static inline int keepalive_intvl_when(const struct tcp_sock *tp)
+{
+ struct net *net = sock_net((struct sock *)tp);
+ int val;
+
+ /* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
+ * and do_tcp_setsockopt().
+ */
+ val = READ_ONCE(tp->keepalive_intvl);
+
+ return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
+}
+
+static inline int keepalive_time_when(const struct tcp_sock *tp)
+{
+ struct net *net = sock_net((struct sock *)tp);
+ int val;
+
+ /* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
+ val = READ_ONCE(tp->keepalive_time);
+
+ return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
+}
+
+static inline int keepalive_probes(const struct tcp_sock *tp)
+{
+ struct net *net = sock_net((struct sock *)tp);
+ int val;
+
+ /* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
+ * and do_tcp_setsockopt().
+ */
+ val = READ_ONCE(tp->keepalive_probes);
+
+ return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
+}
+
+static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
+{
+ const struct inet_connection_sock *icsk = &tp->inet_conn;
+
+ return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
+ tcp_jiffies32 - tp->rcv_tstamp);
+}
+
+static inline int tcp_fin_time(const struct sock *sk)
+{
+ int fin_timeout = tcp_sk(sk)->linger2 ? :
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
+ const int rto = inet_csk(sk)->icsk_rto;
+
+ if (fin_timeout < (rto << 2) - (rto >> 1))
+ fin_timeout = (rto << 2) - (rto >> 1);
+
+ return fin_timeout;
+}
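+
+/* For illustration: (rto << 2) - (rto >> 1) is 3.5 * RTO, so with
+ * icsk_rto = 200 ms the FIN-WAIT-2 timeout is floored at 700 ms.
+ */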
+
+static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
+ int paws_win)
+{
+ if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
+ return true;
+ if (unlikely(!time_before32(ktime_get_seconds(),
+ rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
+ return true;
+ /*
+ * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
+ * while subsequent TCP messages carry valid values. Ignore the 0 value,
+ * or else a 'negative' tsval might prevent us from accepting their packets.
+ */
+ if (!rx_opt->ts_recent)
+ return true;
+ return false;
+}
+
+static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
+ int rst)
+{
+ if (tcp_paws_check(rx_opt, 0))
+ return false;
+
+ /* RST segments are not recommended to carry timestamps,
+ and, if they do, it is recommended to ignore PAWS because
+ "their cleanup function should take precedence over timestamps."
+ Certainly, this is a mistake. It is necessary to understand the
+ reason for this constraint before relaxing it: if the peer reboots,
+ its clock may go out of sync and half-open connections will never
+ be reset. Actually, the problem would not exist if all
+ implementations followed the draft about maintaining clocks
+ across reboots. Linux-2.2 DOES NOT!
+
+ However, we can relax the time bounds for RST segments to MSL.
+ */
+ if (rst && !time_before32(ktime_get_seconds(),
+ rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
+ return false;
+ return true;
+}
+
+bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
+ int mib_idx, u32 *last_oow_ack_time);
+
+static inline void tcp_mib_init(struct net *net)
+{
+ /* See RFC 2012 */
+ TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
+ TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
+ TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
+ TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
+}
+
+/* from STCP */
+static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
+{
+ tp->lost_skb_hint = NULL;
+}
+
+static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
+{
+ tcp_clear_retrans_hints_partial(tp);
+ tp->retransmit_skb_hint = NULL;
+}
+
+union tcp_md5_addr {
+ struct in_addr a4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr a6;
+#endif
+};
+
+/* - key database */
+struct tcp_md5sig_key {
+ struct hlist_node node;
+ u8 keylen;
+ u8 family; /* AF_INET or AF_INET6 */
+ u8 prefixlen;
+ u8 flags;
+ union tcp_md5_addr addr;
+ int l3index; /* set if key added with L3 scope */
+ u8 key[TCP_MD5SIG_MAXKEYLEN];
+ struct rcu_head rcu;
+};
+
+/* - sock block */
+struct tcp_md5sig_info {
+ struct hlist_head head;
+ struct rcu_head rcu;
+};
+
+/* - pseudo header */
+struct tcp4_pseudohdr {
+ __be32 saddr;
+ __be32 daddr;
+ __u8 pad;
+ __u8 protocol;
+ __be16 len;
+};
+
+struct tcp6_pseudohdr {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ __be32 len;
+ __be32 protocol; /* including padding */
+};
+
+union tcp_md5sum_block {
+ struct tcp4_pseudohdr ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct tcp6_pseudohdr ip6;
+#endif
+};
+
+/* - pool: digest algorithm, hash description and scratch buffer */
+struct tcp_md5sig_pool {
+ struct ahash_request *md5_req;
+ void *scratch;
+};
+
+/* - functions */
+int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
+ const struct sock *sk, const struct sk_buff *skb);
+int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
+ int family, u8 prefixlen, int l3index, u8 flags,
+ const u8 *newkey, u8 newkeylen, gfp_t gfp);
+int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
+ int family, u8 prefixlen, int l3index, u8 flags);
+struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
+ const struct sock *addr_sk);
+
+#ifdef CONFIG_TCP_MD5SIG
+#include <linux/jump_label.h>
+extern struct static_key_false tcp_md5_needed;
+struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
+ const union tcp_md5_addr *addr,
+ int family);
+static inline struct tcp_md5sig_key *
+tcp_md5_do_lookup(const struct sock *sk, int l3index,
+ const union tcp_md5_addr *addr, int family)
+{
+ if (!static_branch_unlikely(&tcp_md5_needed))
+ return NULL;
+ return __tcp_md5_do_lookup(sk, l3index, addr, family);
+}
+
+enum skb_drop_reason
+tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int dif, int sdif);
+
+#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
+#else
+static inline struct tcp_md5sig_key *
+tcp_md5_do_lookup(const struct sock *sk, int l3index,
+ const union tcp_md5_addr *addr, int family)
+{
+ return NULL;
+}
+
+static inline enum skb_drop_reason
+tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int dif, int sdif)
+{
+ return SKB_NOT_DROPPED_YET;
+}
+#define tcp_twsk_md5_key(twsk) NULL
+#endif
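+
+/* Editorial aside: thanks to the static_branch guard above, with
+ * CONFIG_TCP_MD5SIG enabled tcp_md5_do_lookup() costs a single patched
+ * no-op until the first MD5 key is installed system-wide.  A hedged
+ * usage sketch:
+ *
+ *	struct tcp_md5sig_key *key;
+ *
+ *	key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
+ *	if (key)
+ *		// segments must carry a valid TCP-MD5 option
+ */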
+
+bool tcp_alloc_md5sig_pool(void);
+
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
+static inline void tcp_put_md5sig_pool(void)
+{
+ local_bh_enable();
+}
+
+int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
+ unsigned int header_len);
+int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
+ const struct tcp_md5sig_key *key);
+
+/* From tcp_fastopen.c */
+void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
+ struct tcp_fastopen_cookie *cookie);
+void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
+ struct tcp_fastopen_cookie *cookie, bool syn_lost,
+ u16 try_exp);
+struct tcp_fastopen_request {
+ /* Fast Open cookie. Size 0 means a cookie request */
+ struct tcp_fastopen_cookie cookie;
+ struct msghdr *data; /* data in MSG_FASTOPEN */
+ size_t size;
+ int copied; /* queued in tcp_connect() */
+ struct ubuf_info *uarg;
+};
+void tcp_free_fastopen_req(struct tcp_sock *tp);
+void tcp_fastopen_destroy_cipher(struct sock *sk);
+void tcp_fastopen_ctx_destroy(struct net *net);
+int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
+ void *primary_key, void *backup_key);
+int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
+ u64 *key);
+void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
+struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct tcp_fastopen_cookie *foc,
+ const struct dst_entry *dst);
+void tcp_fastopen_init_key_once(struct net *net);
+bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
+ struct tcp_fastopen_cookie *cookie);
+bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
+#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
+#define TCP_FASTOPEN_KEY_MAX 2
+#define TCP_FASTOPEN_KEY_BUF_LENGTH \
+ (TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
+
+/* Fastopen key context */
+struct tcp_fastopen_context {
+ siphash_key_t key[TCP_FASTOPEN_KEY_MAX];
+ int num;
+ struct rcu_head rcu;
+};
+
+void tcp_fastopen_active_disable(struct sock *sk);
+bool tcp_fastopen_active_should_disable(struct sock *sk);
+void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
+void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
+
+/* Caller needs to wrap with rcu_read_(un)lock() */
+static inline
+struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
+{
+ struct tcp_fastopen_context *ctx;
+
+ ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
+ if (!ctx)
+ ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
+ return ctx;
+}
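+
+/* Illustrative usage (editorial, per the RCU comment above): callers
+ * bracket the lookup, e.g.
+ *
+ *	rcu_read_lock();
+ *	ctx = tcp_fastopen_get_ctx(sk);
+ *	if (ctx)
+ *		// derive the cookie from ctx->key[0]
+ *	rcu_read_unlock();
+ */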
+
+static inline
+bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
+ const struct tcp_fastopen_cookie *orig)
+{
+ if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
+ orig->len == foc->len &&
+ !memcmp(orig->val, foc->val, foc->len))
+ return true;
+ return false;
+}
+
+static inline
+int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
+{
+ return ctx->num;
+}
+
+/* Latencies incurred by various limits for a sender. They are
+ * chronograph-like stats that are mutually exclusive.
+ */
+enum tcp_chrono {
+ TCP_CHRONO_UNSPEC,
+ TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
+ TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
+ TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
+ __TCP_CHRONO_MAX,
+};
+
+void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
+void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
+
+/* This helper is needed because skb->tcp_tsorted_anchor uses
+ * the same memory storage as skb->destructor/_skb_refdst.
+ */
+static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
+{
+ skb->destructor = NULL;
+ skb->_skb_refdst = 0UL;
+}
+
+#define tcp_skb_tsorted_save(skb) { \
+ unsigned long _save = skb->_skb_refdst; \
+ skb->_skb_refdst = 0UL;
+
+#define tcp_skb_tsorted_restore(skb) \
+ skb->_skb_refdst = _save; \
+}
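+
+/* Editor's note (not in the original): the two macros above open and
+ * close an unbalanced brace pair on purpose, so they must always be
+ * used together, e.g.
+ *
+ *	tcp_skb_tsorted_save(skb) {
+ *		err = ip_queue_xmit(sk, skb, &inet->cork.fl);
+ *	} tcp_skb_tsorted_restore(skb);
+ *
+ * which stashes _skb_refdst (aliased by tcp_tsorted_anchor) around the
+ * call and puts it back afterwards.
+ */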
+
+void tcp_write_queue_purge(struct sock *sk);
+
+static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
+{
+ return skb_rb_first(&sk->tcp_rtx_queue);
+}
+
+static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
+{
+ return skb_rb_last(&sk->tcp_rtx_queue);
+}
+
+static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
+{
+ return skb_peek_tail(&sk->sk_write_queue);
+}
+
+#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
+ skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
+
+static inline struct sk_buff *tcp_send_head(const struct sock *sk)
+{
+ return skb_peek(&sk->sk_write_queue);
+}
+
+static inline bool tcp_skb_is_last(const struct sock *sk,
+ const struct sk_buff *skb)
+{
+ return skb_queue_is_last(&sk->sk_write_queue, skb);
+}
+
+/**
+ * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
+ * @sk: socket
+ *
+ * Since the write queue can have a temporary empty skb in it,
+ * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
+ */
+static inline bool tcp_write_queue_empty(const struct sock *sk)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+ return tp->write_seq == tp->snd_nxt;
+}
+
+static inline bool tcp_rtx_queue_empty(const struct sock *sk)
+{
+ return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
+}
+
+static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
+{
+ return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
+}
+
+static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
+{
+ __skb_queue_tail(&sk->sk_write_queue, skb);
+
+ /* Queue it, remembering where we must start sending. */
+ if (sk->sk_write_queue.next == skb)
+ tcp_chrono_start(sk, TCP_CHRONO_BUSY);
+}
+
+/* Insert new before skb on the write queue of sk. */
+static inline void tcp_insert_write_queue_before(struct sk_buff *new,
+ struct sk_buff *skb,
+ struct sock *sk)
+{
+ __skb_queue_before(&sk->sk_write_queue, skb, new);
+}
+
+static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
+{
+ tcp_skb_tsorted_anchor_cleanup(skb);
+ __skb_unlink(skb, &sk->sk_write_queue);
+}
+
+void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
+
+static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
+{
+ tcp_skb_tsorted_anchor_cleanup(skb);
+ rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
+}
+
+static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
+{
+ list_del(&skb->tcp_tsorted_anchor);
+ tcp_rtx_queue_unlink(skb, sk);
+ tcp_wmem_free_skb(sk, skb);
+}
+
+static inline void tcp_push_pending_frames(struct sock *sk)
+{
+ if (tcp_send_head(sk)) {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
+ }
+}
+
+/* Start sequence of the skb just after the highest skb with the SACKed
+ * bit set, valid only if sacked_out > 0 or when the caller has itself
+ * ensured validity.
+ */
+static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
+{
+ if (!tp->sacked_out)
+ return tp->snd_una;
+
+ if (tp->highest_sack == NULL)
+ return tp->snd_nxt;
+
+ return TCP_SKB_CB(tp->highest_sack)->seq;
+}
+
+static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
+{
+ tcp_sk(sk)->highest_sack = skb_rb_next(skb);
+}
+
+static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
+{
+ return tcp_sk(sk)->highest_sack;
+}
+
+static inline void tcp_highest_sack_reset(struct sock *sk)
+{
+ tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
+}
+
+/* Called when old skb is about to be deleted and replaced by new skb */
+static inline void tcp_highest_sack_replace(struct sock *sk,
+ struct sk_buff *old,
+ struct sk_buff *new)
+{
+ if (old == tcp_highest_sack(sk))
+ tcp_sk(sk)->highest_sack = new;
+}
+
+/* This helper checks if socket has IP_TRANSPARENT set */
+static inline bool inet_sk_transparent(const struct sock *sk)
+{
+ switch (sk->sk_state) {
+ case TCP_TIME_WAIT:
+ return inet_twsk(sk)->tw_transparent;
+ case TCP_NEW_SYN_RECV:
+ return inet_rsk(inet_reqsk(sk))->no_srccheck;
+ }
+ return inet_sk(sk)->transparent;
+}
+
+/* Determines whether this is a thin stream (which may suffer from
+ * increased latency). Used to trigger latency-reducing mechanisms.
+ */
+static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
+{
+ return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
+}
+
+/* /proc */
+enum tcp_seq_states {
+ TCP_SEQ_STATE_LISTENING,
+ TCP_SEQ_STATE_ESTABLISHED,
+};
+
+void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
+void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
+void tcp_seq_stop(struct seq_file *seq, void *v);
+
+struct tcp_seq_afinfo {
+ sa_family_t family;
+};
+
+struct tcp_iter_state {
+ struct seq_net_private p;
+ enum tcp_seq_states state;
+ struct sock *syn_wait_sk;
+ int bucket, offset, sbucket, num;
+ loff_t last_pos;
+};
+
+extern struct request_sock_ops tcp_request_sock_ops;
+extern struct request_sock_ops tcp6_request_sock_ops;
+
+void tcp_v4_destroy_sock(struct sock *sk);
+
+struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+ netdev_features_t features);
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
+INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
+int tcp_gro_complete(struct sk_buff *skb);
+
+void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
+
+static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
+{
+ struct net *net = sock_net((struct sock *)tp);
+ u32 val;
+
+ val = READ_ONCE(tp->notsent_lowat);
+
+ return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
+}
+
+bool tcp_stream_memory_free(const struct sock *sk, int wake);
+
+#ifdef CONFIG_PROC_FS
+int tcp4_proc_init(void);
+void tcp4_proc_exit(void);
+#endif
+
+int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
+int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ const struct tcp_request_sock_ops *af_ops,
+ struct sock *sk, struct sk_buff *skb);
+
+/* TCP af-specific functions */
+struct tcp_sock_af_ops {
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
+ const struct sock *addr_sk);
+ int (*calc_md5_hash)(char *location,
+ const struct tcp_md5sig_key *md5,
+ const struct sock *sk,
+ const struct sk_buff *skb);
+ int (*md5_parse)(struct sock *sk,
+ int optname,
+ sockptr_t optval,
+ int optlen);
+#endif
+};
+
+struct tcp_request_sock_ops {
+ u16 mss_clamp;
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
+ const struct sock *addr_sk);
+ int (*calc_md5_hash) (char *location,
+ const struct tcp_md5sig_key *md5,
+ const struct sock *sk,
+ const struct sk_buff *skb);
+#endif
+#ifdef CONFIG_SYN_COOKIES
+ __u32 (*cookie_init_seq)(const struct sk_buff *skb,
+ __u16 *mss);
+#endif
+ struct dst_entry *(*route_req)(const struct sock *sk,
+ struct sk_buff *skb,
+ struct flowi *fl,
+ struct request_sock *req);
+ u32 (*init_seq)(const struct sk_buff *skb);
+ u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
+ int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
+ struct flowi *fl, struct request_sock *req,
+ struct tcp_fastopen_cookie *foc,
+ enum tcp_synack_type synack_type,
+ struct sk_buff *syn_skb);
+};
+
+extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
+#if IS_ENABLED(CONFIG_IPV6)
+extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
+#endif
+
+#ifdef CONFIG_SYN_COOKIES
+static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
+ const struct sock *sk, struct sk_buff *skb,
+ __u16 *mss)
+{
+ tcp_synq_overflow(sk);
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+ return ops->cookie_init_seq(skb, mss);
+}
+#else
+static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
+ const struct sock *sk, struct sk_buff *skb,
+ __u16 *mss)
+{
+ return 0;
+}
+#endif
+
+int tcpv4_offload_init(void);
+
+void tcp_v4_init(void);
+void tcp_init(void);
+
+/* tcp_recovery.c */
+void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
+void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
+extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
+ u32 reo_wnd);
+extern bool tcp_rack_mark_lost(struct sock *sk);
+extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
+ u64 xmit_time);
+extern void tcp_rack_reo_timeout(struct sock *sk);
+extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
+
+/* At how many usecs into the future should the RTO fire? */
+static inline s64 tcp_rto_delta_us(const struct sock *sk)
+{
+ const struct sk_buff *skb = tcp_rtx_queue_head(sk);
+ u32 rto = inet_csk(sk)->icsk_rto;
+ u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
+
+ return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+}
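+
+/* Worked example (editorial, values assumed): if the rtx-queue head was
+ * last (re)sent 150 ms before tcp_mstamp and icsk_rto converts to
+ * 200 ms, this returns ~50000 us, i.e. the RTO should fire in ~50 ms;
+ * a negative result means the timer is already overdue.
+ */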
+
+/*
+ * Save and compile IPv4 options, return a pointer to it
+ */
+static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
+ struct sk_buff *skb)
+{
+ const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
+ struct ip_options_rcu *dopt = NULL;
+
+ if (opt->optlen) {
+ int opt_size = sizeof(*dopt) + opt->optlen;
+
+ dopt = kmalloc(opt_size, GFP_ATOMIC);
+ if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
+ kfree(dopt);
+ dopt = NULL;
+ }
+ }
+ return dopt;
+}
+
+/* Locally generated TCP pure ACKs have skb->truesize == 2
+ * (check tcp_send_ack() in net/ipv4/tcp_output.c).
+ * This is much faster than dissecting the packet to find out
+ * (think of GRE encapsulations, IPv4, IPv6, ...).
+ */
+static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
+{
+ return skb->truesize == 2;
+}
+
+static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
+{
+ skb->truesize = 2;
+}
+
+static inline int tcp_inq(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int answ;
+
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ answ = 0;
+ } else if (sock_flag(sk, SOCK_URGINLINE) ||
+ !tp->urg_data ||
+ before(tp->urg_seq, tp->copied_seq) ||
+ !before(tp->urg_seq, tp->rcv_nxt)) {
+
+ answ = tp->rcv_nxt - tp->copied_seq;
+
+ /* Subtract 1, if FIN was received */
+ if (answ && sock_flag(sk, SOCK_DONE))
+ answ--;
+ } else {
+ answ = tp->urg_seq - tp->copied_seq;
+ }
+
+ return answ;
+}
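+
+/* Hedged gloss (editorial): this is the SIOCINQ/FIONREAD answer.  In the
+ * SYN states nothing is readable; otherwise it is rcv_nxt - copied_seq,
+ * minus one once the FIN has been received (the FIN consumes a sequence
+ * number but carries no data), and capped at the urgent point when
+ * urgent data is pending and not read inline.
+ */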
+
+int tcp_peek_len(struct socket *sock);
+
+static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
+{
+ u16 segs_in;
+
+ segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+
+ /* We update these fields while other threads might
+ * read them from tcp_get_info()
+ */
+ WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
+ if (skb->len > tcp_hdrlen(skb))
+ WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
+}
+
+/*
+ * The TCP listen path runs lockless.
+ * We forced "struct sock" to be const qualified to make sure
+ * we don't modify one of its fields by mistake.
+ * Here, we increment sk_drops, which is an atomic_t, so we can safely
+ * cast the sock back to writable.
+ */
+static inline void tcp_listendrop(const struct sock *sk)
+{
+ atomic_inc(&((struct sock *)sk)->sk_drops);
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
+}
+
+enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
+
+/*
+ * Interface for adding Upper Level Protocols over TCP
+ */
+
+#define TCP_ULP_NAME_MAX 16
+#define TCP_ULP_MAX 128
+#define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX)
+
+struct tcp_ulp_ops {
+ struct list_head list;
+
+ /* initialize ulp */
+ int (*init)(struct sock *sk);
+ /* update ulp */
+ void (*update)(struct sock *sk, struct proto *p,
+ void (*write_space)(struct sock *sk));
+ /* cleanup ulp */
+ void (*release)(struct sock *sk);
+ /* diagnostic */
+ int (*get_info)(const struct sock *sk, struct sk_buff *skb);
+ size_t (*get_info_size)(const struct sock *sk);
+ /* clone ulp */
+ void (*clone)(const struct request_sock *req, struct sock *newsk,
+ const gfp_t priority);
+
+ char name[TCP_ULP_NAME_MAX];
+ struct module *owner;
+};
+int tcp_register_ulp(struct tcp_ulp_ops *type);
+void tcp_unregister_ulp(struct tcp_ulp_ops *type);
+int tcp_set_ulp(struct sock *sk, const char *name);
+void tcp_get_available_ulp(char *buf, size_t len);
+void tcp_cleanup_ulp(struct sock *sk);
+void tcp_update_ulp(struct sock *sk, struct proto *p,
+ void (*write_space)(struct sock *sk));
+
+#define MODULE_ALIAS_TCP_ULP(name) \
+ __MODULE_INFO(alias, alias_userspace, name); \
+ __MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
+
+#ifdef CONFIG_NET_SOCK_MSG
+struct sk_msg;
+struct sk_psock;
+
+#ifdef CONFIG_BPF_SYSCALL
+struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
+int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
+#endif /* CONFIG_BPF_SYSCALL */
+
+#ifdef CONFIG_INET
+void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
+#else
+static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
+{
+}
+#endif
+
+int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
+ struct sk_msg *msg, u32 bytes, int flags);
+#endif /* CONFIG_NET_SOCK_MSG */
+
+#if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
+static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
+{
+}
+#endif
+
+#ifdef CONFIG_CGROUP_BPF
+static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
+ struct sk_buff *skb,
+ unsigned int end_offset)
+{
+ skops->skb = skb;
+ skops->skb_data_end = skb->data + end_offset;
+}
+#else
+static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
+ struct sk_buff *skb,
+ unsigned int end_offset)
+{
+}
+#endif
+
+/* Call BPF_SOCK_OPS program that returns an int. If the return value
+ * is < 0, then the BPF op failed (for example if the loaded BPF
+ * program does not support the chosen operation or there is no BPF
+ * program loaded).
+ */
+#ifdef CONFIG_BPF
+static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
+{
+ struct bpf_sock_ops_kern sock_ops;
+ int ret;
+
+ memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
+ if (sk_fullsock(sk)) {
+ sock_ops.is_fullsock = 1;
+ sock_owned_by_me(sk);
+ }
+
+ sock_ops.sk = sk;
+ sock_ops.op = op;
+ if (nargs > 0)
+ memcpy(sock_ops.args, args, nargs * sizeof(*args));
+
+ ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
+ if (ret == 0)
+ ret = sock_ops.reply;
+ else
+ ret = -1;
+ return ret;
+}
+
+static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
+{
+ u32 args[2] = {arg1, arg2};
+
+ return tcp_call_bpf(sk, op, 2, args);
+}
+
+static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
+ u32 arg3)
+{
+ u32 args[3] = {arg1, arg2, arg3};
+
+ return tcp_call_bpf(sk, op, 3, args);
+}
+
+#else
+static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
+{
+ return -EPERM;
+}
+
+static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
+{
+ return -EPERM;
+}
+
+static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
+ u32 arg3)
+{
+ return -EPERM;
+}
+
+#endif
+
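+/* Hedged example (editorial): a caller that wants a BPF-supplied value
+ * with a kernel fallback typically does
+ *
+ *	int v = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
+ *	if (v <= 0)
+ *		v = TCP_TIMEOUT_INIT;	// no program, or op unsupported
+ *
+ * which is exactly the shape of tcp_timeout_init() below.
+ */
+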
+static inline u32 tcp_timeout_init(struct sock *sk)
+{
+ int timeout;
+
+ timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
+
+ if (timeout <= 0)
+ timeout = TCP_TIMEOUT_INIT;
+ return min_t(int, timeout, TCP_RTO_MAX);
+}
+
+static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
+{
+ int rwnd;
+
+ rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
+
+ if (rwnd < 0)
+ rwnd = 0;
+ return rwnd;
+}
+
+static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
+{
+ return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
+}
+
+static inline void tcp_bpf_rtt(struct sock *sk)
+{
+ if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
+ tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
+}
+
+#if IS_ENABLED(CONFIG_SMC)
+extern struct static_key_false tcp_have_smc;
+#endif
+
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+void clean_acked_data_enable(struct inet_connection_sock *icsk,
+ void (*cad)(struct sock *sk, u32 ack_seq));
+void clean_acked_data_disable(struct inet_connection_sock *icsk);
+void clean_acked_data_flush(void);
+#endif
+
+DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
+static inline void tcp_add_tx_delay(struct sk_buff *skb,
+ const struct tcp_sock *tp)
+{
+ if (static_branch_unlikely(&tcp_tx_delay_enabled))
+ skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
+}
+
+/* Compute Earliest Departure Time for some control packets
+ * like ACK or RST for TIME_WAIT or non ESTABLISHED sockets.
+ */
+static inline u64 tcp_transmit_time(const struct sock *sk)
+{
+ if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
+ u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
+ tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
+
+ return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
+ }
+ return 0;
+}
+
+#endif /* _TCP_H */
diff --git a/include/net/tcp_states.h b/include/net/tcp_states.h
new file mode 100644
index 000000000..cc00118ac
--- /dev/null
+++ b/include/net/tcp_states.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the TCP protocol sk_state field.
+ */
+#ifndef _LINUX_TCP_STATES_H
+#define _LINUX_TCP_STATES_H
+
+enum {
+ TCP_ESTABLISHED = 1,
+ TCP_SYN_SENT,
+ TCP_SYN_RECV,
+ TCP_FIN_WAIT1,
+ TCP_FIN_WAIT2,
+ TCP_TIME_WAIT,
+ TCP_CLOSE,
+ TCP_CLOSE_WAIT,
+ TCP_LAST_ACK,
+ TCP_LISTEN,
+ TCP_CLOSING, /* Now a valid state */
+ TCP_NEW_SYN_RECV,
+
+ TCP_MAX_STATES /* Leave at the end! */
+};
+
+#define TCP_STATE_MASK 0xF
+
+#define TCP_ACTION_FIN (1 << TCP_CLOSE)
+
+enum {
+ TCPF_ESTABLISHED = (1 << TCP_ESTABLISHED),
+ TCPF_SYN_SENT = (1 << TCP_SYN_SENT),
+ TCPF_SYN_RECV = (1 << TCP_SYN_RECV),
+ TCPF_FIN_WAIT1 = (1 << TCP_FIN_WAIT1),
+ TCPF_FIN_WAIT2 = (1 << TCP_FIN_WAIT2),
+ TCPF_TIME_WAIT = (1 << TCP_TIME_WAIT),
+ TCPF_CLOSE = (1 << TCP_CLOSE),
+ TCPF_CLOSE_WAIT = (1 << TCP_CLOSE_WAIT),
+ TCPF_LAST_ACK = (1 << TCP_LAST_ACK),
+ TCPF_LISTEN = (1 << TCP_LISTEN),
+ TCPF_CLOSING = (1 << TCP_CLOSING),
+ TCPF_NEW_SYN_RECV = (1 << TCP_NEW_SYN_RECV),
+};
+
+#endif /* _LINUX_TCP_STATES_H */
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
new file mode 100644
index 000000000..74d2b463c
--- /dev/null
+++ b/include/net/timewait_sock.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * NET Generic infrastructure for Network protocols.
+ *
+ * Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ */
+#ifndef _TIMEWAIT_SOCK_H
+#define _TIMEWAIT_SOCK_H
+
+#include <linux/slab.h>
+#include <linux/bug.h>
+#include <net/sock.h>
+
+struct timewait_sock_ops {
+ struct kmem_cache *twsk_slab;
+ char *twsk_slab_name;
+ unsigned int twsk_obj_size;
+ int (*twsk_unique)(struct sock *sk,
+ struct sock *sktw, void *twp);
+ void (*twsk_destructor)(struct sock *sk);
+};
+
+static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
+{
+ if (sk->sk_prot->twsk_prot->twsk_unique != NULL)
+ return sk->sk_prot->twsk_prot->twsk_unique(sk, sktw, twp);
+ return 0;
+}
+
+static inline void twsk_destructor(struct sock *sk)
+{
+ if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
+ sk->sk_prot->twsk_prot->twsk_destructor(sk);
+}
+
+#endif /* _TIMEWAIT_SOCK_H */
diff --git a/include/net/tipc.h b/include/net/tipc.h
new file mode 100644
index 000000000..f0e7e6bc1
--- /dev/null
+++ b/include/net/tipc.h
@@ -0,0 +1,62 @@
+/*
+ * include/net/tipc.h: Include file for TIPC message header routines
+ *
+ * Copyright (c) 2017 Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_HDR_H
+#define _TIPC_HDR_H
+
+#include <linux/random.h>
+
+#define KEEPALIVE_MSG_MASK 0x0e080000 /* LINK_PROTOCOL + MSG_IS_KEEPALIVE */
+
+struct tipc_basic_hdr {
+ __be32 w[4];
+};
+
+static inline __be32 tipc_hdr_rps_key(struct tipc_basic_hdr *hdr)
+{
+ u32 w0 = ntohl(hdr->w[0]);
+ bool keepalive_msg = (w0 & KEEPALIVE_MSG_MASK) == KEEPALIVE_MSG_MASK;
+ __be32 key;
+
+ /* Return source node identity as key */
+ if (likely(!keepalive_msg))
+ return hdr->w[3];
+
+ /* Spread PROBE/PROBE_REPLY messages across the cores */
+ get_random_bytes(&key, sizeof(key));
+ return key;
+}
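+
+/* Editorial note (derived from the code above): ordinary messages fail
+ * the KEEPALIVE_MSG_MASK test and are steered by their stable source
+ * node identity in w[3], so a flow stays on one core, while link
+ * PROBE/PROBE_REPLY traffic gets a fresh random key each time and is
+ * deliberately spread across cores.
+ */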
+
+#endif
diff --git a/include/net/tls.h b/include/net/tls.h
new file mode 100644
index 000000000..c36bf4c50
--- /dev/null
+++ b/include/net/tls.h
@@ -0,0 +1,516 @@
+/*
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _TLS_OFFLOAD_H
+#define _TLS_OFFLOAD_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/crypto.h>
+#include <linux/socket.h>
+#include <linux/tcp.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/rcupdate.h>
+
+#include <net/net_namespace.h>
+#include <net/tcp.h>
+#include <net/strparser.h>
+#include <crypto/aead.h>
+#include <uapi/linux/tls.h>
+
+struct tls_rec;
+
+struct tls_cipher_size_desc {
+ unsigned int iv;
+ unsigned int key;
+ unsigned int salt;
+ unsigned int tag;
+ unsigned int rec_seq;
+};
+
+extern const struct tls_cipher_size_desc tls_cipher_size_desc[];
+
+/* Maximum data size carried in a TLS record */
+#define TLS_MAX_PAYLOAD_SIZE ((size_t)1 << 14)
+
+#define TLS_HEADER_SIZE 5
+#define TLS_NONCE_OFFSET TLS_HEADER_SIZE
+
+#define TLS_CRYPTO_INFO_READY(info) ((info)->cipher_type)
+
+#define TLS_RECORD_TYPE_DATA 0x17
+
+#define TLS_AAD_SPACE_SIZE 13
+
+#define MAX_IV_SIZE 16
+#define TLS_TAG_SIZE 16
+#define TLS_MAX_REC_SEQ_SIZE 8
+#define TLS_MAX_AAD_SIZE TLS_AAD_SPACE_SIZE
+
+/* For CCM mode, the full 16 bytes of IV are made of four fields of given
+ * sizes:
+ *
+ * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
+ *
+ * The field 'length' is encoded in field 'b0' as '(length width - 1)'.
+ * Hence b0 contains (3 - 1) = 2.
+ */
+#define TLS_AES_CCM_IV_B0_BYTE 2
+#define TLS_SM4_CCM_IV_B0_BYTE 2
+
+enum {
+ TLS_BASE,
+ TLS_SW,
+ TLS_HW,
+ TLS_HW_RECORD,
+ TLS_NUM_CONFIG,
+};
+
+struct tx_work {
+ struct delayed_work work;
+ struct sock *sk;
+};
+
+struct tls_sw_context_tx {
+ struct crypto_aead *aead_send;
+ struct crypto_wait async_wait;
+ struct tx_work tx_work;
+ struct tls_rec *open_rec;
+ struct list_head tx_list;
+ atomic_t encrypt_pending;
+ /* protect crypto_wait with encrypt_pending */
+ spinlock_t encrypt_compl_lock;
+ int async_notify;
+ u8 async_capable:1;
+
+#define BIT_TX_SCHEDULED 0
+#define BIT_TX_CLOSING 1
+ unsigned long tx_bitmask;
+};
+
+struct tls_strparser {
+ struct sock *sk;
+
+ u32 mark : 8;
+ u32 stopped : 1;
+ u32 copy_mode : 1;
+ u32 mixed_decrypted : 1;
+ u32 msg_ready : 1;
+
+ struct strp_msg stm;
+
+ struct sk_buff *anchor;
+ struct work_struct work;
+};
+
+struct tls_sw_context_rx {
+ struct crypto_aead *aead_recv;
+ struct crypto_wait async_wait;
+ struct sk_buff_head rx_list; /* list of decrypted 'data' records */
+ void (*saved_data_ready)(struct sock *sk);
+
+ u8 reader_present;
+ u8 async_capable:1;
+ u8 zc_capable:1;
+ u8 reader_contended:1;
+
+ struct tls_strparser strp;
+
+ atomic_t decrypt_pending;
+ /* protect crypto_wait with decrypt_pending*/
+ spinlock_t decrypt_compl_lock;
+ struct sk_buff_head async_hold;
+ struct wait_queue_head wq;
+};
+
+struct tls_record_info {
+ struct list_head list;
+ u32 end_seq;
+ int len;
+ int num_frags;
+ skb_frag_t frags[MAX_SKB_FRAGS];
+};
+
+struct tls_offload_context_tx {
+ struct crypto_aead *aead_send;
+ spinlock_t lock; /* protects records list */
+ struct list_head records_list;
+ struct tls_record_info *open_record;
+ struct tls_record_info *retransmit_hint;
+ u64 hint_record_sn;
+ u64 unacked_record_sn;
+
+ struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
+ void (*sk_destruct)(struct sock *sk);
+ struct work_struct destruct_work;
+ struct tls_context *ctx;
+ u8 driver_state[] __aligned(8);
+ /* The TLS layer reserves room for driver specific state
+ * Currently the belief is that there is not enough
+ * driver specific state to justify another layer of indirection
+ */
+#define TLS_DRIVER_STATE_SIZE_TX 16
+};
+
+#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
+ (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
+
+enum tls_context_flags {
+ /* tls_device_down was called after the netdev went down, device state
+ * was released, and kTLS works in software, even though rx_conf is
+ * still TLS_HW (needed for transition).
+ */
+ TLS_RX_DEV_DEGRADED = 0,
+ /* Unlike RX where resync is driven entirely by the core in TX only
+ * the driver knows when things went out of sync, so we need the flag
+ * to be atomic.
+ */
+ TLS_TX_SYNC_SCHED = 1,
+ /* tls_dev_del was called for the RX side, device state was released,
+ * but tls_ctx->netdev might still be kept, because TX-side driver
+ * resources might not be released yet. Used to prevent the second
+ * tls_dev_del call in tls_device_down if it happens simultaneously.
+ */
+ TLS_RX_DEV_CLOSED = 2,
+};
+
+struct cipher_context {
+ char *iv;
+ char *rec_seq;
+};
+
+union tls_crypto_context {
+ struct tls_crypto_info info;
+ union {
+ struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+ struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
+ struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
+ struct tls12_crypto_info_sm4_gcm sm4_gcm;
+ struct tls12_crypto_info_sm4_ccm sm4_ccm;
+ };
+};
+
+struct tls_prot_info {
+ u16 version;
+ u16 cipher_type;
+ u16 prepend_size;
+ u16 tag_size;
+ u16 overhead_size;
+ u16 iv_size;
+ u16 salt_size;
+ u16 rec_seq_size;
+ u16 aad_size;
+ u16 tail_size;
+};
+
+struct tls_context {
+ /* read-only cache line */
+ struct tls_prot_info prot_info;
+
+ u8 tx_conf:3;
+ u8 rx_conf:3;
+ u8 zerocopy_sendfile:1;
+ u8 rx_no_pad:1;
+
+ int (*push_pending_record)(struct sock *sk, int flags);
+ void (*sk_write_space)(struct sock *sk);
+
+ void *priv_ctx_tx;
+ void *priv_ctx_rx;
+
+ struct net_device __rcu *netdev;
+
+ /* rw cache line */
+ struct cipher_context tx;
+ struct cipher_context rx;
+
+ struct scatterlist *partially_sent_record;
+ u16 partially_sent_offset;
+
+ bool in_tcp_sendpages;
+ bool pending_open_record_frags;
+
+ struct mutex tx_lock; /* protects partially_sent_* fields and
+ * per-type TX fields
+ */
+ unsigned long flags;
+
+ /* cache cold stuff */
+ struct proto *sk_proto;
+ struct sock *sk;
+
+ void (*sk_destruct)(struct sock *sk);
+
+ union tls_crypto_context crypto_send;
+ union tls_crypto_context crypto_recv;
+
+ struct list_head list;
+ refcount_t refcount;
+ struct rcu_head rcu;
+};
+
+enum tls_offload_ctx_dir {
+ TLS_OFFLOAD_CTX_DIR_RX,
+ TLS_OFFLOAD_CTX_DIR_TX,
+};
+
+struct tlsdev_ops {
+ int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn);
+ void (*tls_dev_del)(struct net_device *netdev,
+ struct tls_context *ctx,
+ enum tls_offload_ctx_dir direction);
+ int (*tls_dev_resync)(struct net_device *netdev,
+ struct sock *sk, u32 seq, u8 *rcd_sn,
+ enum tls_offload_ctx_dir direction);
+};
+
+enum tls_offload_sync_type {
+ TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
+ TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
+ TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
+};
+
+#define TLS_DEVICE_RESYNC_NH_START_IVAL 2
+#define TLS_DEVICE_RESYNC_NH_MAX_IVAL 128
+
+#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX 13
+struct tls_offload_resync_async {
+ atomic64_t req;
+ u16 loglen;
+ u16 rcd_delta;
+ u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
+};
+
+struct tls_offload_context_rx {
+ /* sw must be the first member of tls_offload_context_rx */
+ struct tls_sw_context_rx sw;
+ enum tls_offload_sync_type resync_type;
+ /* this member is set regardless of resync_type, to avoid branches */
+ u8 resync_nh_reset:1;
+ /* CORE_NEXT_HINT-only member, but use the hole here */
+ u8 resync_nh_do_now:1;
+ union {
+ /* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
+ struct {
+ atomic64_t resync_req;
+ };
+ /* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
+ struct {
+ u32 decrypted_failed;
+ u32 decrypted_tgt;
+ } resync_nh;
+ /* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
+ struct {
+ struct tls_offload_resync_async *resync_async;
+ };
+ };
+ u8 driver_state[] __aligned(8);
+ /* The TLS layer reserves room for driver specific state
+ * Currently the belief is that there is not enough
+ * driver specific state to justify another layer of indirection
+ */
+#define TLS_DRIVER_STATE_SIZE_RX 8
+};
+
+#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
+ (sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
+
+struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
+ u32 seq, u64 *p_record_sn);
+
+static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
+{
+ return rec->len == 0;
+}
+
+static inline u32 tls_record_start_seq(struct tls_record_info *rec)
+{
+ return rec->end_seq - rec->len;
+}
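+
+/* Illustrative note (editorial, values assumed): a record covering TCP
+ * sequences [1000, 1500) has end_seq == 1500 and len == 500, so
+ * tls_record_start_seq() recovers 1000; the zero-length start marker is
+ * the one record that precedes any transmitted payload.
+ */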
+
+struct sk_buff *
+tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
+ struct sk_buff *skb);
+struct sk_buff *
+tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
+ struct sk_buff *skb);
+
+static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
+{
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ return sk_fullsock(sk) &&
+ (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
+ &tls_validate_xmit_skb);
+#else
+ return false;
+#endif
+}
+
+static inline struct tls_context *tls_get_ctx(const struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ /* Use RCU on icsk_ulp_data only for sock diag code,
+ * TLS data path doesn't need rcu_dereference().
+ */
+ return (__force void *)icsk->icsk_ulp_data;
+}
+
+static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
+ const struct tls_context *tls_ctx)
+{
+ return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
+}
+
+static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
+ const struct tls_context *tls_ctx)
+{
+ return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
+}
+
+static inline struct tls_offload_context_tx *
+tls_offload_ctx_tx(const struct tls_context *tls_ctx)
+{
+ return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
+}
+
+static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ if (!ctx)
+ return false;
+ return !!tls_sw_ctx_tx(ctx);
+}
+
+static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ if (!ctx)
+ return false;
+ return !!tls_sw_ctx_rx(ctx);
+}
+
+static inline struct tls_offload_context_rx *
+tls_offload_ctx_rx(const struct tls_context *tls_ctx)
+{
+ return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
+}
+
+static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ return tls_offload_ctx_tx(tls_ctx)->driver_state;
+ else
+ return tls_offload_ctx_rx(tls_ctx)->driver_state;
+}
+
+static inline void *
+tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
+{
+ return __tls_driver_ctx(tls_get_ctx(sk), direction);
+}
+
+#define RESYNC_REQ BIT(0)
+#define RESYNC_REQ_ASYNC BIT(1)
+/* The TLS context is valid until sk_destruct is called */
+static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+ atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+}
+
+/* Log all TLS record header TCP sequences in [seq, seq+len] */
+static inline void
+tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+ atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
+ ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
+ rx_ctx->resync_async->loglen = 0;
+ rx_ctx->resync_async->rcd_delta = 0;
+}
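+
+/* Editorial note on the encoding (derived from the helpers around here):
+ * the atomic u64 packs the resync TCP sequence into bits 63..32 and
+ * RESYNC_REQ into bit 0; the async start variant additionally stores the
+ * length in bits 31..16 and sets RESYNC_REQ_ASYNC (bit 1).
+ */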
+
+static inline void
+tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+ atomic64_set(&rx_ctx->resync_async->req,
+ ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+}
+
+static inline void
+tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ tls_offload_ctx_rx(tls_ctx)->resync_type = type;
+}
+
+/* Driver's seq tracking has to be disabled until resync succeeds */
+static inline bool tls_offload_tx_resync_pending(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ bool ret;
+
+ ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
+ smp_mb__after_atomic();
+ return ret;
+}
+
+struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
+
+#ifdef CONFIG_TLS_DEVICE
+void tls_device_sk_destruct(struct sock *sk);
+void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
+
+static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
+{
+ if (!sk_fullsock(sk) ||
+ smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
+ return false;
+ return tls_get_ctx(sk)->rx_conf == TLS_HW;
+}
+#endif
+#endif /* _TLS_OFFLOAD_H */
diff --git a/include/net/tls_toe.h b/include/net/tls_toe.h
new file mode 100644
index 000000000..b3aa7593c
--- /dev/null
+++ b/include/net/tls_toe.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kref.h>
+#include <linux/list.h>
+
+struct sock;
+
+#define TLS_TOE_DEVICE_NAME_MAX 32
+
+/*
+ * This structure defines the routines for an Inline TLS driver.
+ * The following routines are optional and filled with a
+ * null pointer if not defined.
+ *
+ * @name: Name of the registered Inline TLS device
+ * @dev_list: Inline TLS device list
+ * int (*feature)(struct tls_toe_device *device);
+ *     Called to return the Inline TLS driver capability
+ *
+ * int (*hash)(struct tls_toe_device *device, struct sock *sk);
+ *     This function sets up the Inline driver for listen and programs
+ *     device-specific functionality as required
+ *
+ * void (*unhash)(struct tls_toe_device *device, struct sock *sk);
+ *     This function cleans the listen state set by the Inline TLS driver
+ *
+ * void (*release)(struct kref *kref);
+ *     Release the registered device and allocated resources
+ * @kref: Number of references to tls_toe_device
+ */
+struct tls_toe_device {
+ char name[TLS_TOE_DEVICE_NAME_MAX];
+ struct list_head dev_list;
+ int (*feature)(struct tls_toe_device *device);
+ int (*hash)(struct tls_toe_device *device, struct sock *sk);
+ void (*unhash)(struct tls_toe_device *device, struct sock *sk);
+ void (*release)(struct kref *kref);
+ struct kref kref;
+};
+
+int tls_toe_bypass(struct sock *sk);
+int tls_toe_hash(struct sock *sk);
+void tls_toe_unhash(struct sock *sk);
+
+void tls_toe_register_device(struct tls_toe_device *device);
+void tls_toe_unregister_device(struct tls_toe_device *device);
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
new file mode 100644
index 000000000..b830463e3
--- /dev/null
+++ b/include/net/transp_v6.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TRANSP_V6_H
+#define _TRANSP_V6_H
+
+#include <net/checksum.h>
+#include <net/sock.h>
+
+/* IPv6 transport protocols */
+extern struct proto rawv6_prot;
+extern struct proto udpv6_prot;
+extern struct proto udplitev6_prot;
+extern struct proto tcpv6_prot;
+extern struct proto pingv6_prot;
+
+struct flowi6;
+struct ipcm6_cookie;
+
+/* extension headers */
+int ipv6_exthdrs_init(void);
+void ipv6_exthdrs_exit(void);
+int ipv6_frag_init(void);
+void ipv6_frag_exit(void);
+
+/* transport protocols */
+int pingv6_init(void);
+void pingv6_exit(void);
+int rawv6_init(void);
+void rawv6_exit(void);
+int udpv6_init(void);
+void udpv6_exit(void);
+int udplitev6_init(void);
+void udplitev6_exit(void);
+int tcpv6_init(void);
+void tcpv6_exit(void);
+
+int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+
+/* this does all the common and the specific ctl work */
+void ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
+ struct sk_buff *skb);
+void ip6_datagram_recv_common_ctl(struct sock *sk, struct msghdr *msg,
+ struct sk_buff *skb);
+void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
+ struct sk_buff *skb);
+
+int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
+ struct flowi6 *fl6, struct ipcm6_cookie *ipc6);
+
+void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+ __u16 srcp, __u16 destp, int rqueue, int bucket);
+static inline void
+ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp,
+ __u16 destp, int bucket)
+{
+ __ip6_dgram_sock_seq_show(seq, sp, srcp, destp, sk_rmem_alloc_get(sp),
+ bucket);
+}
+
+#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
+
+void inet6_destroy_sock(struct sock *sk);
+
+#define IPV6_SEQ_DGRAM_HEADER \
+ " sl " \
+ "local_address " \
+ "remote_address " \
+ "st tx_queue rx_queue tr tm->when retrnsmt" \
+ " uid timeout inode ref pointer drops\n"
+
+#endif
diff --git a/include/net/tso.h b/include/net/tso.h
new file mode 100644
index 000000000..62c98a9c6
--- /dev/null
+++ b/include/net/tso.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TSO_H
+#define _TSO_H
+
+#include <net/ip.h>
+
+#define TSO_HEADER_SIZE 256
+
+struct tso_t {
+ int next_frag_idx;
+ int size;
+ void *data;
+ u16 ip_id;
+ u8 tlen; /* transport header len */
+ bool ipv6;
+ u32 tcp_seq;
+};
+
+int tso_count_descs(const struct sk_buff *skb);
+void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
+ int size, bool is_last);
+void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size);
+int tso_start(struct sk_buff *skb, struct tso_t *tso);
+
+#endif /* _TSO_H */
diff --git a/include/net/tun_proto.h b/include/net/tun_proto.h
new file mode 100644
index 000000000..7b0de7852
--- /dev/null
+++ b/include/net/tun_proto.h
@@ -0,0 +1,50 @@
+#ifndef __NET_TUN_PROTO_H
+#define __NET_TUN_PROTO_H
+
+#include <linux/if_ether.h>
+#include <linux/types.h>
+
+/* One byte protocol values as defined by VXLAN-GPE and NSH. These will
+ * hopefully get a shared IANA registry.
+ */
+#define TUN_P_IPV4 0x01
+#define TUN_P_IPV6 0x02
+#define TUN_P_ETHERNET 0x03
+#define TUN_P_NSH 0x04
+#define TUN_P_MPLS_UC 0x05
+
+static inline __be16 tun_p_to_eth_p(u8 proto)
+{
+ switch (proto) {
+ case TUN_P_IPV4:
+ return htons(ETH_P_IP);
+ case TUN_P_IPV6:
+ return htons(ETH_P_IPV6);
+ case TUN_P_ETHERNET:
+ return htons(ETH_P_TEB);
+ case TUN_P_NSH:
+ return htons(ETH_P_NSH);
+ case TUN_P_MPLS_UC:
+ return htons(ETH_P_MPLS_UC);
+ }
+ return 0;
+}
+
+static inline u8 tun_p_from_eth_p(__be16 proto)
+{
+ switch (proto) {
+ case htons(ETH_P_IP):
+ return TUN_P_IPV4;
+ case htons(ETH_P_IPV6):
+ return TUN_P_IPV6;
+ case htons(ETH_P_TEB):
+ return TUN_P_ETHERNET;
+ case htons(ETH_P_NSH):
+ return TUN_P_NSH;
+ case htons(ETH_P_MPLS_UC):
+ return TUN_P_MPLS_UC;
+ }
+ return 0;
+}
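+
+/* Illustrative property (editorial): the two helpers are inverses on the
+ * five registered values, e.g.
+ *
+ *	tun_p_from_eth_p(tun_p_to_eth_p(TUN_P_NSH)) == TUN_P_NSH
+ *
+ * while any unregistered value maps to 0 in either direction.
+ */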
+
+#endif
diff --git a/include/net/udp.h b/include/net/udp.h
new file mode 100644
index 000000000..fa4cdbe55
--- /dev/null
+++ b/include/net/udp.h
@@ -0,0 +1,527 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the UDP module.
+ *
+ * Version: @(#)udp.h 1.0.2 05/07/93
+ *
+ * Authors: Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Fixes:
+ * Alan Cox : Turned on udp checksums. I don't want to
+ * chase 'memory corruption' bugs that aren't!
+ */
+#ifndef _UDP_H
+#define _UDP_H
+
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <net/inet_sock.h>
+#include <net/sock.h>
+#include <net/snmp.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/seq_file.h>
+#include <linux/poll.h>
+#include <linux/indirect_call_wrapper.h>
+
+/**
+ * struct udp_skb_cb - UDP(-Lite) private variables
+ *
+ * @header: private variables used by IPv4/IPv6
+ * @cscov: checksum coverage length (UDP-Lite only)
+ * @partial_cov: if set indicates partial csum coverage
+ */
+struct udp_skb_cb {
+ union {
+ struct inet_skb_parm h4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_skb_parm h6;
+#endif
+ } header;
+ __u16 cscov;
+ __u8 partial_cov;
+};
+#define UDP_SKB_CB(__skb) ((struct udp_skb_cb *)((__skb)->cb))
+
+/**
+ * struct udp_hslot - UDP hash slot
+ *
+ * @head: head of list of sockets
+ * @count: number of sockets in 'head' list
+ * @lock: spinlock protecting changes to head/count
+ */
+struct udp_hslot {
+ struct hlist_head head;
+ int count;
+ spinlock_t lock;
+} __attribute__((aligned(2 * sizeof(long))));
+
+/**
+ * struct udp_table - UDP table
+ *
+ * @hash: hash table, sockets are hashed on (local port)
+ * @hash2: hash table, sockets are hashed on (local port, local address)
+ * @mask: number of slots in hash tables, minus 1
+ * @log: log2(number of slots in hash table)
+ */
+struct udp_table {
+ struct udp_hslot *hash;
+ struct udp_hslot *hash2;
+ unsigned int mask;
+ unsigned int log;
+};
+extern struct udp_table udp_table;
+void udp_table_init(struct udp_table *, const char *);
+static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
+ struct net *net, unsigned int num)
+{
+ return &table->hash[udp_hashfn(net, num, table->mask)];
+}
+/*
+ * For the secondary hash, net_hash_mix() is performed before calling
+ * udp_hashslot2(), which explains the difference from udp_hashslot().
+ */
+static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
+ unsigned int hash)
+{
+ return &table->hash2[hash & table->mask];
+}
+
+extern struct proto udp_prot;
+
+extern atomic_long_t udp_memory_allocated;
+DECLARE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);
+
+/* sysctl variables for udp */
+extern long sysctl_udp_mem[3];
+extern int sysctl_udp_rmem_min;
+extern int sysctl_udp_wmem_min;
+
+struct sk_buff;
+
+/*
+ * Generic checksumming routines for UDP(-Lite) v4 and v6
+ */
+static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
+{
+ return (UDP_SKB_CB(skb)->cscov == skb->len ?
+ __skb_checksum_complete(skb) :
+ __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
+}
+
+static inline int udp_lib_checksum_complete(struct sk_buff *skb)
+{
+ return !skb_csum_unnecessary(skb) &&
+ __udp_lib_checksum_complete(skb);
+}
+
+/**
+ * udp_csum_outgoing - compute UDPv4/v6 checksum over fragments
+ * @sk: socket we are writing to
+ * @skb: sk_buff containing the filled-in UDP header
+ * (checksum field must be zeroed out)
+ */
+static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
+{
+ __wsum csum = csum_partial(skb_transport_header(skb),
+ sizeof(struct udphdr), 0);
+ skb_queue_walk(&sk->sk_write_queue, skb) {
+ csum = csum_add(csum, skb->csum);
+ }
+ return csum;
+}
+
+static inline __wsum udp_csum(struct sk_buff *skb)
+{
+ __wsum csum = csum_partial(skb_transport_header(skb),
+ sizeof(struct udphdr), skb->csum);
+
+ for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
+ csum = csum_add(csum, skb->csum);
+ }
+ return csum;
+}
+
+static inline __sum16 udp_v4_check(int len, __be32 saddr,
+ __be32 daddr, __wsum base)
+{
+ return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
+}
+
+void udp_set_csum(bool nocheck, struct sk_buff *skb,
+ __be32 saddr, __be32 daddr, int len);
+
+static inline void udp_csum_pull_header(struct sk_buff *skb)
+{
+ if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
+ skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
+ skb->csum);
+ skb_pull_rcsum(skb, sizeof(struct udphdr));
+ UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
+}
+
+typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport,
+ __be16 dport);
+
+void udp_v6_early_demux(struct sk_buff *skb);
+INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
+
+struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
+ netdev_features_t features, bool is_ipv6);
+
+/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
+static inline int udp_lib_hash(struct sock *sk)
+{
+ BUG();
+ return 0;
+}
+
+void udp_lib_unhash(struct sock *sk);
+void udp_lib_rehash(struct sock *sk, u16 new_hash);
+
+static inline void udp_lib_close(struct sock *sk, long timeout)
+{
+ sk_common_release(sk);
+}
+
+int udp_lib_get_port(struct sock *sk, unsigned short snum,
+ unsigned int hash2_nulladdr);
+
+u32 udp_flow_hashrnd(void);
+
+static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
+ int min, int max, bool use_eth)
+{
+ u32 hash;
+
+ if (min >= max) {
+ /* Use default range */
+ inet_get_local_port_range(net, &min, &max);
+ }
+
+ hash = skb_get_hash(skb);
+ if (unlikely(!hash)) {
+ if (use_eth) {
+ /* Can't find a normal hash, caller has indicated an
+ * Ethernet packet so use that to compute a hash.
+ */
+ hash = jhash(skb->data, 2 * ETH_ALEN,
+ (__force u32) skb->protocol);
+ } else {
+ /* Can't derive any sort of hash for the packet, set
+ * to some consistent random value.
+ */
+ hash = udp_flow_hashrnd();
+ }
+ }
+
+	/* Since this is being sent on the wire, obfuscate the hash a bit
+	 * to minimize the possibility that any useful information is
+	 * leaked to an attacker. Only the upper 16 bits are relevant in
+	 * the computation for the 16-bit port value.
+	 */
+ hash ^= hash << 16;
+
+ return htons((((u64) hash * (max - min)) >> 32) + min);
+}
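+
+/* Illustrative sketch (not part of this header): a tunnel driver that is
+ * about to encapsulate @skb could derive the outer UDP source port from
+ * its configured range; "dev" and "cfg" are hypothetical driver state:
+ *
+ *	__be16 sport = udp_flow_src_port(dev_net(dev), skb,
+ *					 cfg->port_min, cfg->port_max, true);
+ *
+ * Passing min == max == 0 falls back to the netns local port range.
+ */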
+
+static inline int udp_rqueue_get(struct sock *sk)
+{
+ return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
+}
+
+static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+ int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept),
+ bound_dev_if, dif, sdif);
+#else
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
+/* net/ipv4/udp.c */
+void udp_destruct_common(struct sock *sk);
+void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
+int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
+void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
+struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, int *off,
+ int *err);
+static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
+ int *err)
+{
+ int off = 0;
+
+ return __skb_recv_udp(sk, flags, &off, err);
+}
+
+int udp_v4_early_demux(struct sk_buff *skb);
+bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
+int udp_get_port(struct sock *sk, unsigned short snum,
+ int (*saddr_cmp)(const struct sock *,
+ const struct sock *));
+int udp_err(struct sk_buff *, u32);
+int udp_abort(struct sock *sk, int err);
+int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
+void udp_splice_eof(struct socket *sock);
+int udp_push_pending_frames(struct sock *sk);
+void udp_flush_pending_frames(struct sock *sk);
+int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
+void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
+int udp_rcv(struct sk_buff *skb);
+int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int udp_init_sock(struct sock *sk);
+int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+int __udp_disconnect(struct sock *sk, int flags);
+int udp_disconnect(struct sock *sk, int flags);
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
+struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+ netdev_features_t features,
+ bool is_ipv6);
+int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen,
+ int (*push_pending_frames)(struct sock *));
+struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+ __be32 daddr, __be16 dport, int dif);
+struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+ __be32 daddr, __be16 dport, int dif, int sdif,
+ struct udp_table *tbl, struct sk_buff *skb);
+struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
+ __be16 sport, __be16 dport);
+struct sock *udp6_lib_lookup(struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, __be16 dport,
+ int dif);
+struct sock *__udp6_lib_lookup(struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, __be16 dport,
+ int dif, int sdif, struct udp_table *tbl,
+ struct sk_buff *skb);
+struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
+ __be16 sport, __be16 dport);
+int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
+
+/* UDP uses skb->dev_scratch to cache as much information as possible and avoid
+ * multiple cache misses on dequeue()
+ */
+struct udp_dev_scratch {
+ /* skb->truesize and the stateless bit are embedded in a single field;
+ * do not use a bitfield since the compiler emits better/smaller code
+ * this way
+ */
+ u32 _tsize_state;
+
+#if BITS_PER_LONG == 64
+ /* len and the bit needed to compute skb_csum_unnecessary
+ * will be on cold cache lines at recvmsg time.
+	 * skb->len can be stored in 16 bits since the udp header has
+	 * already been validated and pulled.
+ */
+ u16 len;
+ bool is_linear;
+ bool csum_unnecessary;
+#endif
+};
+
+static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
+{
+ return (struct udp_dev_scratch *)&skb->dev_scratch;
+}
+
+#if BITS_PER_LONG == 64
+static inline unsigned int udp_skb_len(struct sk_buff *skb)
+{
+ return udp_skb_scratch(skb)->len;
+}
+
+static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
+{
+ return udp_skb_scratch(skb)->csum_unnecessary;
+}
+
+static inline bool udp_skb_is_linear(struct sk_buff *skb)
+{
+ return udp_skb_scratch(skb)->is_linear;
+}
+
+#else
+static inline unsigned int udp_skb_len(struct sk_buff *skb)
+{
+ return skb->len;
+}
+
+static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
+{
+ return skb_csum_unnecessary(skb);
+}
+
+static inline bool udp_skb_is_linear(struct sk_buff *skb)
+{
+ return !skb_is_nonlinear(skb);
+}
+#endif
+
+static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
+ struct iov_iter *to)
+{
+ int n;
+
+ n = copy_to_iter(skb->data + off, len, to);
+ if (n == len)
+ return 0;
+
+ iov_iter_revert(to, n);
+ return -EFAULT;
+}
+
+/*
+ * SNMP statistics for UDP and UDP-Lite
+ */
+#define UDP_INC_STATS(net, field, is_udplite) do { \
+ if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
+ else SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
+#define __UDP_INC_STATS(net, field, is_udplite) do { \
+ if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
+ else __SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
+
+#define __UDP6_INC_STATS(net, field, is_udplite) do { \
+ if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
+ else __SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
+} while(0)
+#define UDP6_INC_STATS(net, field, __lite) do { \
+ if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
+ else SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
+} while(0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+#define __UDPX_MIB(sk, ipv4) \
+({ \
+ ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
+ sock_net(sk)->mib.udp_statistics) : \
+ (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 : \
+ sock_net(sk)->mib.udp_stats_in6); \
+})
+#else
+#define __UDPX_MIB(sk, ipv4) \
+({ \
+ IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
+ sock_net(sk)->mib.udp_statistics; \
+})
+#endif
+
+#define __UDPX_INC_STATS(sk, field) \
+ __SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
+
+#ifdef CONFIG_PROC_FS
+struct udp_seq_afinfo {
+ sa_family_t family;
+ struct udp_table *udp_table;
+};
+
+struct udp_iter_state {
+ struct seq_net_private p;
+ int bucket;
+ struct udp_seq_afinfo *bpf_seq_afinfo;
+};
+
+void *udp_seq_start(struct seq_file *seq, loff_t *pos);
+void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
+void udp_seq_stop(struct seq_file *seq, void *v);
+
+extern const struct seq_operations udp_seq_ops;
+extern const struct seq_operations udp6_seq_ops;
+
+int udp4_proc_init(void);
+void udp4_proc_exit(void);
+#endif /* CONFIG_PROC_FS */
+
+int udpv4_offload_init(void);
+
+void udp_init(void);
+
+DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
+void udp_encap_enable(void);
+void udp_encap_disable(void);
+#if IS_ENABLED(CONFIG_IPV6)
+DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+void udpv6_encap_enable(void);
+#endif
+
+static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
+ struct sk_buff *skb, bool ipv4)
+{
+ netdev_features_t features = NETIF_F_SG;
+ struct sk_buff *segs;
+
+ /* Avoid csum recalculation by skb_segment unless userspace explicitly
+ * asks for the final checksum values
+ */
+ if (!inet_get_convert_csum(sk))
+ features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ /* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
+ * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
+ * packets in udp_gro_complete_segment. As does UDP GSO, verified by
+ * udp_send_skb. But when those packets are looped in dev_loopback_xmit
+	 * their ip_summed CHECKSUM_PARTIAL is changed to CHECKSUM_UNNECESSARY.
+ * Reset in this specific case, where PARTIAL is both correct and
+ * required.
+ */
+ if (skb->pkt_type == PACKET_LOOPBACK)
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
+	/* the GSO CB lies after the UDP one, no need to save and restore any
+ * CB fragment
+ */
+ segs = __skb_gso_segment(skb, features, false);
+ if (IS_ERR_OR_NULL(segs)) {
+ int segs_nr = skb_shinfo(skb)->gso_segs;
+
+ atomic_add(segs_nr, &sk->sk_drops);
+ SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ consume_skb(skb);
+ return segs;
+}
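+
+/* Illustrative sketch (not part of this header): callers segment the GSO
+ * skb and then feed every resulting segment to the regular receive path;
+ * my_queue_one() is a hypothetical per-segment receive helper:
+ *
+ *	struct sk_buff *next, *segs;
+ *
+ *	segs = udp_rcv_segment(sk, skb, true);
+ *	skb_list_walk_safe(segs, skb, next) {
+ *		skb_mark_not_on_list(skb);
+ *		my_queue_one(sk, skb);
+ *	}
+ */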
+
+static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
+{
+ /* UDP-lite can't land here - no GRO */
+ WARN_ON_ONCE(UDP_SKB_CB(skb)->partial_cov);
+
+ /* UDP packets generated with UDP_SEGMENT and traversing:
+ *
+ * UDP tunnel(xmit) -> veth (segmentation) -> veth (gro) -> UDP tunnel (rx)
+ *
+	 * can reach a UDP socket with CHECKSUM_NONE, because
+ * __iptunnel_pull_header() converts CHECKSUM_PARTIAL into NONE.
+ * SKB_GSO_UDP_L4 or SKB_GSO_FRAGLIST packets with no UDP tunnel will
+ * have a valid checksum, as the GRO engine validates the UDP csum
+ * before the aggregation and nobody strips such info in between.
+ * Instead of adding another check in the tunnel fastpath, we can force
+ * a valid csum after the segmentation.
+ * Additionally fixup the UDP CB.
+ */
+ UDP_SKB_CB(skb)->cscov = skb->len;
+ if (skb->ip_summed == CHECKSUM_NONE && !skb->csum_valid)
+ skb->csum_valid = 1;
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+struct sk_psock;
+struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
+int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+#endif
+
+#endif /* _UDP_H */
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
new file mode 100644
index 000000000..e5f81710b
--- /dev/null
+++ b/include/net/udp_tunnel.h
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_UDP_TUNNEL_H
+#define __NET_UDP_TUNNEL_H
+
+#include <net/ip_tunnels.h>
+#include <net/udp.h>
+
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/ipv6_stubs.h>
+#endif
+
+struct udp_port_cfg {
+ u8 family;
+
+ /* Used only for kernel-created sockets */
+ union {
+ struct in_addr local_ip;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr local_ip6;
+#endif
+ };
+
+ union {
+ struct in_addr peer_ip;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr peer_ip6;
+#endif
+ };
+
+ __be16 local_udp_port;
+ __be16 peer_udp_port;
+ int bind_ifindex;
+ unsigned int use_udp_checksums:1,
+ use_udp6_tx_checksums:1,
+ use_udp6_rx_checksums:1,
+ ipv6_v6only:1;
+};
+
+int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp);
+
+#if IS_ENABLED(CONFIG_IPV6)
+int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp);
+#else
+static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp)
+{
+ return 0;
+}
+#endif
+
+static inline int udp_sock_create(struct net *net,
+ struct udp_port_cfg *cfg,
+ struct socket **sockp)
+{
+ if (cfg->family == AF_INET)
+ return udp_sock_create4(net, cfg, sockp);
+
+ if (cfg->family == AF_INET6)
+ return udp_sock_create6(net, cfg, sockp);
+
+ return -EPFNOSUPPORT;
+}
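+
+/* Illustrative sketch (not part of this header): a tunnel driver creating
+ * a kernel-owned IPv4 UDP socket on a well-known port (4789 is only an
+ * example value):
+ *
+ *	struct udp_port_cfg udp_conf = {
+ *		.family          = AF_INET,
+ *		.local_ip.s_addr = htonl(INADDR_ANY),
+ *		.local_udp_port  = htons(4789),
+ *	};
+ *	struct socket *sock;
+ *	int err = udp_sock_create(net, &udp_conf, &sock);
+ */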
+
+typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
+typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
+ struct sk_buff *skb);
+typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk,
+ struct sk_buff *skb,
+ unsigned int udp_offset);
+typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
+typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb);
+typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
+ int nhoff);
+
+struct udp_tunnel_sock_cfg {
+	void *sk_user_data;	/* user data used by the encap_rcv callback */
+ /* Used for setting up udp_sock fields, see udp.h for details */
+ __u8 encap_type;
+ udp_tunnel_encap_rcv_t encap_rcv;
+ udp_tunnel_encap_err_lookup_t encap_err_lookup;
+ udp_tunnel_encap_err_rcv_t encap_err_rcv;
+ udp_tunnel_encap_destroy_t encap_destroy;
+ udp_tunnel_gro_receive_t gro_receive;
+ udp_tunnel_gro_complete_t gro_complete;
+};
+
+/* Setup the given (UDP) sock to receive UDP encapsulated packets */
+void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
+ struct udp_tunnel_sock_cfg *sock_cfg);
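+
+/* Illustrative sketch (not part of this header): once the socket exists,
+ * a tunnel driver wires up its receive hook; my_encap_rcv and my_priv are
+ * hypothetical, and encap_type 1 is the conventional value used by
+ * existing UDP tunnel drivers:
+ *
+ *	struct udp_tunnel_sock_cfg tunnel_cfg = {
+ *		.sk_user_data = my_priv,
+ *		.encap_type   = 1,
+ *		.encap_rcv    = my_encap_rcv,
+ *	};
+ *	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
+ */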
+
+/* -- List of parsable UDP tunnel types --
+ *
+ * Adding to this list will result in serious debate. The main issue is
+ * that this list is essentially a list of workarounds for either poorly
+ * designed tunnels, or poorly designed device offloads.
+ *
+ * The parsing supported via these types should really be used for Rx
+ * traffic only as the network stack will have already inserted offsets for
+ * the location of the headers in the skb. In addition any ports that are
+ * pushed should be kept within the namespace without leaking to other
+ * devices such as VFs or other ports on the same device.
+ *
+ * It is strongly encouraged to use CHECKSUM_COMPLETE for Rx to avoid the
+ * need to use this for Rx checksum offload. It should not be necessary to
+ * call this function to perform Tx offloads on outgoing traffic.
+ */
+enum udp_parsable_tunnel_type {
+ UDP_TUNNEL_TYPE_VXLAN = BIT(0), /* RFC 7348 */
+ UDP_TUNNEL_TYPE_GENEVE = BIT(1), /* draft-ietf-nvo3-geneve */
+ UDP_TUNNEL_TYPE_VXLAN_GPE = BIT(2), /* draft-ietf-nvo3-vxlan-gpe */
+};
+
+struct udp_tunnel_info {
+ unsigned short type;
+ sa_family_t sa_family;
+ __be16 port;
+ u8 hw_priv;
+};
+
+/* Notify network devices of offloadable types */
+void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
+ unsigned short type);
+void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
+ unsigned short type);
+void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
+void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);
+
+static inline void udp_tunnel_get_rx_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
+ call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
+}
+
+static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
+ call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
+}
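+
+/* Illustrative sketch (not part of this header): a driver that supports
+ * the offload only while the device is up could replay the known ports
+ * from its (hypothetical) ndo_open handler, which runs under rtnl:
+ *
+ *	static int my_open(struct net_device *dev)
+ *	{
+ *		...
+ *		udp_tunnel_get_rx_info(dev);
+ *		return 0;
+ *	}
+ */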
+
+/* Transmit the skb using UDP encapsulation. */
+void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl,
+ __be16 df, __be16 src_port, __be16 dst_port,
+ bool xnet, bool nocheck);
+
+int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb,
+ struct net_device *dev, struct in6_addr *saddr,
+ struct in6_addr *daddr,
+ __u8 prio, __u8 ttl, __be32 label,
+ __be16 src_port, __be16 dst_port, bool nocheck);
+
+void udp_tunnel_sock_release(struct socket *sock);
+
+struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
+ __be16 flags, __be64 tunnel_id,
+ int md_size);
+
+#ifdef CONFIG_INET
+static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
+{
+ int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+
+ return iptunnel_handle_offloads(skb, type);
+}
+#endif
+
+static inline void udp_tunnel_encap_enable(struct sock *sk)
+{
+ if (udp_test_and_set_bit(ENCAP_ENABLED, sk))
+ return;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (READ_ONCE(sk->sk_family) == PF_INET6)
+ ipv6_stub->udpv6_encap_enable();
+#endif
+ udp_encap_enable();
+}
+
+#define UDP_TUNNEL_NIC_MAX_TABLES 4
+
+enum udp_tunnel_nic_info_flags {
+ /* Device callbacks may sleep */
+ UDP_TUNNEL_NIC_INFO_MAY_SLEEP = BIT(0),
+ /* Device only supports offloads when it's open, all ports
+ * will be removed before close and re-added after open.
+ */
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(1),
+ /* Device supports only IPv4 tunnels */
+ UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(2),
+ /* Device has hard-coded the IANA VXLAN port (4789) as VXLAN.
+ * This port must not be counted towards n_entries of any table.
+ * Driver will not receive any callback associated with port 4789.
+ */
+ UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(3),
+};
+
+struct udp_tunnel_nic;
+
+#define UDP_TUNNEL_NIC_MAX_SHARING_DEVICES (U16_MAX / 2)
+
+struct udp_tunnel_nic_shared {
+ struct udp_tunnel_nic *udp_tunnel_nic_info;
+
+ struct list_head devices;
+};
+
+struct udp_tunnel_nic_shared_node {
+ struct net_device *dev;
+ struct list_head list;
+};
+
+/**
+ * struct udp_tunnel_nic_info - driver UDP tunnel offload information
+ * @set_port: callback for adding a new port
+ * @unset_port: callback for removing a port
+ * @sync_table: callback for syncing the entire port table at once
+ * @shared: reference to device global state (optional)
+ * @flags: device flags from enum udp_tunnel_nic_info_flags
+ * @tables: UDP port tables this device has
+ * @tables.n_entries: number of entries in this table
+ * @tables.tunnel_types: types of tunnels this table accepts
+ *
+ * Drivers are expected to provide either @set_port and @unset_port callbacks
+ * or the @sync_table callback. Callbacks are invoked with rtnl lock held.
+ *
+ * Devices which (misguidedly) share the UDP tunnel port table across multiple
+ * netdevs should allocate an instance of struct udp_tunnel_nic_shared and
+ * point @shared at it.
+ * There must never be more than %UDP_TUNNEL_NIC_MAX_SHARING_DEVICES devices
+ * sharing a table.
+ *
+ * Known limitations:
+ * - UDP tunnel port notifications are fundamentally best-effort -
+ *   the driver will likely see both skbs which use a UDP tunnel port
+ *   without being tunneled, and tunneled skbs arriving on other ports -
+ *   drivers should only use these ports for non-critical RX-side
+ *   offloads, e.g. the checksum offload;
+ * - none of the devices care about the socket family at present, so we don't
+ * track it. Please extend this code if you care.
+ */
+struct udp_tunnel_nic_info {
+ /* one-by-one */
+ int (*set_port)(struct net_device *dev,
+ unsigned int table, unsigned int entry,
+ struct udp_tunnel_info *ti);
+ int (*unset_port)(struct net_device *dev,
+ unsigned int table, unsigned int entry,
+ struct udp_tunnel_info *ti);
+
+ /* all at once */
+ int (*sync_table)(struct net_device *dev, unsigned int table);
+
+ struct udp_tunnel_nic_shared *shared;
+
+ unsigned int flags;
+
+ struct udp_tunnel_nic_table_info {
+ unsigned int n_entries;
+ unsigned int tunnel_types;
+ } tables[UDP_TUNNEL_NIC_MAX_TABLES];
+};
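+
+/* Illustrative sketch (not part of this header): a driver exposing one
+ * table of eight entries shared between VXLAN and GENEVE ports;
+ * my_udp_tunnel_set/my_udp_tunnel_unset are hypothetical callbacks:
+ *
+ *	static const struct udp_tunnel_nic_info my_udp_tunnels = {
+ *		.set_port   = my_udp_tunnel_set,
+ *		.unset_port = my_udp_tunnel_unset,
+ *		.flags      = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
+ *		.tables     = {
+ *			{
+ *				.n_entries    = 8,
+ *				.tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
+ *						UDP_TUNNEL_TYPE_GENEVE,
+ *			},
+ *		},
+ *	};
+ */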
+
+/* UDP tunnel module dependencies
+ *
+ * Tunnel drivers are expected to have a hard dependency on the udp_tunnel
+ * module. NIC drivers are not; they just attach their
+ * struct udp_tunnel_nic_info to the netdev and wait for callbacks to come.
+ * Loading a tunnel driver will cause the udp_tunnel module to be loaded
+ * and only then will all the required state structures be allocated.
+ * Since we want a weak dependency from the drivers and the core to udp_tunnel
+ * we call things through the following stubs.
+ */
+struct udp_tunnel_nic_ops {
+ void (*get_port)(struct net_device *dev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti);
+ void (*set_port_priv)(struct net_device *dev, unsigned int table,
+ unsigned int idx, u8 priv);
+ void (*add_port)(struct net_device *dev, struct udp_tunnel_info *ti);
+ void (*del_port)(struct net_device *dev, struct udp_tunnel_info *ti);
+ void (*reset_ntf)(struct net_device *dev);
+
+ size_t (*dump_size)(struct net_device *dev, unsigned int table);
+ int (*dump_write)(struct net_device *dev, unsigned int table,
+ struct sk_buff *skb);
+};
+
+#ifdef CONFIG_INET
+extern const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops;
+#else
+#define udp_tunnel_nic_ops ((struct udp_tunnel_nic_ops *)NULL)
+#endif
+
+static inline void
+udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti)
+{
+ /* This helper is used from .sync_table, we indicate empty entries
+ * by zero'ed @ti. Drivers which need to know the details of a port
+ * when it gets deleted should use the .set_port / .unset_port
+ * callbacks.
+	 * Zero out here, otherwise !CONFIG_INET causes uninitialized warnings.
+ */
+ memset(ti, 0, sizeof(*ti));
+
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->get_port(dev, table, idx, ti);
+}
+
+static inline void
+udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
+ unsigned int idx, u8 priv)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv);
+}
+
+static inline void
+udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->add_port(dev, ti);
+}
+
+static inline void
+udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->del_port(dev, ti);
+}
+
+/**
+ * udp_tunnel_nic_reset_ntf() - device-originating reset notification
+ * @dev: network interface device structure
+ *
+ * Called by the driver to inform the core that the entire UDP tunnel port
+ * state has been lost, usually due to device reset. Core will assume device
+ * forgot all the ports and issue .set_port and .sync_table callbacks as
+ * necessary.
+ *
+ * This function must be called with rtnl lock held, and will issue all
+ * the callbacks before returning.
+ */
+static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->reset_ntf(dev);
+}
+
+static inline size_t
+udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
+{
+ if (!udp_tunnel_nic_ops)
+ return 0;
+ return udp_tunnel_nic_ops->dump_size(dev, table);
+}
+
+static inline int
+udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
+ struct sk_buff *skb)
+{
+ if (!udp_tunnel_nic_ops)
+ return 0;
+ return udp_tunnel_nic_ops->dump_write(dev, table, skb);
+}
+#endif
diff --git a/include/net/udplite.h b/include/net/udplite.h
new file mode 100644
index 000000000..299c14ce2
--- /dev/null
+++ b/include/net/udplite.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for the UDP-Lite (RFC 3828) code.
+ */
+#ifndef _UDPLITE_H
+#define _UDPLITE_H
+
+#include <net/ip6_checksum.h>
+#include <net/udp.h>
+
+/* UDP-Lite socket options */
+#define UDPLITE_SEND_CSCOV 10 /* sender partial coverage (as sent) */
+#define UDPLITE_RECV_CSCOV 11 /* receiver partial coverage (threshold ) */
+
+extern struct proto udplite_prot;
+extern struct udp_table udplite_table;
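+
+/* Illustrative userspace sketch (not part of this header): requesting
+ * partial coverage on a UDP-Lite socket. The CSCOV value counts from the
+ * first byte of the UDP-Lite header, so 20 covers the 8 byte header plus
+ * the first 12 payload bytes; IPPROTO_UDPLITE is 136:
+ *
+ *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
+ *	int val = 20;
+ *
+ *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &val, sizeof(val));
+ */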
+
+/*
+ * Checksum computation is all in software, hence simpler getfrag.
+ */
+static __inline__ int udplite_getfrag(void *from, char *to, int offset,
+ int len, int odd, struct sk_buff *skb)
+{
+ struct msghdr *msg = from;
+ return copy_from_iter_full(to, len, &msg->msg_iter) ? 0 : -EFAULT;
+}
+
+/*
+ * Checksumming routines
+ */
+static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
+{
+ u16 cscov;
+
+ /* In UDPv4 a zero checksum means that the transmitter generated no
+ * checksum. UDP-Lite (like IPv6) mandates checksums, hence packets
+ * with a zero checksum field are illegal. */
+ if (uh->check == 0) {
+ net_dbg_ratelimited("UDPLite: zeroed checksum field\n");
+ return 1;
+ }
+
+ cscov = ntohs(uh->len);
+
+ if (cscov == 0) /* Indicates that full coverage is required. */
+ ;
+ else if (cscov < 8 || cscov > skb->len) {
+ /*
+		 * Coverage length violates RFC 3828: log and discard.
+ */
+ net_dbg_ratelimited("UDPLite: bad csum coverage %d/%d\n",
+ cscov, skb->len);
+ return 1;
+
+ } else if (cscov < skb->len) {
+ UDP_SKB_CB(skb)->partial_cov = 1;
+ UDP_SKB_CB(skb)->cscov = cscov;
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_valid = 0;
+ }
+
+ return 0;
+}
+
+/* Fast-path computation of checksum. Socket may not be locked. */
+static inline __wsum udplite_csum(struct sk_buff *skb)
+{
+ const struct udp_sock *up = udp_sk(skb->sk);
+ const int off = skb_transport_offset(skb);
+ int len = skb->len - off;
+
+ if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
+ if (0 < up->pcslen)
+ len = up->pcslen;
+ udp_hdr(skb)->len = htons(up->pcslen);
+ }
+ skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
+
+ return skb_checksum(skb, off, len, 0);
+}
+
+void udplite4_register(void);
+int udplite_get_port(struct sock *sk, unsigned short snum,
+ int (*scmp)(const struct sock *, const struct sock *));
+#endif /* _UDPLITE_H */
diff --git a/include/net/vsock_addr.h b/include/net/vsock_addr.h
new file mode 100644
index 000000000..cf8cc140d
--- /dev/null
+++ b/include/net/vsock_addr.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * VMware vSockets Driver
+ *
+ * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
+ */
+
+#ifndef _VSOCK_ADDR_H_
+#define _VSOCK_ADDR_H_
+
+#include <uapi/linux/vm_sockets.h>
+
+void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port);
+int vsock_addr_validate(const struct sockaddr_vm *addr);
+bool vsock_addr_bound(const struct sockaddr_vm *addr);
+void vsock_addr_unbind(struct sockaddr_vm *addr);
+bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
+ const struct sockaddr_vm *other);
+int vsock_addr_cast(const struct sockaddr *addr, size_t len,
+ struct sockaddr_vm **out_addr);
+
+#endif
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
new file mode 100644
index 000000000..a46ec889a
--- /dev/null
+++ b/include/net/vxlan.h
@@ -0,0 +1,574 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_VXLAN_H
+#define __NET_VXLAN_H 1
+
+#include <linux/if_vlan.h>
+#include <net/udp_tunnel.h>
+#include <net/dst_metadata.h>
+#include <net/rtnetlink.h>
+#include <net/switchdev.h>
+#include <net/nexthop.h>
+
+#define IANA_VXLAN_UDP_PORT 4789
+#define IANA_VXLAN_GPE_UDP_PORT 4790
+
+/* VXLAN protocol (RFC 7348) header:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|R|R|I|R|R|R| Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * I = VXLAN Network Identifier (VNI) present.
+ */
+struct vxlanhdr {
+ __be32 vx_flags;
+ __be32 vx_vni;
+};
+
+/* VXLAN header flags. */
+#define VXLAN_HF_VNI cpu_to_be32(BIT(27))
+
+#define VXLAN_N_VID (1u << 24)
+#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
+#define VXLAN_VNI_MASK cpu_to_be32(VXLAN_VID_MASK << 8)
+#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+
+#define VNI_HASH_BITS 10
+#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
+#define FDB_HASH_BITS 8
+#define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
+
+/* Remote checksum offload for VXLAN (VXLAN_F_REMCSUM_[RT]X):
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|R|R|I|R|R|R|R|R|C| Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) |O| Csum start |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * C = Remote checksum offload bit. When set indicates that the
+ * remote checksum offload data is present.
+ *
+ * O = Offset bit. Indicates the checksum offset relative to
+ * checksum start.
+ *
+ * Csum start = Checksum start divided by two.
+ *
+ * http://tools.ietf.org/html/draft-herbert-vxlan-rco
+ */
+
+/* VXLAN-RCO header flags. */
+#define VXLAN_HF_RCO cpu_to_be32(BIT(21))
+
+/* Remote checksum offload header option */
+#define VXLAN_RCO_MASK cpu_to_be32(0x7f) /* Last byte of vni field */
+#define VXLAN_RCO_UDP cpu_to_be32(0x80) /* Indicate UDP RCO (TCP when not set) */
+#define VXLAN_RCO_SHIFT 1 /* Left shift of start */
+#define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1)
+#define VXLAN_MAX_REMCSUM_START (0x7f << VXLAN_RCO_SHIFT)
+
+/*
+ * VXLAN Group Based Policy Extension (VXLAN_F_GBP):
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |G|R|R|R|I|R|R|R|R|D|R|R|A|R|R|R| Group Policy ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * G = Group Policy ID present.
+ *
+ * D = Don't Learn bit. When set, this bit indicates that the egress
+ * VTEP MUST NOT learn the source address of the encapsulated frame.
+ *
+ * A = Indicates that the group policy has already been applied to
+ * this packet. Policies MUST NOT be applied by devices when the
+ * A bit is set.
+ *
+ * https://tools.ietf.org/html/draft-smith-vxlan-group-policy
+ */
+struct vxlanhdr_gbp {
+ u8 vx_flags;
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ u8 reserved_flags1:3,
+ policy_applied:1,
+ reserved_flags2:2,
+ dont_learn:1,
+ reserved_flags3:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved_flags1:1,
+ dont_learn:1,
+ reserved_flags2:2,
+ policy_applied:1,
+ reserved_flags3:3;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be16 policy_id;
+ __be32 vx_vni;
+};
+
+/* VXLAN-GBP header flags. */
+#define VXLAN_HF_GBP cpu_to_be32(BIT(31))
+
+#define VXLAN_GBP_USED_BITS (VXLAN_HF_GBP | cpu_to_be32(0xFFFFFF))
+
+/* skb->mark mapping
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|R|R|R|R|R|R|R|D|R|R|A|R|R|R| Group Policy ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+#define VXLAN_GBP_DONT_LEARN (BIT(6) << 16)
+#define VXLAN_GBP_POLICY_APPLIED (BIT(3) << 16)
+#define VXLAN_GBP_ID_MASK (0xFFFF)
+
+#define VXLAN_GBP_MASK (VXLAN_GBP_DONT_LEARN | VXLAN_GBP_POLICY_APPLIED | \
+ VXLAN_GBP_ID_MASK)
+
+/*
+ * VXLAN Generic Protocol Extension (VXLAN_F_GPE):
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|Ver|I|P|R|O| Reserved |Next Protocol |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Ver = Version. Indicates VXLAN GPE protocol version.
+ *
+ * P = Next Protocol Bit. The P bit is set to indicate that the
+ * Next Protocol field is present.
+ *
+ * O = OAM Flag Bit. The O bit is set to indicate that the packet
+ * is an OAM packet.
+ *
+ * Next Protocol = This 8 bit field indicates the protocol header
+ * immediately following the VXLAN GPE header.
+ *
+ * https://tools.ietf.org/html/draft-ietf-nvo3-vxlan-gpe-01
+ */
+
+struct vxlanhdr_gpe {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 oam_flag:1,
+ reserved_flags1:1,
+ np_applied:1,
+ instance_applied:1,
+ version:2,
+ reserved_flags2:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved_flags2:2,
+ version:2,
+ instance_applied:1,
+ np_applied:1,
+ reserved_flags1:1,
+ oam_flag:1;
+#endif
+ u8 reserved_flags3;
+ u8 reserved_flags4;
+ u8 next_protocol;
+ __be32 vx_vni;
+};
+
+/* VXLAN-GPE header flags. */
+#define VXLAN_HF_VER cpu_to_be32(BIT(29) | BIT(28))
+#define VXLAN_HF_NP cpu_to_be32(BIT(26))
+#define VXLAN_HF_OAM cpu_to_be32(BIT(24))
+
+#define VXLAN_GPE_USED_BITS (VXLAN_HF_VER | VXLAN_HF_NP | VXLAN_HF_OAM | \
+ cpu_to_be32(0xff))
+
+struct vxlan_metadata {
+ u32 gbp;
+};
+
+/* per UDP socket information */
+struct vxlan_sock {
+ struct hlist_node hlist;
+ struct socket *sock;
+ struct hlist_head vni_list[VNI_HASH_SIZE];
+ refcount_t refcnt;
+ u32 flags;
+};
+
+union vxlan_addr {
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ struct sockaddr sa;
+};
+
+struct vxlan_rdst {
+ union vxlan_addr remote_ip;
+ __be16 remote_port;
+ u8 offloaded:1;
+ __be32 remote_vni;
+ u32 remote_ifindex;
+ struct net_device *remote_dev;
+ struct list_head list;
+ struct rcu_head rcu;
+ struct dst_cache dst_cache;
+};
+
+struct vxlan_config {
+ union vxlan_addr remote_ip;
+ union vxlan_addr saddr;
+ __be32 vni;
+ int remote_ifindex;
+ int mtu;
+ __be16 dst_port;
+ u16 port_min;
+ u16 port_max;
+ u8 tos;
+ u8 ttl;
+ __be32 label;
+ u32 flags;
+ unsigned long age_interval;
+ unsigned int addrmax;
+ bool no_share;
+ enum ifla_vxlan_df df;
+};
+
+enum {
+ VXLAN_VNI_STATS_RX,
+ VXLAN_VNI_STATS_RX_DROPS,
+ VXLAN_VNI_STATS_RX_ERRORS,
+ VXLAN_VNI_STATS_TX,
+ VXLAN_VNI_STATS_TX_DROPS,
+ VXLAN_VNI_STATS_TX_ERRORS,
+};
+
+struct vxlan_vni_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_drops;
+ u64 rx_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_drops;
+ u64 tx_errors;
+};
+
+struct vxlan_vni_stats_pcpu {
+ struct vxlan_vni_stats stats;
+ struct u64_stats_sync syncp;
+};
+
+struct vxlan_dev_node {
+ struct hlist_node hlist;
+ struct vxlan_dev *vxlan;
+};
+
+struct vxlan_vni_node {
+ struct rhash_head vnode;
+ struct vxlan_dev_node hlist4; /* vni hash table for IPv4 socket */
+#if IS_ENABLED(CONFIG_IPV6)
+ struct vxlan_dev_node hlist6; /* vni hash table for IPv6 socket */
+#endif
+ struct list_head vlist;
+ __be32 vni;
+ union vxlan_addr remote_ip; /* default remote ip for this vni */
+ struct vxlan_vni_stats_pcpu __percpu *stats;
+
+ struct rcu_head rcu;
+};
+
+struct vxlan_vni_group {
+ struct rhashtable vni_hash;
+ struct list_head vni_list;
+ u32 num_vnis;
+};
+
+/* Pseudo network device */
+struct vxlan_dev {
+ struct vxlan_dev_node hlist4; /* vni hash table for IPv4 socket */
+#if IS_ENABLED(CONFIG_IPV6)
+ struct vxlan_dev_node hlist6; /* vni hash table for IPv6 socket */
+#endif
+ struct list_head next; /* vxlan's per namespace list */
+ struct vxlan_sock __rcu *vn4_sock; /* listening socket for IPv4 */
+#if IS_ENABLED(CONFIG_IPV6)
+ struct vxlan_sock __rcu *vn6_sock; /* listening socket for IPv6 */
+#endif
+ struct net_device *dev;
+ struct net *net; /* netns for packet i/o */
+ struct vxlan_rdst default_dst; /* default destination */
+
+ struct timer_list age_timer;
+ spinlock_t hash_lock[FDB_HASH_SIZE];
+ unsigned int addrcnt;
+ struct gro_cells gro_cells;
+
+ struct vxlan_config cfg;
+
+ struct vxlan_vni_group __rcu *vnigrp;
+
+ struct hlist_head fdb_head[FDB_HASH_SIZE];
+};
+
+#define VXLAN_F_LEARN 0x01
+#define VXLAN_F_PROXY 0x02
+#define VXLAN_F_RSC 0x04
+#define VXLAN_F_L2MISS 0x08
+#define VXLAN_F_L3MISS 0x10
+#define VXLAN_F_IPV6 0x20
+#define VXLAN_F_UDP_ZERO_CSUM_TX 0x40
+#define VXLAN_F_UDP_ZERO_CSUM6_TX 0x80
+#define VXLAN_F_UDP_ZERO_CSUM6_RX 0x100
+#define VXLAN_F_REMCSUM_TX 0x200
+#define VXLAN_F_REMCSUM_RX 0x400
+#define VXLAN_F_GBP 0x800
+#define VXLAN_F_REMCSUM_NOPARTIAL 0x1000
+#define VXLAN_F_COLLECT_METADATA 0x2000
+#define VXLAN_F_GPE 0x4000
+#define VXLAN_F_IPV6_LINKLOCAL 0x8000
+#define VXLAN_F_TTL_INHERIT 0x10000
+#define VXLAN_F_VNIFILTER 0x20000
+
+/* Flags that are used in the receive path. These flags must match in
+ * order for a socket to be shareable
+ */
+#define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \
+ VXLAN_F_GPE | \
+ VXLAN_F_UDP_ZERO_CSUM6_RX | \
+ VXLAN_F_REMCSUM_RX | \
+ VXLAN_F_REMCSUM_NOPARTIAL | \
+ VXLAN_F_COLLECT_METADATA | \
+ VXLAN_F_VNIFILTER)
+
+/* Flags that can be set together with VXLAN_F_GPE. */
+#define VXLAN_F_ALLOWED_GPE (VXLAN_F_GPE | \
+ VXLAN_F_IPV6 | \
+ VXLAN_F_IPV6_LINKLOCAL | \
+ VXLAN_F_UDP_ZERO_CSUM_TX | \
+ VXLAN_F_UDP_ZERO_CSUM6_TX | \
+ VXLAN_F_UDP_ZERO_CSUM6_RX | \
+ VXLAN_F_COLLECT_METADATA | \
+ VXLAN_F_VNIFILTER)
+
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+ u8 name_assign_type, struct vxlan_config *conf);
+
+static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ u8 l4_hdr = 0;
+
+ if (!skb->encapsulation)
+ return features;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features;
+ }
+
+ if ((l4_hdr == IPPROTO_UDP) &&
+ (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB) ||
+ (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr)) ||
+ (skb->ip_summed != CHECKSUM_NONE &&
+ !can_checksum_protocol(features, inner_eth_hdr(skb)->h_proto))))
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+ return features;
+}
+
+static inline int vxlan_headroom(u32 flags)
+{
+ /* VXLAN: IP4/6 header + UDP + VXLAN + Ethernet header */
+ /* VXLAN-GPE: IP4/6 header + UDP + VXLAN */
+ return (flags & VXLAN_F_IPV6 ? sizeof(struct ipv6hdr) :
+ sizeof(struct iphdr)) +
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr) +
+ (flags & VXLAN_F_GPE ? 0 : ETH_HLEN);
+}
+
+static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb)
+{
+ return (struct vxlanhdr *)(udp_hdr(skb) + 1);
+}
+
+static inline __be32 vxlan_vni(__be32 vni_field)
+{
+#if defined(__BIG_ENDIAN)
+ return (__force __be32)((__force u32)vni_field >> 8);
+#else
+ return (__force __be32)((__force u32)(vni_field & VXLAN_VNI_MASK) << 8);
+#endif
+}
+
+static inline __be32 vxlan_vni_field(__be32 vni)
+{
+#if defined(__BIG_ENDIAN)
+ return (__force __be32)((__force u32)vni << 8);
+#else
+ return (__force __be32)((__force u32)vni >> 8);
+#endif
+}
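+
+/* Illustrative sketch (not part of this header): on the wire the VNI
+ * occupies the upper 24 bits of vx_vni, so these helpers round-trip:
+ *
+ *	__be32 vni = cpu_to_be32(42);
+ *	struct vxlanhdr hdr = {
+ *		.vx_flags = VXLAN_HF_VNI,
+ *		.vx_vni   = vxlan_vni_field(vni),
+ *	};
+ *
+ *	WARN_ON(vxlan_vni(hdr.vx_vni) != vni);
+ */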
+
+static inline size_t vxlan_rco_start(__be32 vni_field)
+{
+ return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
+}
+
+static inline size_t vxlan_rco_offset(__be32 vni_field)
+{
+ return (vni_field & VXLAN_RCO_UDP) ?
+ offsetof(struct udphdr, check) :
+ offsetof(struct tcphdr, check);
+}
+
+static inline __be32 vxlan_compute_rco(unsigned int start, unsigned int offset)
+{
+ __be32 vni_field = cpu_to_be32(start >> VXLAN_RCO_SHIFT);
+
+ if (offset == offsetof(struct udphdr, check))
+ vni_field |= VXLAN_RCO_UDP;
+ return vni_field;
+}
+
+static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
+{
+ return vs->sock->sk->sk_family;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
+{
+ if (ipa->sa.sa_family == AF_INET6)
+ return ipv6_addr_any(&ipa->sin6.sin6_addr);
+ else
+ return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
+}
+
+static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
+{
+ if (ipa->sa.sa_family == AF_INET6)
+ return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
+ else
+ return ipv4_is_multicast(ipa->sin.sin_addr.s_addr);
+}
+
+#else /* !IS_ENABLED(CONFIG_IPV6) */
+
+static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
+{
+ return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
+}
+
+static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
+{
+ return ipv4_is_multicast(ipa->sin.sin_addr.s_addr);
+}
+
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
+static inline bool netif_is_vxlan(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "vxlan");
+}
+
+struct switchdev_notifier_vxlan_fdb_info {
+ struct switchdev_notifier_info info; /* must be first */
+ union vxlan_addr remote_ip;
+ __be16 remote_port;
+ __be32 remote_vni;
+ u32 remote_ifindex;
+ u8 eth_addr[ETH_ALEN];
+ __be32 vni;
+ bool offloaded;
+ bool added_by_user;
+};
+
+#if IS_ENABLED(CONFIG_VXLAN)
+int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info);
+int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
+void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni);
+
+#else
+static inline int
+vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info)
+{
+ return -ENOENT;
+}
+
+static inline int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni)
+{
+}
+#endif
+
+static inline void vxlan_flag_attr_error(int attrtype,
+ struct netlink_ext_ack *extack)
+{
+#define VXLAN_FLAG(flg) \
+ case IFLA_VXLAN_##flg: \
+ NL_SET_ERR_MSG_MOD(extack, \
+ "cannot change " #flg " flag"); \
+ break
+ switch (attrtype) {
+ VXLAN_FLAG(TTL_INHERIT);
+ VXLAN_FLAG(LEARNING);
+ VXLAN_FLAG(PROXY);
+ VXLAN_FLAG(RSC);
+ VXLAN_FLAG(L2MISS);
+ VXLAN_FLAG(L3MISS);
+ VXLAN_FLAG(COLLECT_METADATA);
+ VXLAN_FLAG(UDP_ZERO_CSUM6_TX);
+ VXLAN_FLAG(UDP_ZERO_CSUM6_RX);
+ VXLAN_FLAG(REMCSUM_TX);
+ VXLAN_FLAG(REMCSUM_RX);
+ VXLAN_FLAG(GBP);
+ VXLAN_FLAG(GPE);
+ VXLAN_FLAG(REMCSUM_NOPARTIAL);
+ default:
+		NL_SET_ERR_MSG_MOD(extack,
+				   "cannot change flag");
+ break;
+ }
+#undef VXLAN_FLAG
+}
+
+static inline bool vxlan_fdb_nh_path_select(struct nexthop *nh,
+ u32 hash,
+ struct vxlan_rdst *rdst)
+{
+ struct fib_nh_common *nhc;
+
+ nhc = nexthop_path_fdb_result(nh, hash >> 1);
+ if (unlikely(!nhc))
+ return false;
+
+ switch (nhc->nhc_gw_family) {
+ case AF_INET:
+ rdst->remote_ip.sin.sin_addr.s_addr = nhc->nhc_gw.ipv4;
+ rdst->remote_ip.sa.sa_family = AF_INET;
+ break;
+ case AF_INET6:
+ rdst->remote_ip.sin6.sin6_addr = nhc->nhc_gw.ipv6;
+ rdst->remote_ip.sa.sa_family = AF_INET6;
+ break;
+ }
+
+ return true;
+}
+
+#endif
diff --git a/include/net/wext.h b/include/net/wext.h
new file mode 100644
index 000000000..aa192a670
--- /dev/null
+++ b/include/net/wext.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_WEXT_H
+#define __NET_WEXT_H
+
+#include <net/iw_handler.h>
+
+struct net;
+
+#ifdef CONFIG_WEXT_CORE
+int wext_handle_ioctl(struct net *net, unsigned int cmd,
+ void __user *arg);
+int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
+ unsigned long arg);
+
+struct iw_statistics *get_wireless_stats(struct net_device *dev);
+int call_commit_handler(struct net_device *dev);
+#else
+static inline int wext_handle_ioctl(struct net *net, unsigned int cmd,
+ void __user *arg)
+{
+ return -EINVAL;
+}
+static inline int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
+ unsigned long arg)
+{
+ return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_WEXT_PROC
+int wext_proc_init(struct net *net);
+void wext_proc_exit(struct net *net);
+#else
+static inline int wext_proc_init(struct net *net)
+{
+ return 0;
+}
+static inline void wext_proc_exit(struct net *net)
+{
+ return;
+}
+#endif
+
+#ifdef CONFIG_WEXT_PRIV
+int ioctl_private_call(struct net_device *dev, struct iwreq *iwr,
+ unsigned int cmd, struct iw_request_info *info,
+ iw_handler handler);
+int compat_private_call(struct net_device *dev, struct iwreq *iwr,
+ unsigned int cmd, struct iw_request_info *info,
+ iw_handler handler);
+int iw_handler_get_private(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu,
+			   char *extra);
+#else
+#define ioctl_private_call NULL
+#define compat_private_call NULL
+#endif
+
+#endif /* __NET_WEXT_H */
diff --git a/include/net/x25.h b/include/net/x25.h
new file mode 100644
index 000000000..d7d6c2b4f
--- /dev/null
+++ b/include/net/x25.h
@@ -0,0 +1,327 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Declarations of X.25 Packet Layer type objects.
+ *
+ * History
+ * nov/17/96 Jonathan Naylor Initial version.
+ * mar/20/00 Daniela Squassoni Disabling/enabling of facilities
+ * negotiation.
+ */
+
+#ifndef _X25_H
+#define _X25_H
+#include <linux/x25.h>
+#include <linux/slab.h>
+#include <linux/refcount.h>
+#include <net/sock.h>
+
+#define X25_ADDR_LEN 16
+
+#define X25_MAX_L2_LEN 18 /* 802.2 LLC */
+
+#define X25_STD_MIN_LEN 3
+#define X25_EXT_MIN_LEN 4
+
+#define X25_GFI_SEQ_MASK 0x30
+#define X25_GFI_STDSEQ 0x10
+#define X25_GFI_EXTSEQ 0x20
+
+#define X25_Q_BIT 0x80
+#define X25_D_BIT 0x40
+#define X25_STD_M_BIT 0x10
+#define X25_EXT_M_BIT 0x01
+
+#define X25_CALL_REQUEST 0x0B
+#define X25_CALL_ACCEPTED 0x0F
+#define X25_CLEAR_REQUEST 0x13
+#define X25_CLEAR_CONFIRMATION 0x17
+#define X25_DATA 0x00
+#define X25_INTERRUPT 0x23
+#define X25_INTERRUPT_CONFIRMATION 0x27
+#define X25_RR 0x01
+#define X25_RNR 0x05
+#define X25_REJ 0x09
+#define X25_RESET_REQUEST 0x1B
+#define X25_RESET_CONFIRMATION 0x1F
+#define X25_REGISTRATION_REQUEST 0xF3
+#define X25_REGISTRATION_CONFIRMATION 0xF7
+#define X25_RESTART_REQUEST 0xFB
+#define X25_RESTART_CONFIRMATION 0xFF
+#define X25_DIAGNOSTIC 0xF1
+#define X25_ILLEGAL 0xFD
+
+/* Define the various conditions that may exist */
+
+#define X25_COND_ACK_PENDING 0x01
+#define X25_COND_OWN_RX_BUSY 0x02
+#define X25_COND_PEER_RX_BUSY 0x04
+
+/* Define Link State constants. */
+enum {
+ X25_STATE_0, /* Ready */
+ X25_STATE_1, /* Awaiting Call Accepted */
+ X25_STATE_2, /* Awaiting Clear Confirmation */
+ X25_STATE_3, /* Data Transfer */
+ X25_STATE_4, /* Awaiting Reset Confirmation */
+ X25_STATE_5 /* Call Accepted / Call Connected pending */
+};
+
+enum {
+ X25_LINK_STATE_0,
+ X25_LINK_STATE_1,
+ X25_LINK_STATE_2,
+ X25_LINK_STATE_3
+};
+
+#define X25_DEFAULT_T20 (180 * HZ) /* Default T20 value */
+#define X25_DEFAULT_T21 (200 * HZ) /* Default T21 value */
+#define X25_DEFAULT_T22 (180 * HZ) /* Default T22 value */
+#define X25_DEFAULT_T23 (180 * HZ) /* Default T23 value */
+#define X25_DEFAULT_T2 (3 * HZ) /* Default ack holdback value */
+
+#define X25_DEFAULT_WINDOW_SIZE 2 /* Default Window Size */
+#define X25_DEFAULT_PACKET_SIZE X25_PS128 /* Default Packet Size */
+#define X25_DEFAULT_THROUGHPUT 0x0A /* Default Throughput */
+#define X25_DEFAULT_REVERSE 0x00 /* Default Reverse Charging */
+
+#define X25_SMODULUS 8
+#define X25_EMODULUS 128
+
+/*
+ * X.25 Facilities constants.
+ */
+
+#define X25_FAC_CLASS_MASK 0xC0
+
+#define X25_FAC_CLASS_A 0x00
+#define X25_FAC_CLASS_B 0x40
+#define X25_FAC_CLASS_C 0x80
+#define X25_FAC_CLASS_D 0xC0
+
+#define X25_FAC_REVERSE 0x01 /* also fast select */
+#define X25_FAC_THROUGHPUT 0x02
+#define X25_FAC_PACKET_SIZE 0x42
+#define X25_FAC_WINDOW_SIZE 0x43
+
+#define X25_MAX_FAC_LEN 60
+#define X25_MAX_CUD_LEN 128
+
+#define X25_FAC_CALLING_AE 0xCB
+#define X25_FAC_CALLED_AE 0xC9
+
+#define X25_MARKER 0x00
+#define X25_DTE_SERVICES 0x0F
+#define X25_MAX_AE_LEN 40 /* Max num of semi-octets in AE - OSI Nw */
+#define X25_MAX_DTE_FACIL_LEN 21 /* Max length of DTE facility params */
+
+/* Bitset in x25_sock->flags for misc flags */
+#define X25_Q_BIT_FLAG 0
+#define X25_INTERRUPT_FLAG 1
+#define X25_ACCPT_APPRV_FLAG 2
+
+/**
+ * struct x25_route - x25 routing entry
+ * @node: entry in the x25_route_list
+ * @address: start of the address range
+ * @sigdigits: number of significant digits
+ * @dev: device - more than one for MLP
+ * @refcnt: reference counter
+ */
+struct x25_route {
+ struct list_head node;
+ struct x25_address address;
+ unsigned int sigdigits;
+ struct net_device *dev;
+ refcount_t refcnt;
+};
+
+struct x25_neigh {
+ struct list_head node;
+ struct net_device *dev;
+ unsigned int state;
+ unsigned int extended;
+ struct sk_buff_head queue;
+ unsigned long t20;
+ struct timer_list t20timer;
+ unsigned long global_facil_mask;
+ refcount_t refcnt;
+};
+
+struct x25_sock {
+ struct sock sk;
+ struct x25_address source_addr, dest_addr;
+ struct x25_neigh *neighbour;
+ unsigned int lci, cudmatchlength;
+ unsigned char state, condition;
+ unsigned short vs, vr, va, vl;
+ unsigned long t2, t21, t22, t23;
+ unsigned short fraglen;
+ unsigned long flags;
+ struct sk_buff_head ack_queue;
+ struct sk_buff_head fragment_queue;
+ struct sk_buff_head interrupt_in_queue;
+ struct sk_buff_head interrupt_out_queue;
+ struct timer_list timer;
+ struct x25_causediag causediag;
+ struct x25_facilities facilities;
+ struct x25_dte_facilities dte_facilities;
+ struct x25_calluserdata calluserdata;
+ unsigned long vc_facil_mask; /* inc_call facilities mask */
+};
+
+struct x25_forward {
+ struct list_head node;
+ unsigned int lci;
+ struct net_device *dev1;
+ struct net_device *dev2;
+ atomic_t refcnt;
+};
+
+static inline struct x25_sock *x25_sk(const struct sock *sk)
+{
+ return (struct x25_sock *)sk;
+}
+
+/* af_x25.c */
+extern int sysctl_x25_restart_request_timeout;
+extern int sysctl_x25_call_request_timeout;
+extern int sysctl_x25_reset_request_timeout;
+extern int sysctl_x25_clear_request_timeout;
+extern int sysctl_x25_ack_holdback_timeout;
+extern int sysctl_x25_forward;
+
+int x25_parse_address_block(struct sk_buff *skb,
+ struct x25_address *called_addr,
+ struct x25_address *calling_addr);
+
+int x25_addr_ntoa(unsigned char *, struct x25_address *, struct x25_address *);
+int x25_addr_aton(unsigned char *, struct x25_address *, struct x25_address *);
+struct sock *x25_find_socket(unsigned int, struct x25_neigh *);
+void x25_destroy_socket_from_timer(struct sock *);
+int x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int);
+void x25_kill_by_neigh(struct x25_neigh *);
+
+/* x25_dev.c */
+void x25_send_frame(struct sk_buff *, struct x25_neigh *);
+int x25_lapb_receive_frame(struct sk_buff *, struct net_device *,
+ struct packet_type *, struct net_device *);
+void x25_establish_link(struct x25_neigh *);
+void x25_terminate_link(struct x25_neigh *);
+
+/* x25_facilities.c */
+int x25_parse_facilities(struct sk_buff *, struct x25_facilities *,
+ struct x25_dte_facilities *, unsigned long *);
+int x25_create_facilities(unsigned char *, struct x25_facilities *,
+ struct x25_dte_facilities *, unsigned long);
+int x25_negotiate_facilities(struct sk_buff *, struct sock *,
+ struct x25_facilities *,
+ struct x25_dte_facilities *);
+void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *);
+
+/* x25_forward.c */
+void x25_clear_forward_by_lci(unsigned int lci);
+void x25_clear_forward_by_dev(struct net_device *);
+int x25_forward_data(int, struct x25_neigh *, struct sk_buff *);
+int x25_forward_call(struct x25_address *, struct x25_neigh *, struct sk_buff *,
+ int);
+
+/* x25_in.c */
+int x25_process_rx_frame(struct sock *, struct sk_buff *);
+int x25_backlog_rcv(struct sock *, struct sk_buff *);
+
+/* x25_link.c */
+void x25_link_control(struct sk_buff *, struct x25_neigh *, unsigned short);
+void x25_link_device_up(struct net_device *);
+void x25_link_device_down(struct net_device *);
+void x25_link_established(struct x25_neigh *);
+void x25_link_terminated(struct x25_neigh *);
+void x25_transmit_clear_request(struct x25_neigh *, unsigned int,
+ unsigned char);
+void x25_transmit_link(struct sk_buff *, struct x25_neigh *);
+int x25_subscr_ioctl(unsigned int, void __user *);
+struct x25_neigh *x25_get_neigh(struct net_device *);
+void x25_link_free(void);
+
+/* x25_neigh.c */
+static __inline__ void x25_neigh_hold(struct x25_neigh *nb)
+{
+ refcount_inc(&nb->refcnt);
+}
+
+static __inline__ void x25_neigh_put(struct x25_neigh *nb)
+{
+ if (refcount_dec_and_test(&nb->refcnt))
+ kfree(nb);
+}
+
+/* x25_out.c */
+int x25_output(struct sock *, struct sk_buff *);
+void x25_kick(struct sock *);
+void x25_enquiry_response(struct sock *);
+
+/* x25_route.c */
+struct x25_route *x25_get_route(struct x25_address *addr);
+struct net_device *x25_dev_get(char *);
+void x25_route_device_down(struct net_device *dev);
+int x25_route_ioctl(unsigned int, void __user *);
+void x25_route_free(void);
+
+static __inline__ void x25_route_hold(struct x25_route *rt)
+{
+ refcount_inc(&rt->refcnt);
+}
+
+static __inline__ void x25_route_put(struct x25_route *rt)
+{
+ if (refcount_dec_and_test(&rt->refcnt))
+ kfree(rt);
+}
+
+/* x25_subr.c */
+void x25_clear_queues(struct sock *);
+void x25_frames_acked(struct sock *, unsigned short);
+void x25_requeue_frames(struct sock *);
+int x25_validate_nr(struct sock *, unsigned short);
+void x25_write_internal(struct sock *, int);
+int x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *,
+ int *);
+void x25_disconnect(struct sock *, int, unsigned char, unsigned char);
+
+/* x25_timer.c */
+void x25_init_timers(struct sock *sk);
+void x25_start_heartbeat(struct sock *);
+void x25_start_t2timer(struct sock *);
+void x25_start_t21timer(struct sock *);
+void x25_start_t22timer(struct sock *);
+void x25_start_t23timer(struct sock *);
+void x25_stop_heartbeat(struct sock *);
+void x25_stop_timer(struct sock *);
+unsigned long x25_display_timer(struct sock *);
+void x25_check_rbuf(struct sock *);
+
+/* sysctl_net_x25.c */
+#ifdef CONFIG_SYSCTL
+int x25_register_sysctl(void);
+void x25_unregister_sysctl(void);
+#else
+static inline int x25_register_sysctl(void) { return 0; }
+static inline void x25_unregister_sysctl(void) {}
+#endif /* CONFIG_SYSCTL */
+
+struct x25_skb_cb {
+ unsigned int flags;
+};
+#define X25_SKB_CB(s) ((struct x25_skb_cb *) ((s)->cb))
+
+extern struct hlist_head x25_list;
+extern rwlock_t x25_list_lock;
+extern struct list_head x25_route_list;
+extern rwlock_t x25_route_list_lock;
+extern struct list_head x25_forward_list;
+extern rwlock_t x25_forward_list_lock;
+extern struct list_head x25_neigh_list;
+extern rwlock_t x25_neigh_list_lock;
+
+int x25_proc_init(void);
+void x25_proc_exit(void);
+#endif
diff --git a/include/net/x25device.h b/include/net/x25device.h
new file mode 100644
index 000000000..cf749efca
--- /dev/null
+++ b/include/net/x25device.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _X25DEVICE_H
+#define _X25DEVICE_H
+
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/if_x25.h>
+#include <linux/skbuff.h>
+
+static inline __be16 x25_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb->pkt_type = PACKET_HOST;
+
+ return htons(ETH_P_X25);
+}
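+
+/* Illustrative sketch (not part of this header): an X.25 device driver
+ * receive path would typically set skb->protocol from this helper before
+ * handing the frame to the stack:
+ *
+ *	skb->protocol = x25_type_trans(skb, dev);
+ *	netif_rx(skb);
+ */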
+#endif
diff --git a/include/net/xdp.h b/include/net/xdp.h
new file mode 100644
index 000000000..55dbc68bf
--- /dev/null
+++ b/include/net/xdp.h
@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* include/net/xdp.h
+ *
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ */
+#ifndef __LINUX_NET_XDP_H__
+#define __LINUX_NET_XDP_H__
+
+#include <linux/skbuff.h> /* skb_shared_info */
+
+/**
+ * DOC: XDP RX-queue information
+ *
+ * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
+ * level RX-ring queues. It is information that is specific to how
+ * the driver has configured a given RX-ring queue.
+ *
+ * Each xdp_buff frame received in the driver carries a (pointer)
+ * reference to this xdp_rxq_info structure. This provides the XDP
+ * data-path read-access to RX-info for both kernel and bpf-side
+ * (limited subset).
+ *
+ * For now, direct access is only safe while running in NAPI/softirq
+ * context. Contents are read-mostly and must not be updated during
+ * driver NAPI/softirq poll.
+ *
+ * The driver usage API is a register and unregister API.
+ *
+ * The struct is not directly tied to the XDP prog. A new XDP prog
+ * can be attached as long as it doesn't change the underlying
+ * RX-ring. If the RX-ring does change significantly, the NIC driver
+ * naturally needs to stop the RX-ring before purging and reallocating
+ * memory. In that process the driver MUST call unregister (which
+ * also applies for driver shutdown and unload). The register API is
+ * also mandatory during RX-ring setup.
+ */
+
+enum xdp_mem_type {
+ MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
+ MEM_TYPE_PAGE_ORDER0, /* Orig XDP full page model */
+ MEM_TYPE_PAGE_POOL,
+ MEM_TYPE_XSK_BUFF_POOL,
+ MEM_TYPE_MAX,
+};
+
+/* XDP flags for ndo_xdp_xmit */
+#define XDP_XMIT_FLUSH (1U << 0) /* doorbell signal consumer */
+#define XDP_XMIT_FLAGS_MASK XDP_XMIT_FLUSH
+
+struct xdp_mem_info {
+ u32 type; /* enum xdp_mem_type, but known size type */
+ u32 id;
+};
+
+struct page_pool;
+
+struct xdp_rxq_info {
+ struct net_device *dev;
+ u32 queue_index;
+ u32 reg_state;
+ struct xdp_mem_info mem;
+ unsigned int napi_id;
+ u32 frag_size;
+} ____cacheline_aligned; /* perf critical, avoid false-sharing */
+
+struct xdp_txq_info {
+ struct net_device *dev;
+};
+
+enum xdp_buff_flags {
+ XDP_FLAGS_HAS_FRAGS = BIT(0), /* non-linear xdp buff */
+ XDP_FLAGS_FRAGS_PF_MEMALLOC = BIT(1), /* xdp paged memory is under
+ * pressure
+ */
+};
+
+struct xdp_buff {
+ void *data;
+ void *data_end;
+ void *data_meta;
+ void *data_hard_start;
+ struct xdp_rxq_info *rxq;
+ struct xdp_txq_info *txq;
+	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom */
+ u32 flags; /* supported values defined in xdp_buff_flags */
+};
+
+static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
+{
+ return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
+}
+
+static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp)
+{
+ xdp->flags |= XDP_FLAGS_HAS_FRAGS;
+}
+
+static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
+{
+ xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
+}
+
+static __always_inline bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp)
+{
+ return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
+}
+
+static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)
+{
+ xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;
+}
+
+static __always_inline void
+xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
+{
+ xdp->frame_sz = frame_sz;
+ xdp->rxq = rxq;
+ xdp->flags = 0;
+}
+
+static __always_inline void
+xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
+ int headroom, int data_len, const bool meta_valid)
+{
+ unsigned char *data = hard_start + headroom;
+
+ xdp->data_hard_start = hard_start;
+ xdp->data = data;
+ xdp->data_end = data + data_len;
+ xdp->data_meta = meta_valid ? data : data + 1;
+}
+
+/* Reserve memory area at end-of data area.
+ *
+ * This macro reserves tailroom in the XDP buffer by limiting the
+ * XDP/BPF data access to data_hard_end. Notice same area (and size)
+ * is used for XDP_PASS, when constructing the SKB via build_skb().
+ */
+#define xdp_data_hard_end(xdp) \
+ ((xdp)->data_hard_start + (xdp)->frame_sz - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
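+
+/* Worked example (assumed layout): a driver using one full page per
+ * frame sets frame_sz = PAGE_SIZE, e.g.:
+ *
+ *	xdp_init_buff(&xdp, PAGE_SIZE, &rxq);
+ *
+ * xdp_data_hard_end() then points SKB_DATA_ALIGN(sizeof(struct
+ * skb_shared_info)) bytes below the end of the page, the same area
+ * build_skb() reuses as the skb_shared_info on XDP_PASS.
+ */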
+
+static inline struct skb_shared_info *
+xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
+{
+ return (struct skb_shared_info *)xdp_data_hard_end(xdp);
+}
+
+static __always_inline unsigned int xdp_get_buff_len(struct xdp_buff *xdp)
+{
+ unsigned int len = xdp->data_end - xdp->data;
+ struct skb_shared_info *sinfo;
+
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ len += sinfo->xdp_frags_size;
+out:
+ return len;
+}
+
+struct xdp_frame {
+ void *data;
+ u16 len;
+ u16 headroom;
+ u32 metasize; /* uses lower 8-bits */
+ /* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
+ * while mem info is valid on remote CPU.
+ */
+ struct xdp_mem_info mem;
+ struct net_device *dev_rx; /* used by cpumap */
+ u32 frame_sz;
+ u32 flags; /* supported values defined in xdp_buff_flags */
+};
+
+static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
+{
+ return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
+}
+
+static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame)
+{
+ return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
+}
+
+#define XDP_BULK_QUEUE_SIZE 16
+struct xdp_frame_bulk {
+ int count;
+ void *xa;
+ void *q[XDP_BULK_QUEUE_SIZE];
+};
+
+static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
+{
+ /* bq->count will be zeroed when bq->xa gets updated */
+ bq->xa = NULL;
+}
+
+static inline struct skb_shared_info *
+xdp_get_shared_info_from_frame(struct xdp_frame *frame)
+{
+ void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);
+
+ return (struct skb_shared_info *)(data_hard_start + frame->frame_sz -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+}
+
+struct xdp_cpumap_stats {
+ unsigned int redirect;
+ unsigned int pass;
+ unsigned int drop;
+};
+
+/* Clear kernel pointers in xdp_frame */
+static inline void xdp_scrub_frame(struct xdp_frame *frame)
+{
+ frame->data = NULL;
+ frame->dev_rx = NULL;
+}
+
+static inline void
+xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
+ unsigned int size, unsigned int truesize,
+ bool pfmemalloc)
+{
+ skb_shinfo(skb)->nr_frags = nr_frags;
+
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += truesize;
+ skb->pfmemalloc |= pfmemalloc;
+}
+
+/* Avoids inlining WARN macro in fast-path */
+void xdp_warn(const char *msg, const char *func, const int line);
+#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)
+
+struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
+struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
+ struct sk_buff *skb,
+ struct net_device *dev);
+struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
+ struct net_device *dev);
+int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp);
+struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf);
+
+static inline
+void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
+{
+ xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame);
+ xdp->data = frame->data;
+ xdp->data_end = frame->data + frame->len;
+ xdp->data_meta = frame->data - frame->metasize;
+ xdp->frame_sz = frame->frame_sz;
+ xdp->flags = frame->flags;
+}
+
+static inline
+int xdp_update_frame_from_buff(struct xdp_buff *xdp,
+ struct xdp_frame *xdp_frame)
+{
+ int metasize, headroom;
+
+ /* Ensure headroom is available for storing info */
+ headroom = xdp->data - xdp->data_hard_start;
+ metasize = xdp->data - xdp->data_meta;
+ metasize = metasize > 0 ? metasize : 0;
+ if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
+ return -ENOSPC;
+
+ /* Catch if driver didn't reserve tailroom for skb_shared_info */
+ if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
+ XDP_WARN("Driver BUG: missing reserved tailroom");
+ return -ENOSPC;
+ }
+
+ xdp_frame->data = xdp->data;
+ xdp_frame->len = xdp->data_end - xdp->data;
+ xdp_frame->headroom = headroom - sizeof(*xdp_frame);
+ xdp_frame->metasize = metasize;
+ xdp_frame->frame_sz = xdp->frame_sz;
+ xdp_frame->flags = xdp->flags;
+
+ return 0;
+}
+
+/* Convert xdp_buff to xdp_frame */
+static inline
+struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
+{
+ struct xdp_frame *xdp_frame;
+
+ if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
+ return xdp_convert_zc_to_xdp_frame(xdp);
+
+ /* Store info in top of packet */
+ xdp_frame = xdp->data_hard_start;
+ if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0))
+ return NULL;
+
+ /* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
+ xdp_frame->mem = xdp->rxq->mem;
+
+ return xdp_frame;
+}
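+
+/* Sketch of a typical XDP_REDIRECT/ndo_xdp_xmit use (illustrative
+ * only; enqueue_to_tx_ring() is a hypothetical driver helper and real
+ * error handling is elided):
+ *
+ *	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ *
+ *	if (unlikely(!xdpf))
+ *		goto drop;
+ *	enqueue_to_tx_ring(xdpf);
+ *	// later, once transmission has completed:
+ *	xdp_return_frame(xdpf);	// or xdp_return_frame_rx_napi() in NAPI
+ */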
+
+void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+ struct xdp_buff *xdp);
+void xdp_return_frame(struct xdp_frame *xdpf);
+void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
+void xdp_return_buff(struct xdp_buff *xdp);
+void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
+void xdp_return_frame_bulk(struct xdp_frame *xdpf,
+ struct xdp_frame_bulk *bq);
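+
+/* Bulk-return usage sketch (assumed caller context; the bulk queue
+ * batches returns per memory allocator, and callers typically hold
+ * rcu_read_lock() across the sequence):
+ *
+ *	struct xdp_frame_bulk bq;
+ *
+ *	xdp_frame_bulk_init(&bq);
+ *	for (i = 0; i < n; i++)
+ *		xdp_return_frame_bulk(frames[i], &bq);
+ *	xdp_flush_frame_bulk(&bq);
+ */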
+
+/* When an xdp_frame is sent into the network stack, there is no
+ * return-point callback, which is needed to release e.g. DMA-mapping
+ * resources with page_pool. Thus, have an explicit function to release
+ * frame resources.
+ */
+void __xdp_release_frame(void *data, struct xdp_mem_info *mem);
+static inline void xdp_release_frame(struct xdp_frame *xdpf)
+{
+ struct xdp_mem_info *mem = &xdpf->mem;
+ struct skb_shared_info *sinfo;
+ int i;
+
+ /* Currently only page_pool needs this */
+ if (mem->type != MEM_TYPE_PAGE_POOL)
+ return;
+
+ if (likely(!xdp_frame_has_frags(xdpf)))
+ goto out;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ struct page *page = skb_frag_page(&sinfo->frags[i]);
+
+ __xdp_release_frame(page_address(page), mem);
+ }
+out:
+ __xdp_release_frame(xdpf->data, mem);
+}
+
+static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf)
+{
+ struct skb_shared_info *sinfo;
+ unsigned int len = xdpf->len;
+
+ if (likely(!xdp_frame_has_frags(xdpf)))
+ goto out;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ len += sinfo->xdp_frags_size;
+out:
+ return len;
+}
+
+int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+ struct net_device *dev, u32 queue_index,
+ unsigned int napi_id, u32 frag_size);
+static inline int
+xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+ struct net_device *dev, u32 queue_index,
+ unsigned int napi_id)
+{
+ return __xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id, 0);
+}
+
+void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
+void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
+bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
+int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
+ enum xdp_mem_type type, void *allocator);
+void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
+int xdp_reg_mem_model(struct xdp_mem_info *mem,
+ enum xdp_mem_type type, void *allocator);
+void xdp_unreg_mem_model(struct xdp_mem_info *mem);
+
+/* Drivers not supporting XDP metadata can use this helper, which
+ * rejects any room expansion for metadata as a result.
+ */
+static __always_inline void
+xdp_set_data_meta_invalid(struct xdp_buff *xdp)
+{
+ xdp->data_meta = xdp->data + 1;
+}
+
+static __always_inline bool
+xdp_data_meta_unsupported(const struct xdp_buff *xdp)
+{
+ return unlikely(xdp->data_meta > xdp->data);
+}
+
+static inline bool xdp_metalen_invalid(unsigned long metalen)
+{
+ return (metalen & (sizeof(__u32) - 1)) || (metalen > 32);
+}
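+
+/* E.g. a metalen of 8 is accepted (4-byte aligned and <= 32 bytes),
+ * while 6 (unaligned) and 40 (> 32 bytes) are both rejected.
+ */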
+
+struct xdp_attachment_info {
+ struct bpf_prog *prog;
+ u32 flags;
+};
+
+struct netdev_bpf;
+void xdp_attachment_setup(struct xdp_attachment_info *info,
+ struct netdev_bpf *bpf);
+
+#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE
+
+#endif /* __LINUX_NET_XDP_H__ */
diff --git a/include/net/xdp_priv.h b/include/net/xdp_priv.h
new file mode 100644
index 000000000..c9df68d5f
--- /dev/null
+++ b/include/net/xdp_priv.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_NET_XDP_PRIV_H__
+#define __LINUX_NET_XDP_PRIV_H__
+
+#include <linux/rhashtable.h>
+#include <net/xdp.h>
+
+/* Private to net/core/xdp.c, but used by trace/events/xdp.h */
+struct xdp_mem_allocator {
+ struct xdp_mem_info mem;
+ union {
+ void *allocator;
+ struct page_pool *page_pool;
+ };
+ struct rhash_head node;
+ struct rcu_head rcu;
+};
+
+#endif /* __LINUX_NET_XDP_PRIV_H__ */
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
new file mode 100644
index 000000000..3057e1a4a
--- /dev/null
+++ b/include/net/xdp_sock.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* AF_XDP internal functions
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#ifndef _LINUX_XDP_SOCK_H
+#define _LINUX_XDP_SOCK_H
+
+#include <linux/bpf.h>
+#include <linux/workqueue.h>
+#include <linux/if_xdp.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <net/sock.h>
+
+struct net_device;
+struct xsk_queue;
+struct xdp_buff;
+
+struct xdp_umem {
+ void *addrs;
+ u64 size;
+ u32 headroom;
+ u32 chunk_size;
+ u32 chunks;
+ u32 npgs;
+ struct user_struct *user;
+ refcount_t users;
+ u8 flags;
+ bool zc;
+ struct page **pgs;
+ int id;
+ struct list_head xsk_dma_list;
+ struct work_struct work;
+};
+
+struct xsk_map {
+ struct bpf_map map;
+ spinlock_t lock; /* Synchronize map updates */
+ struct xdp_sock __rcu *xsk_map[];
+};
+
+struct xdp_sock {
+ /* struct sock must be the first member of struct xdp_sock */
+ struct sock sk;
+ struct xsk_queue *rx ____cacheline_aligned_in_smp;
+ struct net_device *dev;
+ struct xdp_umem *umem;
+ struct list_head flush_node;
+ struct xsk_buff_pool *pool;
+ u16 queue_id;
+ bool zc;
+ enum {
+ XSK_READY = 0,
+ XSK_BOUND,
+ XSK_UNBOUND,
+ } state;
+
+ struct xsk_queue *tx ____cacheline_aligned_in_smp;
+ struct list_head tx_list;
+ /* Protects generic receive. */
+ spinlock_t rx_lock;
+
+ /* Statistics */
+ u64 rx_dropped;
+ u64 rx_queue_full;
+
+ struct list_head map_list;
+ /* Protects map_list */
+ spinlock_t map_list_lock;
+ /* Protects multiple processes in the control path */
+ struct mutex mutex;
+ struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
+ struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
+};
+
+#ifdef CONFIG_XDP_SOCKETS
+
+int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
+int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
+void __xsk_map_flush(void);
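+
+/* Sketch (assumed call sites, not part of this header): the bpf
+ * XDP_REDIRECT path calls __xsk_map_redirect() once per packet to
+ * queue it on the socket's RX ring, and the driver's end-of-NAPI-poll
+ * flush publishes the queued entries and wakes readers:
+ *
+ *	ret = __xsk_map_redirect(xs, xdp);	// per packet
+ *	...
+ *	__xsk_map_flush();			// once per NAPI poll
+ */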
+
+#else
+
+static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+{
+ return -ENOTSUPP;
+}
+
+static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void __xsk_map_flush(void)
+{
+}
+
+#endif /* CONFIG_XDP_SOCKETS */
+
+#endif /* _LINUX_XDP_SOCK_H */
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
new file mode 100644
index 000000000..9c0d86060
--- /dev/null
+++ b/include/net/xdp_sock_drv.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Interface for implementing AF_XDP zero-copy support in drivers.
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#ifndef _LINUX_XDP_SOCK_DRV_H
+#define _LINUX_XDP_SOCK_DRV_H
+
+#include <net/xdp_sock.h>
+#include <net/xsk_buff_pool.h>
+
+#define XDP_UMEM_MIN_CHUNK_SHIFT 11
+#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)
+
+#ifdef CONFIG_XDP_SOCKETS
+
+void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
+bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
+u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
+void xsk_tx_release(struct xsk_buff_pool *pool);
+struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
+ u16 queue_id);
+void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
+void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
+void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
+void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
+bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
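+
+/* Zero-copy TX sketch (illustrative; "budget" and post_to_hw_tx_ring()
+ * are hypothetical driver details):
+ *
+ *	struct xdp_desc desc;
+ *
+ *	while (budget-- && xsk_tx_peek_desc(pool, &desc)) {
+ *		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
+ *
+ *		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
+ *		post_to_hw_tx_ring(dma, desc.len);
+ *	}
+ *	xsk_tx_release(pool);
+ *	// after HW completions: xsk_tx_completed(pool, n_done);
+ */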
+
+static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
+{
+ return XDP_PACKET_HEADROOM + pool->headroom;
+}
+
+static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
+{
+ return pool->chunk_size;
+}
+
+static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
+{
+ return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
+}
+
+static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
+ struct xdp_rxq_info *rxq)
+{
+ xp_set_rxq_info(pool, rxq);
+}
+
+static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ return pool->heads[0].xdp.rxq->napi_id;
+#else
+ return 0;
+#endif
+}
+
+static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
+ unsigned long attrs)
+{
+ xp_dma_unmap(pool, attrs);
+}
+
+static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
+ struct device *dev, unsigned long attrs)
+{
+ struct xdp_umem *umem = pool->umem;
+
+ return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
+{
+ struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+ return xp_get_dma(xskb);
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
+{
+ struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+ return xp_get_frame_dma(xskb);
+}
+
+static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
+{
+ return xp_alloc(pool);
+}
+
+/* Returns as many entries as possible up to max. 0 <= N <= max. */
+static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ return xp_alloc_batch(pool, xdp, max);
+}
+
+static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
+{
+ return xp_can_alloc(pool, count);
+}
+
+static inline void xsk_buff_free(struct xdp_buff *xdp)
+{
+ struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+ xp_free(xskb);
+}
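+
+/* Assumed counterpart of the !CONFIG_XDP_SOCKETS stub further down:
+ * returns the buffer to the pool without completing it to user space.
+ */
+static inline void xsk_buff_discard(struct xdp_buff *xdp)
+{
+ struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+ xp_release(xskb);
+}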
+
+static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+{
+ xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
+ xdp->data_meta = xdp->data;
+ xdp->data_end = xdp->data + size;
+}
+
+static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
+ u64 addr)
+{
+ return xp_raw_get_dma(pool, addr);
+}
+
+static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
+{
+ return xp_raw_get_data(pool, addr);
+}
+
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
+{
+ struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+ if (!pool->dma_need_sync)
+ return;
+
+ xp_dma_sync_for_cpu(xskb);
+}
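+
+/* Zero-copy RX sketch (illustrative; post_to_hw_rx_ring() and the
+ * completion handling are hypothetical driver details):
+ *
+ *	struct xdp_buff *xdp = xsk_buff_alloc(pool);
+ *
+ *	if (!xdp)
+ *		return;		// out of buffers, retry later
+ *	post_to_hw_rx_ring(xsk_buff_xdp_get_dma(xdp));
+ *	...
+ *	// on completion of a len-byte packet:
+ *	xsk_buff_set_size(xdp, len);
+ *	xsk_buff_dma_sync_for_cpu(xdp, pool);
+ *	// run the XDP program; on drop/failure: xsk_buff_free(xdp)
+ */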
+
+static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
+ dma_addr_t dma,
+ size_t size)
+{
+ xp_dma_sync_for_device(pool, dma, size);
+}
+
+#else
+
+static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
+{
+}
+
+static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
+ struct xdp_desc *desc)
+{
+ return false;
+}
+
+static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
+{
+ return 0;
+}
+
+static inline void xsk_tx_release(struct xsk_buff_pool *pool)
+{
+}
+
+static inline struct xsk_buff_pool *
+xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
+{
+ return NULL;
+}
+
+static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
+{
+}
+
+static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
+{
+}
+
+static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
+{
+}
+
+static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
+{
+}
+
+static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
+{
+ return false;
+}
+
+static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
+{
+ return 0;
+}
+
+static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
+{
+ return 0;
+}
+
+static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
+{
+ return 0;
+}
+
+static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
+ struct xdp_rxq_info *rxq)
+{
+}
+
+static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
+{
+ return 0;
+}
+
+static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
+ unsigned long attrs)
+{
+}
+
+static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
+ struct device *dev, unsigned long attrs)
+{
+ return 0;
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
+{
+ return 0;
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
+{
+ return 0;
+}
+
+static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
+{
+ return NULL;
+}
+
+static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ return 0;
+}
+
+static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
+{
+ return false;
+}
+
+static inline void xsk_buff_free(struct xdp_buff *xdp)
+{
+}
+
+static inline void xsk_buff_discard(struct xdp_buff *xdp)
+{
+}
+
+static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+{
+}
+
+static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
+ u64 addr)
+{
+ return 0;
+}
+
+static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
+{
+ return NULL;
+}
+
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
+{
+}
+
+static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
+ dma_addr_t dma,
+ size_t size)
+{
+}
+
+#endif /* CONFIG_XDP_SOCKETS */
+
+#endif /* _LINUX_XDP_SOCK_DRV_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
new file mode 100644
index 000000000..9ec6f2e92
--- /dev/null
+++ b/include/net/xfrm.h
@@ -0,0 +1,2088 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_XFRM_H
+#define _NET_XFRM_H
+
+#include <linux/compiler.h>
+#include <linux/xfrm.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/pfkeyv2.h>
+#include <linux/ipsec.h>
+#include <linux/in6.h>
+#include <linux/mutex.h>
+#include <linux/audit.h>
+#include <linux/slab.h>
+#include <linux/refcount.h>
+#include <linux/sockptr.h>
+
+#include <net/sock.h>
+#include <net/dst.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_fib.h>
+#include <net/flow.h>
+#include <net/gro_cells.h>
+
+#include <linux/interrupt.h>
+
+#ifdef CONFIG_XFRM_STATISTICS
+#include <net/snmp.h>
+#endif
+
+#define XFRM_PROTO_ESP 50
+#define XFRM_PROTO_AH 51
+#define XFRM_PROTO_COMP 108
+#define XFRM_PROTO_IPIP 4
+#define XFRM_PROTO_IPV6 41
+#define XFRM_PROTO_ROUTING IPPROTO_ROUTING
+#define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
+
+#define XFRM_ALIGN4(len) (((len) + 3) & ~3)
+#define XFRM_ALIGN8(len) (((len) + 7) & ~7)
+#define MODULE_ALIAS_XFRM_MODE(family, encap) \
+ MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
+#define MODULE_ALIAS_XFRM_TYPE(family, proto) \
+ MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
+#define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \
+ MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))
+
+#ifdef CONFIG_XFRM_STATISTICS
+#define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
+#else
+#define XFRM_INC_STATS(net, field) ((void)(net))
+#endif
+
+
+/* Organization of SPD aka "XFRM rules"
+ ------------------------------------
+
+ Basic objects:
+ - policy rule, struct xfrm_policy (=SPD entry)
+ - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
+ - instance of a transformer, struct xfrm_state (=SA)
+ - template to clone xfrm_state, struct xfrm_tmpl
+
+ The SPD is a plain linear list of xfrm_policy rules, ordered by priority.
+ (To be compatible with existing pfkeyv2 implementations,
+ many rules with a priority of 0x7fffffff are allowed to exist and
+ such rules are ordered in an unpredictable way, thanks to bsd folks.)
+
+ Lookup is a plain linear search until the first match with a selector.
+
+ If "action" is "block", then we prohibit the flow, otherwise:
+ if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
+ policy entry has list of up to XFRM_MAX_DEPTH transformations,
+ described by templates xfrm_tmpl. Each template is resolved
+ to a complete xfrm_state (see below) and we pack bundle of transformations
+ to a dst_entry returned to requestor.
+
+ dst -. xfrm .-> xfrm_state #1
+ |---. child .-> dst -. xfrm .-> xfrm_state #2
+ |---. child .-> dst -. xfrm .-> xfrm_state #3
+ |---. child .-> NULL
+
+ Bundles are cached in the xfrm_policy struct (field ->bundles).
+
+
+ Resolution of xfrm_tmpl
+ -----------------------
+ Template contains:
+ 1. ->mode Mode: transport or tunnel
+ 2. ->id.proto Protocol: AH/ESP/IPCOMP
+ 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
+ Q: allow to resolve security gateway?
+ 4. ->id.spi If not zero, static SPI.
+ 5. ->saddr Local tunnel endpoint, ignored for transport mode.
+ 6. ->algos List of allowed algos. Plain bitmask now.
+ Q: ealgos, aalgos, calgos. What a mess...
+ 7. ->share Sharing mode.
+ Q: how to implement private sharing mode? To add struct sock* to
+ flow id?
+
+ Given this template, we search through the SAD for entries
+ with the appropriate mode/proto/algo, permitted by the selector.
+ If no appropriate entry is found, one is requested from the key manager.
+
+ PROBLEMS:
+ Q: How to find all the bundles referring to a physical path for
+ PMTU discovery? It seems dst should contain a list of all parents...
+ and enter an infinite locking-hierarchy disaster.
+ No! It is easier: we will not search for them, let them find us.
+ We add a genid to each dst plus a pointer to the genid of the raw IP
+ route; pmtu discovery will update the pmtu on the raw IP route and
+ increase its genid. dst_check() will see this at the top level and
+ trigger resyncing of the metrics, which will happen via
+ sk->sk_dst_cache. Solved.
+ */
+
+struct xfrm_state_walk {
+ struct list_head all;
+ u8 state;
+ u8 dying;
+ u8 proto;
+ u32 seq;
+ struct xfrm_address_filter *filter;
+};
+
+enum {
+ XFRM_DEV_OFFLOAD_IN = 1,
+ XFRM_DEV_OFFLOAD_OUT,
+};
+
+struct xfrm_dev_offload {
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+ struct net_device *real_dev;
+ unsigned long offload_handle;
+ u8 dir : 2;
+};
+
+struct xfrm_mode {
+ u8 encap;
+ u8 family;
+ u8 flags;
+};
+
+/* Flags for xfrm_mode. */
+enum {
+ XFRM_MODE_FLAG_TUNNEL = 1,
+};
+
+enum xfrm_replay_mode {
+ XFRM_REPLAY_MODE_LEGACY,
+ XFRM_REPLAY_MODE_BMP,
+ XFRM_REPLAY_MODE_ESN,
+};
+
+/* Full description of state of transformer. */
+struct xfrm_state {
+ possible_net_t xs_net;
+ union {
+ struct hlist_node gclist;
+ struct hlist_node bydst;
+ };
+ struct hlist_node bysrc;
+ struct hlist_node byspi;
+ struct hlist_node byseq;
+
+ refcount_t refcnt;
+ spinlock_t lock;
+
+ struct xfrm_id id;
+ struct xfrm_selector sel;
+ struct xfrm_mark mark;
+ u32 if_id;
+ u32 tfcpad;
+
+ u32 genid;
+
+ /* Key manager bits */
+ struct xfrm_state_walk km;
+
+ /* Parameters of this state. */
+ struct {
+ u32 reqid;
+ u8 mode;
+ u8 replay_window;
+ u8 aalgo, ealgo, calgo;
+ u8 flags;
+ u16 family;
+ xfrm_address_t saddr;
+ int header_len;
+ int trailer_len;
+ u32 extra_flags;
+ struct xfrm_mark smark;
+ } props;
+
+ struct xfrm_lifetime_cfg lft;
+
+ /* Data for transformer */
+ struct xfrm_algo_auth *aalg;
+ struct xfrm_algo *ealg;
+ struct xfrm_algo *calg;
+ struct xfrm_algo_aead *aead;
+ const char *geniv;
+
+ /* mapping change rate limiting */
+ __be16 new_mapping_sport;
+ u32 new_mapping; /* seconds */
+ u32 mapping_maxage; /* seconds for input SA */
+
+ /* Data for encapsulator */
+ struct xfrm_encap_tmpl *encap;
+ struct sock __rcu *encap_sk;
+
+ /* Data for care-of address */
+ xfrm_address_t *coaddr;
+
+ /* IPComp needs an IPIP tunnel for handling uncompressed packets */
+ struct xfrm_state *tunnel;
+
+ /* If a tunnel, number of users + 1 */
+ atomic_t tunnel_users;
+
+ /* State for replay detection */
+ struct xfrm_replay_state replay;
+ struct xfrm_replay_state_esn *replay_esn;
+
+ /* Replay detection state at the time we sent the last notification */
+ struct xfrm_replay_state preplay;
+ struct xfrm_replay_state_esn *preplay_esn;
+
+ /* replay detection mode */
+ enum xfrm_replay_mode repl_mode;
+ /* internal flag that only holds state for delayed aevent at the
+ * moment
+ */
+ u32 xflags;
+
+ /* Replay detection notification settings */
+ u32 replay_maxage;
+ u32 replay_maxdiff;
+
+ /* Replay detection notification timer */
+ struct timer_list rtimer;
+
+ /* Statistics */
+ struct xfrm_stats stats;
+
+ struct xfrm_lifetime_cur curlft;
+ struct hrtimer mtimer;
+
+ struct xfrm_dev_offload xso;
+
+ /* used to fix curlft->add_time when changing date */
+ long saved_tmo;
+
+ /* Last used time */
+ time64_t lastused;
+
+ struct page_frag xfrag;
+
+ /* Reference to data common to all the instances of this
+ * transformer. */
+ const struct xfrm_type *type;
+ struct xfrm_mode inner_mode;
+ struct xfrm_mode inner_mode_iaf;
+ struct xfrm_mode outer_mode;
+
+ const struct xfrm_type_offload *type_offload;
+
+ /* Security context */
+ struct xfrm_sec_ctx *security;
+
+ /* Private data of this transformer, format is opaque,
+ * interpreted by xfrm_type methods. */
+ void *data;
+};
+
+static inline struct net *xs_net(struct xfrm_state *x)
+{
+ return read_pnet(&x->xs_net);
+}
+
+/* xflags - make enum if more show up */
+#define XFRM_TIME_DEFER 1
+#define XFRM_SOFT_EXPIRE 2
+
+enum {
+ XFRM_STATE_VOID,
+ XFRM_STATE_ACQ,
+ XFRM_STATE_VALID,
+ XFRM_STATE_ERROR,
+ XFRM_STATE_EXPIRED,
+ XFRM_STATE_DEAD
+};
+
+/* callback structure passed from either netlink or pfkey */
+struct km_event {
+ union {
+ u32 hard;
+ u32 proto;
+ u32 byid;
+ u32 aevent;
+ u32 type;
+ } data;
+
+ u32 seq;
+ u32 portid;
+ u32 event;
+ struct net *net;
+};
+
+struct xfrm_if_decode_session_result {
+ struct net *net;
+ u32 if_id;
+};
+
+struct xfrm_if_cb {
+ bool (*decode_session)(struct sk_buff *skb,
+ unsigned short family,
+ struct xfrm_if_decode_session_result *res);
+};
+
+void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
+void xfrm_if_unregister_cb(void);
+
+struct net_device;
+struct xfrm_type;
+struct xfrm_dst;
+struct xfrm_policy_afinfo {
+ struct dst_ops *dst_ops;
+ struct dst_entry *(*dst_lookup)(struct net *net,
+ int tos, int oif,
+ const xfrm_address_t *saddr,
+ const xfrm_address_t *daddr,
+ u32 mark);
+ int (*get_saddr)(struct net *net, int oif,
+ xfrm_address_t *saddr,
+ xfrm_address_t *daddr,
+ u32 mark);
+ int (*fill_dst)(struct xfrm_dst *xdst,
+ struct net_device *dev,
+ const struct flowi *fl);
+ struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
+};
+
+int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family);
+void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
+void km_policy_notify(struct xfrm_policy *xp, int dir,
+ const struct km_event *c);
+void km_state_notify(struct xfrm_state *x, const struct km_event *c);
+
+struct xfrm_tmpl;
+int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
+ struct xfrm_policy *pol);
+void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
+int __xfrm_state_delete(struct xfrm_state *x);
+
+struct xfrm_state_afinfo {
+ u8 family;
+ u8 proto;
+
+ const struct xfrm_type_offload *type_offload_esp;
+
+ const struct xfrm_type *type_esp;
+ const struct xfrm_type *type_ipip;
+ const struct xfrm_type *type_ipip6;
+ const struct xfrm_type *type_comp;
+ const struct xfrm_type *type_ah;
+ const struct xfrm_type *type_routing;
+ const struct xfrm_type *type_dstopts;
+
+ int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
+ int (*transport_finish)(struct sk_buff *skb,
+ int async);
+ void (*local_error)(struct sk_buff *skb, u32 mtu);
+};
+
+int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
+int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
+struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
+
+struct xfrm_input_afinfo {
+ u8 family;
+ bool is_ipip;
+ int (*callback)(struct sk_buff *skb, u8 protocol,
+ int err);
+};
+
+int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
+int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
+
+void xfrm_flush_gc(void);
+void xfrm_state_delete_tunnel(struct xfrm_state *x);
+
+struct xfrm_type {
+ struct module *owner;
+ u8 proto;
+ u8 flags;
+#define XFRM_TYPE_NON_FRAGMENT 1
+#define XFRM_TYPE_REPLAY_PROT 2
+#define XFRM_TYPE_LOCAL_COADDR 4
+#define XFRM_TYPE_REMOTE_COADDR 8
+
+ int (*init_state)(struct xfrm_state *x,
+ struct netlink_ext_ack *extack);
+ void (*destructor)(struct xfrm_state *);
+ int (*input)(struct xfrm_state *, struct sk_buff *skb);
+ int (*output)(struct xfrm_state *, struct sk_buff *pskb);
+ int (*reject)(struct xfrm_state *, struct sk_buff *,
+ const struct flowi *);
+};
+
+int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
+void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
+
+struct xfrm_type_offload {
+ struct module *owner;
+ u8 proto;
+ void (*encap)(struct xfrm_state *, struct sk_buff *pskb);
+ int (*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
+ int (*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
+};
+
+int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
+void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
+
+static inline int xfrm_af2proto(unsigned int family)
+{
+ switch(family) {
+ case AF_INET:
+ return IPPROTO_IPIP;
+ case AF_INET6:
+ return IPPROTO_IPV6;
+ default:
+ return 0;
+ }
+}
+
+static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
+{
+ if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
+ (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
+ return &x->inner_mode;
+ else
+ return &x->inner_mode_iaf;
+}
+
+struct xfrm_tmpl {
+/* id in template is interpreted as:
+ * daddr - destination of tunnel, may be zero for transport mode.
+ * spi - zero to acquire spi. Not zero if spi is static, then
+ * daddr must be fixed too.
+ * proto - AH/ESP/IPCOMP
+ */
+ struct xfrm_id id;
+
+/* Source address of tunnel. Ignored, if it is not a tunnel. */
+ xfrm_address_t saddr;
+
+ unsigned short encap_family;
+
+ u32 reqid;
+
+/* Mode: transport, tunnel etc. */
+ u8 mode;
+
+/* Sharing mode: unique, this session only, this user only etc. */
+ u8 share;
+
+/* May skip this transformation if no SA is found */
+ u8 optional;
+
+/* Skip aalgos/ealgos/calgos checks. */
+ u8 allalgs;
+
+/* Bit mask of algos allowed for acquisition */
+ u32 aalgos;
+ u32 ealgos;
+ u32 calgos;
+};
+
+#define XFRM_MAX_DEPTH 6
+#define XFRM_MAX_OFFLOAD_DEPTH 1
+
+struct xfrm_policy_walk_entry {
+ struct list_head all;
+ u8 dead;
+};
+
+struct xfrm_policy_walk {
+ struct xfrm_policy_walk_entry walk;
+ u8 type;
+ u32 seq;
+};
+
+struct xfrm_policy_queue {
+ struct sk_buff_head hold_queue;
+ struct timer_list hold_timer;
+ unsigned long timeout;
+};
+
+struct xfrm_policy {
+ possible_net_t xp_net;
+ struct hlist_node bydst;
+ struct hlist_node byidx;
+
+ /* This lock only affects elements except for entry. */
+ rwlock_t lock;
+ refcount_t refcnt;
+ u32 pos;
+ struct timer_list timer;
+
+ atomic_t genid;
+ u32 priority;
+ u32 index;
+ u32 if_id;
+ struct xfrm_mark mark;
+ struct xfrm_selector selector;
+ struct xfrm_lifetime_cfg lft;
+ struct xfrm_lifetime_cur curlft;
+ struct xfrm_policy_walk_entry walk;
+ struct xfrm_policy_queue polq;
+ bool bydst_reinsert;
+ u8 type;
+ u8 action;
+ u8 flags;
+ u8 xfrm_nr;
+ u16 family;
+ struct xfrm_sec_ctx *security;
+ struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
+ struct hlist_node bydst_inexact_list;
+ struct rcu_head rcu;
+};
+
+static inline struct net *xp_net(const struct xfrm_policy *xp)
+{
+ return read_pnet(&xp->xp_net);
+}
+
+struct xfrm_kmaddress {
+ xfrm_address_t local;
+ xfrm_address_t remote;
+ u32 reserved;
+ u16 family;
+};
+
+struct xfrm_migrate {
+ xfrm_address_t old_daddr;
+ xfrm_address_t old_saddr;
+ xfrm_address_t new_daddr;
+ xfrm_address_t new_saddr;
+ u8 proto;
+ u8 mode;
+ u16 reserved;
+ u32 reqid;
+ u16 old_family;
+ u16 new_family;
+};
+
+#define XFRM_KM_TIMEOUT 30
+/* what happened */
+#define XFRM_REPLAY_UPDATE XFRM_AE_CR
+#define XFRM_REPLAY_TIMEOUT XFRM_AE_CE
+
+/* default aevent timeout in units of 100ms */
+#define XFRM_AE_ETIME 10
+/* Async Event timer multiplier */
+#define XFRM_AE_ETH_M 10
+/* default seq threshold size */
+#define XFRM_AE_SEQT_SIZE 2
+
+struct xfrm_mgr {
+ struct list_head list;
+ int (*notify)(struct xfrm_state *x, const struct km_event *c);
+ int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
+ struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
+ int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
+ int (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
+ int (*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
+ int (*migrate)(const struct xfrm_selector *sel,
+ u8 dir, u8 type,
+ const struct xfrm_migrate *m,
+ int num_bundles,
+ const struct xfrm_kmaddress *k,
+ const struct xfrm_encap_tmpl *encap);
+ bool (*is_alive)(const struct km_event *c);
+};
+
+void xfrm_register_km(struct xfrm_mgr *km);
+void xfrm_unregister_km(struct xfrm_mgr *km);
+
+struct xfrm_tunnel_skb_cb {
+ union {
+ struct inet_skb_parm h4;
+ struct inet6_skb_parm h6;
+ } header;
+
+ union {
+ struct ip_tunnel *ip4;
+ struct ip6_tnl *ip6;
+ } tunnel;
+};
+
+#define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
+
+/*
+ * This structure is used while packets are being transformed by
+ * IPsec. As soon as the packet leaves IPsec, the area beyond the
+ * generic IP part may be overwritten.
+ */
+struct xfrm_skb_cb {
+ struct xfrm_tunnel_skb_cb header;
+
+ /* Sequence number for replay protection. */
+ union {
+ struct {
+ __u32 low;
+ __u32 hi;
+ } output;
+ struct {
+ __be32 low;
+ __be32 hi;
+ } input;
+ } seq;
+};
+
+#define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
+
+/*
+ * This structure is used by the afinfo prepare_input/prepare_output functions
+ * to transmit header information to the mode input/output functions.
+ */
+struct xfrm_mode_skb_cb {
+ struct xfrm_tunnel_skb_cb header;
+
+ /* Copied from header for IPv4, always set to zero and DF for IPv6. */
+ __be16 id;
+ __be16 frag_off;
+
+ /* IP header length (excluding options or extension headers). */
+ u8 ihl;
+
+ /* TOS for IPv4, class for IPv6. */
+ u8 tos;
+
+ /* TTL for IPv4, hop limit for IPv6. */
+ u8 ttl;
+
+ /* Protocol for IPv4, NH for IPv6. */
+ u8 protocol;
+
+ /* Option length for IPv4, zero for IPv6. */
+ u8 optlen;
+
+ /* Used by IPv6 only, zero for IPv4. */
+ u8 flow_lbl[3];
+};
+
+#define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
+
+/*
+ * This structure is used by the input processing to locate the SPI and
+ * related information.
+ */
+struct xfrm_spi_skb_cb {
+ struct xfrm_tunnel_skb_cb header;
+
+ unsigned int daddroff;
+ unsigned int family;
+ __be32 seq;
+};
+
+#define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
+
+#ifdef CONFIG_AUDITSYSCALL
+static inline struct audit_buffer *xfrm_audit_start(const char *op)
+{
+ struct audit_buffer *audit_buf = NULL;
+
+ if (audit_enabled == AUDIT_OFF)
+ return NULL;
+ audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
+ AUDIT_MAC_IPSEC_EVENT);
+ if (audit_buf == NULL)
+ return NULL;
+ audit_log_format(audit_buf, "op=%s", op);
+ return audit_buf;
+}
+
+static inline void xfrm_audit_helper_usrinfo(bool task_valid,
+ struct audit_buffer *audit_buf)
+{
+ const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
+ audit_get_loginuid(current) :
+ INVALID_UID);
+ const unsigned int ses = task_valid ? audit_get_sessionid(current) :
+ AUDIT_SID_UNSET;
+
+ audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
+ audit_log_task_context(audit_buf);
+}
+
+void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
+void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
+ bool task_valid);
+void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
+void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
+void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
+ struct sk_buff *skb);
+void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
+ __be32 net_seq);
+void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
+void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
+ __be32 net_seq);
+void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
+ u8 proto);
+#else
+
+static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
+ bool task_valid)
+{
+}
+
+static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
+ bool task_valid)
+{
+}
+
+static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
+ bool task_valid)
+{
+}
+
+static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
+ bool task_valid)
+{
+}
+
+static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
+ struct sk_buff *skb)
+{
+}
+
+static inline void xfrm_audit_state_replay(struct xfrm_state *x,
+ struct sk_buff *skb, __be32 net_seq)
+{
+}
+
+static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
+ u16 family)
+{
+}
+
+static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
+ __be32 net_spi, __be32 net_seq)
+{
+}
+
+static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
+ struct sk_buff *skb, u8 proto)
+{
+}
+#endif /* CONFIG_AUDITSYSCALL */
+
+static inline void xfrm_pol_hold(struct xfrm_policy *policy)
+{
+ if (likely(policy != NULL))
+ refcount_inc(&policy->refcnt);
+}
+
+void xfrm_policy_destroy(struct xfrm_policy *policy);
+
+static inline void xfrm_pol_put(struct xfrm_policy *policy)
+{
+ if (refcount_dec_and_test(&policy->refcnt))
+ xfrm_policy_destroy(policy);
+}
+
+static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
+{
+ int i;
+ for (i = npols - 1; i >= 0; --i)
+ xfrm_pol_put(pols[i]);
+}
+
+void __xfrm_state_destroy(struct xfrm_state *, bool);
+
+static inline void __xfrm_state_put(struct xfrm_state *x)
+{
+ refcount_dec(&x->refcnt);
+}
+
+static inline void xfrm_state_put(struct xfrm_state *x)
+{
+ if (refcount_dec_and_test(&x->refcnt))
+ __xfrm_state_destroy(x, false);
+}
+
+static inline void xfrm_state_put_sync(struct xfrm_state *x)
+{
+ if (refcount_dec_and_test(&x->refcnt))
+ __xfrm_state_destroy(x, true);
+}
+
+static inline void xfrm_state_hold(struct xfrm_state *x)
+{
+ refcount_inc(&x->refcnt);
+}
+
+static inline bool addr_match(const void *token1, const void *token2,
+ unsigned int prefixlen)
+{
+ const __be32 *a1 = token1;
+ const __be32 *a2 = token2;
+ unsigned int pdw;
+ unsigned int pbi;
+
+ pdw = prefixlen >> 5; /* num of whole u32 in prefix */
+ pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
+
+ if (pdw)
+ if (memcmp(a1, a2, pdw << 2))
+ return false;
+
+ if (pbi) {
+ __be32 mask;
+
+ mask = htonl((0xffffffff) << (32 - pbi));
+
+ if ((a1[pdw] ^ a2[pdw]) & mask)
+ return false;
+ }
+
+ return true;
+}
+
+static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
+{
+ /* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
+ if (sizeof(long) == 4 && prefixlen == 0)
+ return true;
+ return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
+}
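+
+/* Worked example: addr4_match(htonl(0x0a000001), htonl(0x0a0000ff), 24)
+ * is true, since 10.0.0.1 and 10.0.0.255 share the 10.0.0.0/24 prefix.
+ */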
+
+static __inline__
+__be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
+{
+ __be16 port;
+ switch(fl->flowi_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
+ case IPPROTO_SCTP:
+ port = uli->ports.sport;
+ break;
+ case IPPROTO_ICMP:
+ case IPPROTO_ICMPV6:
+ port = htons(uli->icmpt.type);
+ break;
+ case IPPROTO_MH:
+ port = htons(uli->mht.type);
+ break;
+ case IPPROTO_GRE:
+ port = htons(ntohl(uli->gre_key) >> 16);
+ break;
+ default:
+ port = 0; /*XXX*/
+ }
+ return port;
+}
+
+static __inline__
+__be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
+{
+ __be16 port;
+ switch(fl->flowi_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
+ case IPPROTO_SCTP:
+ port = uli->ports.dport;
+ break;
+ case IPPROTO_ICMP:
+ case IPPROTO_ICMPV6:
+ port = htons(uli->icmpt.code);
+ break;
+ case IPPROTO_GRE:
+ port = htons(ntohl(uli->gre_key) & 0xffff);
+ break;
+ default:
+ port = 0; /*XXX*/
+ }
+ return port;
+}
+
+bool xfrm_selector_match(const struct xfrm_selector *sel,
+ const struct flowi *fl, unsigned short family);
+
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+/* If neither has a context --> match
+ * Otherwise, both must have a context and the sids, doi, alg must match
+ */
+static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
+{
+ return ((!s1 && !s2) ||
+ (s1 && s2 &&
+ (s1->ctx_sid == s2->ctx_sid) &&
+ (s1->ctx_doi == s2->ctx_doi) &&
+ (s1->ctx_alg == s2->ctx_alg)));
+}
+#else
+static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
+{
+ return true;
+}
+#endif
+
+/* A struct encoding a bundle of transformations to apply to some set of flows.
+ *
+ * xdst->child points to the next element of the bundle.
+ * dst->xfrm points to an instance of a transformer.
+ *
+ * Due to unfortunate limitations of the current routing cache, which
+ * we have no time to fix, it mirrors struct rtable and is bound to the
+ * same routing key, including saddr/daddr. However, we can have many
+ * bundles differing by session id. All the bundles grow from a parent
+ * policy rule.
+ */
+struct xfrm_dst {
+ union {
+ struct dst_entry dst;
+ struct rtable rt;
+ struct rt6_info rt6;
+ } u;
+ struct dst_entry *route;
+ struct dst_entry *child;
+ struct dst_entry *path;
+ struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
+ int num_pols, num_xfrms;
+ u32 xfrm_genid;
+ u32 policy_genid;
+ u32 route_mtu_cached;
+ u32 child_mtu_cached;
+ u32 route_cookie;
+ u32 path_cookie;
+};
+
+static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
+{
+#ifdef CONFIG_XFRM
+ if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
+ const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
+
+ return xdst->path;
+ }
+#endif
+ return (struct dst_entry *) dst;
+}
+
+static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
+{
+#ifdef CONFIG_XFRM
+ if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
+ struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
+ return xdst->child;
+ }
+#endif
+ return NULL;
+}
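+
+/* Bundle-walk sketch (illustrative; sa_count is a hypothetical local):
+ * each level of the bundle carries one transformation, and the walk
+ * bottoms out at the plain route:
+ *
+ *	for (dst = &xdst->u.dst; dst; dst = xfrm_dst_child(dst)) {
+ *		if (dst->xfrm)
+ *			sa_count++;	// one xfrm_state (SA) per level
+ *	}
+ *	// or jump straight to the underlying route:
+ *	path = xfrm_dst_path(&xdst->u.dst);
+ */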
+
+#ifdef CONFIG_XFRM
+static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
+{
+ xdst->child = child;
+}
+
+static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
+{
+ xfrm_pols_put(xdst->pols, xdst->num_pols);
+ dst_release(xdst->route);
+ if (likely(xdst->u.dst.xfrm))
+ xfrm_state_put(xdst->u.dst.xfrm);
+}
+#endif
+
+void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
+
+struct xfrm_if_parms {
+ int link; /* ifindex of underlying L2 interface */
+ u32 if_id; /* interface identifier */
+ bool collect_md;
+};
+
+struct xfrm_if {
+ struct xfrm_if __rcu *next; /* next interface in list */
+ struct net_device *dev; /* virtual device associated with interface */
+ struct net *net; /* netns for packet i/o */
+ struct xfrm_if_parms p; /* interface parms */
+
+ struct gro_cells gro_cells;
+};
+
+struct xfrm_offload {
+ /* Output sequence number for replay protection on offloading. */
+ struct {
+ __u32 low;
+ __u32 hi;
+ } seq;
+
+ __u32 flags;
+#define SA_DELETE_REQ 1
+#define CRYPTO_DONE 2
+#define CRYPTO_NEXT_DONE 4
+#define CRYPTO_FALLBACK 8
+#define XFRM_GSO_SEGMENT 16
+#define XFRM_GRO 32
+/* 64 is free */
+#define XFRM_DEV_RESUME 128
+#define XFRM_XMIT 256
+
+ __u32 status;
+#define CRYPTO_SUCCESS 1
+#define CRYPTO_GENERIC_ERROR 2
+#define CRYPTO_TRANSPORT_AH_AUTH_FAILED 4
+#define CRYPTO_TRANSPORT_ESP_AUTH_FAILED 8
+#define CRYPTO_TUNNEL_AH_AUTH_FAILED 16
+#define CRYPTO_TUNNEL_ESP_AUTH_FAILED 32
+#define CRYPTO_INVALID_PACKET_SYNTAX 64
+#define CRYPTO_INVALID_PROTOCOL 128
+
+ __u8 proto;
+ __u8 inner_ipproto;
+};
+
+struct sec_path {
+ int len;
+ int olen;
+ int verified_cnt;
+
+ struct xfrm_state *xvec[XFRM_MAX_DEPTH];
+ struct xfrm_offload ovec[XFRM_MAX_OFFLOAD_DEPTH];
+};
+
+struct sec_path *secpath_set(struct sk_buff *skb);
+
+static inline void
+secpath_reset(struct sk_buff *skb)
+{
+#ifdef CONFIG_XFRM
+ skb_ext_del(skb, SKB_EXT_SEC_PATH);
+#endif
+}
+
+static inline int
+xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
+{
+ switch (family) {
+ case AF_INET:
+ return addr->a4 == 0;
+ case AF_INET6:
+ return ipv6_addr_any(&addr->in6);
+ }
+ return 0;
+}
+
+static inline int
+__xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
+{
+ return (tmpl->saddr.a4 &&
+ tmpl->saddr.a4 != x->props.saddr.a4);
+}
+
+static inline int
+__xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
+{
+ return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
+ !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
+}
+
+static inline int
+xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
+{
+ switch (family) {
+ case AF_INET:
+ return __xfrm4_state_addr_cmp(tmpl, x);
+ case AF_INET6:
+ return __xfrm6_state_addr_cmp(tmpl, x);
+ }
+ return !0;
+}
+
+#ifdef CONFIG_XFRM
+int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
+ unsigned short family);
+
+static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
+ int dir)
+{
+ if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
+ return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
+
+ return false;
+}
+
+static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
+ int dir, unsigned short family)
+{
+ if (dir != XFRM_POLICY_OUT && family == AF_INET) {
+ /* same dst may be used for traffic originating from
+ * devices with different policy settings.
+ */
+ return IPCB(skb)->flags & IPSKB_NOPOLICY;
+ }
+ return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
+}
+
+static inline int __xfrm_policy_check2(struct sock *sk, int dir,
+ struct sk_buff *skb,
+ unsigned int family, int reverse)
+{
+ struct net *net = dev_net(skb->dev);
+ int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
+
+ if (sk && sk->sk_policy[XFRM_POLICY_IN])
+ return __xfrm_policy_check(sk, ndir, skb, family);
+
+ return __xfrm_check_nopolicy(net, skb, dir) ||
+ __xfrm_check_dev_nopolicy(skb, dir, family) ||
+ __xfrm_policy_check(sk, ndir, skb, family);
+}
+
+static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
+{
+ return __xfrm_policy_check2(sk, dir, skb, family, 0);
+}
+
+static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
+{
+ return xfrm_policy_check(sk, dir, skb, AF_INET);
+}
+
+static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
+{
+ return xfrm_policy_check(sk, dir, skb, AF_INET6);
+}
+
+static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
+ struct sk_buff *skb)
+{
+ return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
+}
+
+static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
+ struct sk_buff *skb)
+{
+ return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
+}
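+
+/* Typical receive-path use in a protocol handler (sketch; the real
+ * callers live in the per-protocol input code):
+ *
+ *	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
+ *		goto drop;
+ */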
+
+int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
+ unsigned int family, int reverse);
+
+static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
+ unsigned int family)
+{
+ return __xfrm_decode_session(skb, fl, family, 0);
+}
+
+static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
+ struct flowi *fl,
+ unsigned int family)
+{
+ return __xfrm_decode_session(skb, fl, family, 1);
+}
+
+int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
+
+static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
+{
+ struct net *net = dev_net(skb->dev);
+
+ if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
+ net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
+ return true;
+
+ return (skb_dst(skb)->flags & DST_NOXFRM) ||
+ __xfrm_route_forward(skb, family);
+}
+
+static inline int xfrm4_route_forward(struct sk_buff *skb)
+{
+ return xfrm_route_forward(skb, AF_INET);
+}
+
+static inline int xfrm6_route_forward(struct sk_buff *skb)
+{
+ return xfrm_route_forward(skb, AF_INET6);
+}
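+
+/* Forwarding-path sketch: consulted before a packet is forwarded, e.g.:
+ *
+ *	if (!xfrm4_route_forward(skb))
+ *		goto drop;
+ */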
+
+int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
+
+static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
+{
+ if (!sk_fullsock(osk))
+ return 0;
+ sk->sk_policy[0] = NULL;
+ sk->sk_policy[1] = NULL;
+ if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
+ return __xfrm_sk_clone_policy(sk, osk);
+ return 0;
+}
+
+int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
+
+static inline void xfrm_sk_free_policy(struct sock *sk)
+{
+ struct xfrm_policy *pol;
+
+ pol = rcu_dereference_protected(sk->sk_policy[0], 1);
+ if (unlikely(pol != NULL)) {
+ xfrm_policy_delete(pol, XFRM_POLICY_MAX);
+ sk->sk_policy[0] = NULL;
+ }
+ pol = rcu_dereference_protected(sk->sk_policy[1], 1);
+ if (unlikely(pol != NULL)) {
+ xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
+ sk->sk_policy[1] = NULL;
+ }
+}
+
+#else
+
+static inline void xfrm_sk_free_policy(struct sock *sk) {}
+static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
+static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
+static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
+static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
+{
+ return 1;
+}
+static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
+{
+ return 1;
+}
+static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
+{
+ return 1;
+}
+static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
+ struct flowi *fl,
+ unsigned int family)
+{
+ return -ENOSYS;
+}
+static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
+ struct sk_buff *skb)
+{
+ return 1;
+}
+static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
+ struct sk_buff *skb)
+{
+ return 1;
+}
+#endif
+
+static __inline__
+xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
+{
+ switch (family){
+ case AF_INET:
+ return (xfrm_address_t *)&fl->u.ip4.daddr;
+ case AF_INET6:
+ return (xfrm_address_t *)&fl->u.ip6.daddr;
+ }
+ return NULL;
+}
+
+static __inline__
+xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
+{
+ switch (family){
+ case AF_INET:
+ return (xfrm_address_t *)&fl->u.ip4.saddr;
+ case AF_INET6:
+ return (xfrm_address_t *)&fl->u.ip6.saddr;
+ }
+ return NULL;
+}
+
+static __inline__
+void xfrm_flowi_addr_get(const struct flowi *fl,
+ xfrm_address_t *saddr, xfrm_address_t *daddr,
+ unsigned short family)
+{
+ switch(family) {
+ case AF_INET:
+ memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
+ memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
+ break;
+ case AF_INET6:
+ saddr->in6 = fl->u.ip6.saddr;
+ daddr->in6 = fl->u.ip6.daddr;
+ break;
+ }
+}
+
+static __inline__ int
+__xfrm4_state_addr_check(const struct xfrm_state *x,
+ const xfrm_address_t *daddr, const xfrm_address_t *saddr)
+{
+ if (daddr->a4 == x->id.daddr.a4 &&
+ (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
+ return 1;
+ return 0;
+}
+
+static __inline__ int
+__xfrm6_state_addr_check(const struct xfrm_state *x,
+ const xfrm_address_t *daddr, const xfrm_address_t *saddr)
+{
+ if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
+ (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
+ ipv6_addr_any((struct in6_addr *)saddr) ||
+ ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
+ return 1;
+ return 0;
+}
+
+static __inline__ int
+xfrm_state_addr_check(const struct xfrm_state *x,
+ const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ unsigned short family)
+{
+ switch (family) {
+ case AF_INET:
+ return __xfrm4_state_addr_check(x, daddr, saddr);
+ case AF_INET6:
+ return __xfrm6_state_addr_check(x, daddr, saddr);
+ }
+ return 0;
+}
+
+static __inline__ int
+xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
+ unsigned short family)
+{
+ switch (family) {
+ case AF_INET:
+ return __xfrm4_state_addr_check(x,
+ (const xfrm_address_t *)&fl->u.ip4.daddr,
+ (const xfrm_address_t *)&fl->u.ip4.saddr);
+ case AF_INET6:
+ return __xfrm6_state_addr_check(x,
+ (const xfrm_address_t *)&fl->u.ip6.daddr,
+ (const xfrm_address_t *)&fl->u.ip6.saddr);
+ }
+ return 0;
+}
+
+static inline int xfrm_state_kern(const struct xfrm_state *x)
+{
+ return atomic_read(&x->tunnel_users);
+}
+
+static inline bool xfrm_id_proto_valid(u8 proto)
+{
+ switch (proto) {
+ case IPPROTO_AH:
+ case IPPROTO_ESP:
+ case IPPROTO_COMP:
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ROUTING:
+ case IPPROTO_DSTOPTS:
+#endif
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
+static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
+{
+ return (!userproto || proto == userproto ||
+ (userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
+ proto == IPPROTO_ESP ||
+ proto == IPPROTO_COMP)));
+}
+
+/*
+ * xfrm algorithm information
+ */
+struct xfrm_algo_aead_info {
+ char *geniv;
+ u16 icv_truncbits;
+};
+
+struct xfrm_algo_auth_info {
+ u16 icv_truncbits;
+ u16 icv_fullbits;
+};
+
+struct xfrm_algo_encr_info {
+ char *geniv;
+ u16 blockbits;
+ u16 defkeybits;
+};
+
+struct xfrm_algo_comp_info {
+ u16 threshold;
+};
+
+struct xfrm_algo_desc {
+ char *name;
+ char *compat;
+ u8 available:1;
+ u8 pfkey_supported:1;
+ union {
+ struct xfrm_algo_aead_info aead;
+ struct xfrm_algo_auth_info auth;
+ struct xfrm_algo_encr_info encr;
+ struct xfrm_algo_comp_info comp;
+ } uinfo;
+ struct sadb_alg desc;
+};
+
+/* XFRM protocol handlers. */
+struct xfrm4_protocol {
+ int (*handler)(struct sk_buff *skb);
+ int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type);
+ int (*cb_handler)(struct sk_buff *skb, int err);
+ int (*err_handler)(struct sk_buff *skb, u32 info);
+
+ struct xfrm4_protocol __rcu *next;
+ int priority;
+};
+
+struct xfrm6_protocol {
+ int (*handler)(struct sk_buff *skb);
+ int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type);
+ int (*cb_handler)(struct sk_buff *skb, int err);
+ int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, __be32 info);
+
+ struct xfrm6_protocol __rcu *next;
+ int priority;
+};
+
+/* XFRM tunnel handlers. */
+struct xfrm_tunnel {
+ int (*handler)(struct sk_buff *skb);
+ int (*cb_handler)(struct sk_buff *skb, int err);
+ int (*err_handler)(struct sk_buff *skb, u32 info);
+
+ struct xfrm_tunnel __rcu *next;
+ int priority;
+};
+
+struct xfrm6_tunnel {
+ int (*handler)(struct sk_buff *skb);
+ int (*cb_handler)(struct sk_buff *skb, int err);
+ int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, __be32 info);
+ struct xfrm6_tunnel __rcu *next;
+ int priority;
+};
+
+void xfrm_init(void);
+void xfrm4_init(void);
+int xfrm_state_init(struct net *net);
+void xfrm_state_fini(struct net *net);
+void xfrm4_state_init(void);
+void xfrm4_protocol_init(void);
+#ifdef CONFIG_XFRM
+int xfrm6_init(void);
+void xfrm6_fini(void);
+int xfrm6_state_init(void);
+void xfrm6_state_fini(void);
+int xfrm6_protocol_init(void);
+void xfrm6_protocol_fini(void);
+#else
+static inline int xfrm6_init(void)
+{
+ return 0;
+}
+static inline void xfrm6_fini(void)
+{
+}
+#endif
+
+#ifdef CONFIG_XFRM_STATISTICS
+int xfrm_proc_init(struct net *net);
+void xfrm_proc_fini(struct net *net);
+#endif
+
+int xfrm_sysctl_init(struct net *net);
+#ifdef CONFIG_SYSCTL
+void xfrm_sysctl_fini(struct net *net);
+#else
+static inline void xfrm_sysctl_fini(struct net *net)
+{
+}
+#endif
+
+void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
+ struct xfrm_address_filter *filter);
+int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
+ int (*func)(struct xfrm_state *, int, void*), void *);
+void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
+struct xfrm_state *xfrm_state_alloc(struct net *net);
+void xfrm_state_free(struct xfrm_state *x);
+struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
+ const struct flowi *fl,
+ struct xfrm_tmpl *tmpl,
+ struct xfrm_policy *pol, int *err,
+ unsigned short family, u32 if_id);
+struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
+ xfrm_address_t *daddr,
+ xfrm_address_t *saddr,
+ unsigned short family,
+ u8 mode, u8 proto, u32 reqid);
+struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
+ unsigned short family);
+int xfrm_state_check_expire(struct xfrm_state *x);
+void xfrm_state_insert(struct xfrm_state *x);
+int xfrm_state_add(struct xfrm_state *x);
+int xfrm_state_update(struct xfrm_state *x);
+struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
+ const xfrm_address_t *daddr, __be32 spi,
+ u8 proto, unsigned short family);
+struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+ const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
+ u8 proto,
+ unsigned short family);
+#ifdef CONFIG_XFRM_SUB_POLICY
+void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
+ unsigned short family);
+void xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
+ unsigned short family);
+#else
+static inline void xfrm_tmpl_sort(struct xfrm_tmpl **d, struct xfrm_tmpl **s,
+ int n, unsigned short family)
+{
+}
+
+static inline void xfrm_state_sort(struct xfrm_state **d, struct xfrm_state **s,
+ int n, unsigned short family)
+{
+}
+#endif
+
+struct xfrmk_sadinfo {
+ u32 sadhcnt; /* current hash bkts */
+ u32 sadhmcnt; /* max allowed hash bkts */
+ u32 sadcnt; /* current running count */
+};
+
+struct xfrmk_spdinfo {
+ u32 incnt;
+ u32 outcnt;
+ u32 fwdcnt;
+ u32 inscnt;
+ u32 outscnt;
+ u32 fwdscnt;
+ u32 spdhcnt;
+ u32 spdhmcnt;
+};
+
+struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
+int xfrm_state_delete(struct xfrm_state *x);
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
+int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
+void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
+void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
+u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
+int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
+u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
+int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
+ struct netlink_ext_ack *extack);
+int xfrm_init_state(struct xfrm_state *x);
+int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
+int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
+int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
+ int (*finish)(struct net *, struct sock *,
+ struct sk_buff *));
+int xfrm_trans_queue(struct sk_buff *skb,
+ int (*finish)(struct net *, struct sock *,
+ struct sk_buff *));
+int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
+int xfrm_output(struct sock *sk, struct sk_buff *skb);
+
+#if IS_ENABLED(CONFIG_NET_PKTGEN)
+int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
+#endif
+
+void xfrm_local_error(struct sk_buff *skb, int mtu);
+int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type);
+int xfrm4_transport_finish(struct sk_buff *skb, int async);
+int xfrm4_rcv(struct sk_buff *skb);
+
+static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
+{
+ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+ XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+ return xfrm_input(skb, nexthdr, spi, 0);
+}
+
+int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
+int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
+int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
+int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
+int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
+void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
+ struct ip6_tnl *t);
+int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type);
+int xfrm6_transport_finish(struct sk_buff *skb, int async);
+int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
+int xfrm6_rcv(struct sk_buff *skb);
+int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
+ xfrm_address_t *saddr, u8 proto);
+void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
+int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
+int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
+int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
+__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
+__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
+int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
+
+#ifdef CONFIG_XFRM
+void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
+int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
+int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
+int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
+ int optlen);
+#else
+static inline int xfrm_user_policy(struct sock *sk, int optname,
+ sockptr_t optval, int optlen)
+{
+ return -ENOPROTOOPT;
+}
+#endif
+
+struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
+ const xfrm_address_t *saddr,
+ const xfrm_address_t *daddr,
+ int family, u32 mark);
+
+struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
+
+void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
+int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
+ int (*func)(struct xfrm_policy *, int, int, void*),
+ void *);
+void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
+int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
+struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
+ const struct xfrm_mark *mark,
+ u32 if_id, u8 type, int dir,
+ struct xfrm_selector *sel,
+ struct xfrm_sec_ctx *ctx, int delete,
+ int *err);
+struct xfrm_policy *xfrm_policy_byid(struct net *net,
+ const struct xfrm_mark *mark, u32 if_id,
+ u8 type, int dir, u32 id, int delete,
+ int *err);
+int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
+void xfrm_policy_hash_rebuild(struct net *net);
+u32 xfrm_get_acqseq(void);
+int verify_spi_info(u8 proto, u32 min, u32 max);
+int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
+struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
+ u8 mode, u32 reqid, u32 if_id, u8 proto,
+ const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr, int create,
+ unsigned short family);
+int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
+
+#ifdef CONFIG_XFRM_MIGRATE
+int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ const struct xfrm_migrate *m, int num_bundles,
+ const struct xfrm_kmaddress *k,
+ const struct xfrm_encap_tmpl *encap);
+struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
+ u32 if_id);
+struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
+ struct xfrm_migrate *m,
+ struct xfrm_encap_tmpl *encap);
+int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ struct xfrm_migrate *m, int num_bundles,
+ struct xfrm_kmaddress *k, struct net *net,
+ struct xfrm_encap_tmpl *encap, u32 if_id);
+#endif
+
+int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
+void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
+int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
+ xfrm_address_t *addr);
+
+void xfrm_input_init(void);
+int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
+
+void xfrm_probe_algs(void);
+int xfrm_count_pfkey_auth_supported(void);
+int xfrm_count_pfkey_enc_supported(void);
+struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
+struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
+struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
+ int probe);
+
+static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
+ const xfrm_address_t *b)
+{
+ return ipv6_addr_equal((const struct in6_addr *)a,
+ (const struct in6_addr *)b);
+}
+
+static inline bool xfrm_addr_equal(const xfrm_address_t *a,
+ const xfrm_address_t *b,
+ sa_family_t family)
+{
+ switch (family) {
+ default:
+ case AF_INET:
+ return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0;
+ case AF_INET6:
+ return xfrm6_addr_equal(a, b);
+ }
+}
+
+static inline int xfrm_policy_id2dir(u32 index)
+{
+ return index & 7;
+}
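+
+/* The low three bits of a policy's ->index carry its direction, so the
+ * direction can be recovered without a lookup; e.g. for an output
+ * policy (hypothetical pol):
+ *
+ *     xfrm_policy_id2dir(pol->index) == XFRM_POLICY_OUT
+ */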
+
+#ifdef CONFIG_XFRM
+void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq);
+int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
+void xfrm_replay_notify(struct xfrm_state *x, int event);
+int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
+
+static inline int xfrm_aevent_is_on(struct net *net)
+{
+ struct sock *nlsk;
+ int ret = 0;
+
+ rcu_read_lock();
+ nlsk = rcu_dereference(net->xfrm.nlsk);
+ if (nlsk)
+ ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
+ rcu_read_unlock();
+ return ret;
+}
+
+static inline int xfrm_acquire_is_on(struct net *net)
+{
+ struct sock *nlsk;
+ int ret = 0;
+
+ rcu_read_lock();
+ nlsk = rcu_dereference(net->xfrm.nlsk);
+ if (nlsk)
+ ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
+ rcu_read_unlock();
+
+ return ret;
+}
+#endif
+
+static inline unsigned int aead_len(struct xfrm_algo_aead *alg)
+{
+ return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
+}
+
+static inline unsigned int xfrm_alg_len(const struct xfrm_algo *alg)
+{
+ return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
+}
+
+static inline unsigned int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
+{
+ return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
+}
+
+static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
+{
+ return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
+}
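+
+/* Sizing sketch: the *_len() helpers above compute netlink payload
+ * sizes as the struct plus the key rounded up from bits to bytes. For
+ * a hypothetical 257-bit authentication key:
+ *
+ *     (257 + 7) / 8 == 33 key bytes
+ *     xfrm_alg_auth_len(alg) == sizeof(*alg) + 33
+ */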
+
+#ifdef CONFIG_XFRM_MIGRATE
+static inline int xfrm_replay_clone(struct xfrm_state *x,
+ struct xfrm_state *orig)
+{
+ x->replay_esn = kmemdup(orig->replay_esn,
+ xfrm_replay_state_esn_len(orig->replay_esn),
+ GFP_KERNEL);
+ if (!x->replay_esn)
+ return -ENOMEM;
+ x->preplay_esn = kmemdup(orig->preplay_esn,
+ xfrm_replay_state_esn_len(orig->preplay_esn),
+ GFP_KERNEL);
+ if (!x->preplay_esn)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
+{
+ return kmemdup(orig, aead_len(orig), GFP_KERNEL);
+}
+
+static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
+{
+ return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
+}
+
+static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
+{
+ return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
+}
+
+static inline void xfrm_states_put(struct xfrm_state **states, int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ xfrm_state_put(*(states + i));
+}
+
+static inline void xfrm_states_delete(struct xfrm_state **states, int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ xfrm_state_delete(*(states + i));
+}
+#endif
+
+#ifdef CONFIG_XFRM
+static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
+{
+ struct sec_path *sp = skb_sec_path(skb);
+
+ return sp->xvec[sp->len - 1];
+}
+#endif
+
+static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_XFRM
+ struct sec_path *sp = skb_sec_path(skb);
+
+ if (!sp || !sp->olen || sp->len != sp->olen)
+ return NULL;
+
+ return &sp->ovec[sp->olen - 1];
+#else
+ return NULL;
+#endif
+}
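+
+/* Note: xfrm_offload() yields the offload metadata of the innermost
+ * state only when every sec_path entry carries offload info
+ * (sp->len == sp->olen); a chain mixing software and hardware states
+ * yields NULL.
+ */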
+
+void __init xfrm_dev_init(void);
+
+#ifdef CONFIG_XFRM_OFFLOAD
+void xfrm_dev_resume(struct sk_buff *skb);
+void xfrm_dev_backlog(struct softnet_data *sd);
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
+int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
+ struct xfrm_user_offload *xuo,
+ struct netlink_ext_ack *extack);
+bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+
+static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
+{
+ struct xfrm_dev_offload *xso = &x->xso;
+
+ if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn)
+ xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
+}
+
+static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
+{
+ struct xfrm_state *x = dst->xfrm;
+ struct xfrm_dst *xdst;
+
+ if (!x || !x->type_offload)
+ return false;
+
+ xdst = (struct xfrm_dst *) dst;
+ if (!x->xso.offload_handle && !xdst->child->xfrm)
+ return true;
+ if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
+ !xdst->child->xfrm)
+ return true;
+
+ return false;
+}
+
+static inline void xfrm_dev_state_delete(struct xfrm_state *x)
+{
+ struct xfrm_dev_offload *xso = &x->xso;
+
+ if (xso->dev)
+ xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
+}
+
+static inline void xfrm_dev_state_free(struct xfrm_state *x)
+{
+ struct xfrm_dev_offload *xso = &x->xso;
+ struct net_device *dev = xso->dev;
+
+ if (dev && dev->xfrmdev_ops) {
+ if (dev->xfrmdev_ops->xdo_dev_state_free)
+ dev->xfrmdev_ops->xdo_dev_state_free(x);
+ xso->dev = NULL;
+ netdev_put(dev, &xso->dev_tracker);
+ }
+}
+#else
+static inline void xfrm_dev_resume(struct sk_buff *skb)
+{
+}
+
+static inline void xfrm_dev_backlog(struct softnet_data *sd)
+{
+}
+
+static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
+{
+ return skb;
+}
+
+static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo, struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static inline void xfrm_dev_state_delete(struct xfrm_state *x)
+{
+}
+
+static inline void xfrm_dev_state_free(struct xfrm_state *x)
+{
+}
+
+static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ return false;
+}
+
+static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
+{
+}
+
+static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
+{
+ return false;
+}
+#endif
+
+static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
+{
+ if (attrs[XFRMA_MARK])
+ memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
+ else
+ m->v = m->m = 0;
+
+ return m->v & m->m;
+}
+
+static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
+{
+ int ret = 0;
+
+ if (m->m | m->v)
+ ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
+ return ret;
+}
+
+static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
+{
+ struct xfrm_mark *m = &x->props.smark;
+
+ return (m->v & m->m) | (mark & ~m->m);
+}
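+
+/* Worked example with hypothetical values: for a state smark of
+ * v = 0x1100, m = 0xff00 and a packet mark of 0x00ab, the masked bits
+ * come from the state and the rest from the packet:
+ *
+ *     (0x1100 & 0xff00) | (0x00ab & ~0xff00) == 0x11ab
+ */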
+
+static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
+{
+ int ret = 0;
+
+ if (if_id)
+ ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
+ return ret;
+}
+
+static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
+ unsigned int family)
+{
+ bool tunnel = false;
+
+ switch (family) {
+ case AF_INET:
+ if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
+ tunnel = true;
+ break;
+ case AF_INET6:
+ if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
+ tunnel = true;
+ break;
+ }
+ if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL))
+ return -EINVAL;
+
+ return 0;
+}
+
+extern const int xfrm_msg_min[XFRM_NR_MSGTYPES];
+extern const struct nla_policy xfrma_policy[XFRMA_MAX+1];
+
+struct xfrm_translator {
+ /* Allocate frag_list and put compat translation there */
+ int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);
+
+ /* Allocate nlmsg with 64-bit translation of received 32-bit message */
+ struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
+ int maxtype, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack);
+
+ /* Translate 32-bit user_policy from sockptr */
+ int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);
+
+ struct module *owner;
+};
+
+#if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
+extern int xfrm_register_translator(struct xfrm_translator *xtr);
+extern int xfrm_unregister_translator(struct xfrm_translator *xtr);
+extern struct xfrm_translator *xfrm_get_translator(void);
+extern void xfrm_put_translator(struct xfrm_translator *xtr);
+#else
+static inline struct xfrm_translator *xfrm_get_translator(void)
+{
+ return NULL;
+}
+static inline void xfrm_put_translator(struct xfrm_translator *xtr)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline bool xfrm6_local_dontfrag(const struct sock *sk)
+{
+ int proto;
+
+ if (!sk || sk->sk_family != AF_INET6)
+ return false;
+
+ proto = sk->sk_protocol;
+ if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
+ return inet6_sk(sk)->dontfrag;
+
+ return false;
+}
+#endif
+#endif /* _NET_XFRM_H */
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
new file mode 100644
index 000000000..996eaf1ef
--- /dev/null
+++ b/include/net/xsk_buff_pool.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 Intel Corporation. */
+
+#ifndef XSK_BUFF_POOL_H_
+#define XSK_BUFF_POOL_H_
+
+#include <linux/if_xdp.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/bpf.h>
+#include <net/xdp.h>
+
+struct xsk_buff_pool;
+struct xdp_rxq_info;
+struct xsk_queue;
+struct xdp_desc;
+struct xdp_umem;
+struct xdp_sock;
+struct device;
+struct page;
+
+struct xdp_buff_xsk {
+ struct xdp_buff xdp;
+ dma_addr_t dma;
+ dma_addr_t frame_dma;
+ struct xsk_buff_pool *pool;
+ u64 orig_addr;
+ struct list_head free_list_node;
+};
+
+struct xsk_dma_map {
+ dma_addr_t *dma_pages;
+ struct device *dev;
+ struct net_device *netdev;
+ refcount_t users;
+ struct list_head list; /* Protected by the RTNL_LOCK */
+ u32 dma_pages_cnt;
+ bool dma_need_sync;
+};
+
+struct xsk_buff_pool {
+ /* Members only used in the control path first. */
+ struct device *dev;
+ struct net_device *netdev;
+ struct list_head xsk_tx_list;
+ /* Protects modifications to the xsk_tx_list */
+ spinlock_t xsk_tx_list_lock;
+ refcount_t users;
+ struct xdp_umem *umem;
+ struct work_struct work;
+ struct list_head free_list;
+ u32 heads_cnt;
+ u16 queue_id;
+
+ /* Data path members, kept as close as possible to the free_heads array at the end. */
+ struct xsk_queue *fq ____cacheline_aligned_in_smp;
+ struct xsk_queue *cq;
+ /* For performance reasons, each buff pool has its own array of dma_pages
+ * even when they are identical.
+ */
+ dma_addr_t *dma_pages;
+ struct xdp_buff_xsk *heads;
+ struct xdp_desc *tx_descs;
+ u64 chunk_mask;
+ u64 addrs_cnt;
+ u32 free_list_cnt;
+ u32 dma_pages_cnt;
+ u32 free_heads_cnt;
+ u32 headroom;
+ u32 chunk_size;
+ u32 chunk_shift;
+ u32 frame_len;
+ u8 cached_need_wakeup;
+ bool uses_need_wakeup;
+ bool dma_need_sync;
+ bool unaligned;
+ void *addrs;
+ /* Mutual exclusion of the completion ring in SKB mode. Two cases to
+ * protect: the NAPI TX thread and the sendmsg error paths in the SKB
+ * destructor callback, and sockets sharing a single cq because they
+ * use the same netdev and queue id.
+ */
+ spinlock_t cq_lock;
+ struct xdp_buff_xsk *free_heads[];
+};
+
+/* Masks for xdp_umem_page flags.
+ * The low 12 bits of the addr will be 0 since this is the page address, so we
+ * can use them for flags.
+ */
+#define XSK_NEXT_PG_CONTIG_SHIFT 0
+#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
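+
+/* Sketch: xp_dma_map() is expected to set this bit on dma_pages[i]
+ * when page i + 1 is physically contiguous with page i, so a frame
+ * straddling the page boundary can still be handed to the device:
+ *
+ *     if (dma_pages[i] + PAGE_SIZE == dma_pages[i + 1])
+ *             dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
+ */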
+
+/* AF_XDP core. */
+struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
+ struct xdp_umem *umem);
+int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
+ u16 queue_id, u16 flags);
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
+ struct net_device *dev, u16 queue_id);
+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
+void xp_destroy(struct xsk_buff_pool *pool);
+void xp_get_pool(struct xsk_buff_pool *pool);
+bool xp_put_pool(struct xsk_buff_pool *pool);
+void xp_clear_dev(struct xsk_buff_pool *pool);
+void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
+void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
+
+/* AF_XDP and XDP core. */
+void xp_free(struct xdp_buff_xsk *xskb);
+
+static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
+ u64 addr)
+{
+ xskb->orig_addr = addr;
+ xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
+}
+
+static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
+ dma_addr_t *dma_pages, u64 addr)
+{
+ xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
+ (addr & ~PAGE_MASK);
+ xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
+}
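+
+/* Address math sketch, assuming PAGE_SHIFT == 12 and a hypothetical
+ * chunk at addr 0x3800 within the mapped umem:
+ *
+ *     frame_dma = dma_pages[3] + 0x800;    page base + in-page offset
+ *     dma = frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
+ */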
+
+/* AF_XDP ZC drivers, via xdp_sock_drv.h */
+void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
+int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
+ unsigned long attrs, struct page **pages, u32 nr_pages);
+void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
+struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
+u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
+bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
+void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
+dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
+static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
+{
+ return xskb->dma;
+}
+
+static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
+{
+ return xskb->frame_dma;
+}
+
+void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
+static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
+{
+ xp_dma_sync_for_cpu_slow(xskb);
+}
+
+void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
+ size_t size);
+static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
+ dma_addr_t dma, size_t size)
+{
+ if (!pool->dma_need_sync)
+ return;
+
+ xp_dma_sync_for_device_slow(pool, dma, size);
+}
+
+static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
+ u64 addr, u32 len)
+{
+ bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;
+
+ if (likely(!cross_pg))
+ return false;
+
+ return pool->dma_pages_cnt &&
+ !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
+}
+
+static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
+{
+ return addr & pool->chunk_mask;
+}
+
+static inline u64 xp_unaligned_extract_addr(u64 addr)
+{
+ return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
+}
+
+static inline u64 xp_unaligned_extract_offset(u64 addr)
+{
+ return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
+}
+
+static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
+{
+ return xp_unaligned_extract_addr(addr) +
+ xp_unaligned_extract_offset(addr);
+}
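+
+/* In the unaligned chunk mode encoding (from <linux/if_xdp.h>), the
+ * low 48 bits of a descriptor address hold the base chunk address and
+ * the upper 16 bits hold the offset into it, so for a hypothetical
+ * base/offset pair:
+ *
+ *     u64 addr = base | (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
+ *     xp_unaligned_add_offset_to_addr(addr) == base + offset
+ */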
+
+static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
+{
+ return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
+}
+
+static inline void xp_release(struct xdp_buff_xsk *xskb)
+{
+ if (xskb->pool->unaligned)
+ xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
+}
+
+static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
+{
+ u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+
+ offset += xskb->pool->headroom;
+ if (!xskb->pool->unaligned)
+ return xskb->orig_addr + offset;
+ return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
+}
+
+#endif /* XSK_BUFF_POOL_H_ */