Diffstat
 82 files changed, 81350 insertions(+), 0 deletions(-)
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig new file mode 100644 index 0000000000..51ec8256b7 --- /dev/null +++ b/net/mac80211/Kconfig @@ -0,0 +1,306 @@ +# SPDX-License-Identifier: GPL-2.0-only +config MAC80211 + tristate "Generic IEEE 802.11 Networking Stack (mac80211)" + depends on CFG80211 + select CRYPTO + select CRYPTO_LIB_ARC4 + select CRYPTO_AES + select CRYPTO_CCM + select CRYPTO_GCM + select CRYPTO_CMAC + select CRC32 + help + This option enables the hardware independent IEEE 802.11 + networking stack. + +comment "CFG80211 needs to be enabled for MAC80211" + depends on CFG80211=n + +if MAC80211 != n + +config MAC80211_HAS_RC + bool + +config MAC80211_RC_MINSTREL + bool "Minstrel" if EXPERT + select MAC80211_HAS_RC + default y + help + This option enables the 'minstrel' TX rate control algorithm + +choice + prompt "Default rate control algorithm" + depends on MAC80211_HAS_RC + default MAC80211_RC_DEFAULT_MINSTREL + help + This option selects the default rate control algorithm + mac80211 will use. Note that this default can still be + overridden through the ieee80211_default_rc_algo module + parameter if different algorithms are available. + +config MAC80211_RC_DEFAULT_MINSTREL + bool "Minstrel" + depends on MAC80211_RC_MINSTREL + help + Select Minstrel as the default rate control algorithm. + + +endchoice + +config MAC80211_RC_DEFAULT + string + default "minstrel_ht" if MAC80211_RC_DEFAULT_MINSTREL + default "" + +endif + +comment "Some wireless drivers require a rate control algorithm" + depends on MAC80211 && MAC80211_HAS_RC=n + +config MAC80211_MESH + bool "Enable mac80211 mesh networking support" + depends on MAC80211 + help + Select this option to enable 802.11 mesh operation in mac80211 + drivers that support it. 802.11 mesh connects multiple stations + over (possibly multi-hop) wireless links to form a single logical + LAN. + +config MAC80211_LEDS + bool "Enable LED triggers" + depends on MAC80211 + depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211 + select LEDS_TRIGGERS + help + This option enables a few LED triggers for different + packet receive/transmit events. + +config MAC80211_DEBUGFS + bool "Export mac80211 internals in DebugFS" + depends on MAC80211 && DEBUG_FS + help + Select this to see extensive information about + the internal state of mac80211 in debugfs. + + Say N unless you know you need this. + +config MAC80211_MESSAGE_TRACING + bool "Trace all mac80211 debug messages" + depends on MAC80211 + help + Select this option to have mac80211 register the + mac80211_msg trace subsystem with tracepoints to + collect all debugging messages, independent of + printing them into the kernel log. + + The overhead in this option is that all the messages + need to be present in the binary and formatted at + runtime for tracing. + +menuconfig MAC80211_DEBUG_MENU + bool "Select mac80211 debugging features" + depends on MAC80211 + help + This option collects various mac80211 debug settings. + +config MAC80211_NOINLINE + bool "Do not inline TX/RX handlers" + depends on MAC80211_DEBUG_MENU + help + This option affects code generation in mac80211, when + selected some functions are marked "noinline" to allow + easier debugging of problems in the transmit and receive + paths. + + This option increases code size a bit and inserts a lot + of function calls in the code, but is otherwise safe to + enable. + + If unsure, say N unless you expect to be finding problems + in mac80211. 
+ +config MAC80211_VERBOSE_DEBUG + bool "Verbose debugging output" + depends on MAC80211_DEBUG_MENU + help + Selecting this option causes mac80211 to print out + many debugging messages. It should not be selected + on production systems as some of the messages are + remotely triggerable. + + Do not select this option. + +config MAC80211_MLME_DEBUG + bool "Verbose managed MLME output" + depends on MAC80211_DEBUG_MENU + help + Selecting this option causes mac80211 to print out + debugging messages for the managed-mode MLME. It + should not be selected on production systems as some + of the messages are remotely triggerable. + + Do not select this option. + +config MAC80211_STA_DEBUG + bool "Verbose station debugging" + depends on MAC80211_DEBUG_MENU + help + Selecting this option causes mac80211 to print out + debugging messages for station addition/removal. + + Do not select this option. + +config MAC80211_HT_DEBUG + bool "Verbose HT debugging" + depends on MAC80211_DEBUG_MENU + help + This option enables 802.11n High Throughput features + debug tracing output. + + It should not be selected on production systems as some + of the messages are remotely triggerable. + + Do not select this option. + +config MAC80211_OCB_DEBUG + bool "Verbose OCB debugging" + depends on MAC80211_DEBUG_MENU + help + Selecting this option causes mac80211 to print out + very verbose OCB debugging messages. It should not + be selected on production systems as those messages + are remotely triggerable. + + Do not select this option. + +config MAC80211_IBSS_DEBUG + bool "Verbose IBSS debugging" + depends on MAC80211_DEBUG_MENU + help + Selecting this option causes mac80211 to print out + very verbose IBSS debugging messages. It should not + be selected on production systems as those messages + are remotely triggerable. + + Do not select this option. + +config MAC80211_PS_DEBUG + bool "Verbose powersave mode debugging" + depends on MAC80211_DEBUG_MENU + help + Selecting this option causes mac80211 to print out very + verbose power save mode debugging messages (when mac80211 + is an AP and has power saving stations.) + It should not be selected on production systems as those + messages are remotely triggerable. + + Do not select this option. + +config MAC80211_MPL_DEBUG + bool "Verbose mesh peer link debugging" + depends on MAC80211_DEBUG_MENU + depends on MAC80211_MESH + help + Selecting this option causes mac80211 to print out very + verbose mesh peer link debugging messages (when mac80211 + is taking part in a mesh network). + It should not be selected on production systems as those + messages are remotely triggerable. + + Do not select this option. + +config MAC80211_MPATH_DEBUG + bool "Verbose mesh path debugging" + depends on MAC80211_DEBUG_MENU + depends on MAC80211_MESH + help + Selecting this option causes mac80211 to print out very + verbose mesh path selection debugging messages (when mac80211 + is taking part in a mesh network). + It should not be selected on production systems as those + messages are remotely triggerable. + + Do not select this option. + +config MAC80211_MHWMP_DEBUG + bool "Verbose mesh HWMP routing debugging" + depends on MAC80211_DEBUG_MENU + depends on MAC80211_MESH + help + Selecting this option causes mac80211 to print out very + verbose mesh routing (HWMP) debugging messages (when mac80211 + is taking part in a mesh network). + It should not be selected on production systems as those + messages are remotely triggerable. + + Do not select this option. 
+ +config MAC80211_MESH_SYNC_DEBUG + bool "Verbose mesh synchronization debugging" + depends on MAC80211_DEBUG_MENU + depends on MAC80211_MESH + help + Selecting this option causes mac80211 to print out very verbose mesh + synchronization debugging messages (when mac80211 is taking part in a + mesh network). + + Do not select this option. + +config MAC80211_MESH_CSA_DEBUG + bool "Verbose mesh channel switch debugging" + depends on MAC80211_DEBUG_MENU + depends on MAC80211_MESH + help + Selecting this option causes mac80211 to print out very verbose mesh + channel switch debugging messages (when mac80211 is taking part in a + mesh network). + + Do not select this option. + +config MAC80211_MESH_PS_DEBUG + bool "Verbose mesh powersave debugging" + depends on MAC80211_DEBUG_MENU + depends on MAC80211_MESH + help + Selecting this option causes mac80211 to print out very verbose mesh + powersave debugging messages (when mac80211 is taking part in a + mesh network). + + Do not select this option. + +config MAC80211_TDLS_DEBUG + bool "Verbose TDLS debugging" + depends on MAC80211_DEBUG_MENU + help + Selecting this option causes mac80211 to print out very + verbose TDLS selection debugging messages (when mac80211 + is a TDLS STA). + It should not be selected on production systems as those + messages are remotely triggerable. + + Do not select this option. + +config MAC80211_DEBUG_COUNTERS + bool "Extra statistics for TX/RX debugging" + depends on MAC80211_DEBUG_MENU + depends on MAC80211_DEBUGFS + help + Selecting this option causes mac80211 to keep additional + and very verbose statistics about TX and RX handler use + as well as a few selected dot11 counters. These will be + exposed in debugfs. + + Note that some of the counters are not concurrency safe + and may thus not always be accurate. + + If unsure, say N. + +config MAC80211_STA_HASH_MAX_SIZE + int "Station hash table maximum size" if MAC80211_DEBUG_MENU + default 0 + help + Setting this option to a low value (e.g. 4) allows testing the + hash table with collisions relatively deterministically (just + connect more stations than the number selected here.) + + If unsure, leave the default of 0. 
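The rate-control entries in the Kconfig above describe a fallback order at module load time: an explicitly requested algorithm is used if available, otherwise the ieee80211_default_rc_algo module parameter, and finally the compile-time CONFIG_MAC80211_RC_DEFAULT string ("minstrel_ht" when MAC80211_RC_DEFAULT_MINSTREL is selected). The standalone C sketch below only illustrates that selection priority as stated in the help text; the function and table names (pick_rc_algo, registered_algos, rc_algo_available) are hypothetical and are not taken from the mac80211 sources.

```c
/*
 * Illustrative sketch (not mac80211 code): resolving a rate-control
 * algorithm name with the priority described in the Kconfig help text:
 *   1. a name explicitly requested by the caller (e.g. a driver),
 *   2. the ieee80211_default_rc_algo module parameter,
 *   3. the compile-time CONFIG_MAC80211_RC_DEFAULT string.
 * All identifiers below are hypothetical stand-ins.
 */
#include <stdio.h>
#include <string.h>

#define CONFIG_MAC80211_RC_DEFAULT "minstrel_ht"

/* stand-in for the module parameter; empty string means "not set" */
static const char *ieee80211_default_rc_algo = "";

/* stand-in for the registry of available rate-control algorithms */
static const char *registered_algos[] = { "minstrel_ht" };

static int rc_algo_available(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(registered_algos) / sizeof(registered_algos[0]); i++)
		if (strcmp(registered_algos[i], name) == 0)
			return 1;
	return 0;
}

static const char *pick_rc_algo(const char *requested)
{
	if (requested && rc_algo_available(requested))
		return requested;
	if (ieee80211_default_rc_algo[0] &&
	    rc_algo_available(ieee80211_default_rc_algo))
		return ieee80211_default_rc_algo;
	return CONFIG_MAC80211_RC_DEFAULT;
}

int main(void)
{
	/* a caller with no preference ends up with the Kconfig default */
	printf("selected: %s\n", pick_rc_algo(NULL));
	return 0;
}
```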
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile new file mode 100644 index 0000000000..b8de44da1f --- /dev/null +++ b/net/mac80211/Makefile @@ -0,0 +1,68 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_MAC80211) += mac80211.o + +# mac80211 objects +mac80211-y := \ + main.o status.o \ + driver-ops.o \ + sta_info.o \ + wep.o \ + aead_api.o \ + wpa.o \ + scan.o offchannel.o \ + ht.o agg-tx.o agg-rx.o \ + vht.o \ + he.o \ + s1g.o \ + ibss.o \ + iface.o \ + link.o \ + rate.o \ + michael.o \ + tkip.o \ + aes_cmac.o \ + aes_gmac.o \ + fils_aead.o \ + cfg.o \ + ethtool.o \ + rx.o \ + spectmgmt.o \ + tx.o \ + key.o \ + util.o \ + wme.o \ + chan.o \ + trace.o mlme.o \ + tdls.o \ + ocb.o \ + airtime.o \ + eht.o + +mac80211-$(CONFIG_MAC80211_LEDS) += led.o +mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ + debugfs.o \ + debugfs_sta.o \ + debugfs_netdev.o \ + debugfs_key.o + +mac80211-$(CONFIG_MAC80211_MESH) += \ + mesh.o \ + mesh_pathtbl.o \ + mesh_plink.o \ + mesh_hwmp.o \ + mesh_sync.o \ + mesh_ps.o + +mac80211-$(CONFIG_PM) += pm.o + +CFLAGS_trace.o := -I$(src) + +rc80211_minstrel-y := \ + rc80211_minstrel_ht.o + +rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += \ + rc80211_minstrel_ht_debugfs.o + +mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y) + +ccflags-y += -DDEBUG diff --git a/net/mac80211/aead_api.c b/net/mac80211/aead_api.c new file mode 100644 index 0000000000..b00d6f5b33 --- /dev/null +++ b/net/mac80211/aead_api.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2003-2004, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2014-2015, Qualcomm Atheros, Inc. + * + * Rewrite: Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org> + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/err.h> +#include <linux/scatterlist.h> +#include <crypto/aead.h> + +#include "aead_api.h" + +int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len, + u8 *data, size_t data_len, u8 *mic) +{ + size_t mic_len = crypto_aead_authsize(tfm); + struct scatterlist sg[3]; + struct aead_request *aead_req; + int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm); + u8 *__aad; + int ret; + + aead_req = kzalloc(reqsize + aad_len, GFP_ATOMIC); + if (!aead_req) + return -ENOMEM; + + __aad = (u8 *)aead_req + reqsize; + memcpy(__aad, aad, aad_len); + + sg_init_table(sg, 3); + sg_set_buf(&sg[0], __aad, aad_len); + sg_set_buf(&sg[1], data, data_len); + sg_set_buf(&sg[2], mic, mic_len); + + aead_request_set_tfm(aead_req, tfm); + aead_request_set_crypt(aead_req, sg, sg, data_len, b_0); + aead_request_set_ad(aead_req, sg[0].length); + + ret = crypto_aead_encrypt(aead_req); + kfree_sensitive(aead_req); + + return ret; +} + +int aead_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len, + u8 *data, size_t data_len, u8 *mic) +{ + size_t mic_len = crypto_aead_authsize(tfm); + struct scatterlist sg[3]; + struct aead_request *aead_req; + int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm); + u8 *__aad; + int err; + + if (data_len == 0) + return -EINVAL; + + aead_req = kzalloc(reqsize + aad_len, GFP_ATOMIC); + if (!aead_req) + return -ENOMEM; + + __aad = (u8 *)aead_req + reqsize; + memcpy(__aad, aad, aad_len); + + sg_init_table(sg, 3); + sg_set_buf(&sg[0], __aad, aad_len); + sg_set_buf(&sg[1], data, data_len); + sg_set_buf(&sg[2], mic, mic_len); + + aead_request_set_tfm(aead_req, tfm); + aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0); + 
aead_request_set_ad(aead_req, sg[0].length); + + err = crypto_aead_decrypt(aead_req); + kfree_sensitive(aead_req); + + return err; +} + +struct crypto_aead * +aead_key_setup_encrypt(const char *alg, const u8 key[], + size_t key_len, size_t mic_len) +{ + struct crypto_aead *tfm; + int err; + + tfm = crypto_alloc_aead(alg, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + return tfm; + + err = crypto_aead_setkey(tfm, key, key_len); + if (err) + goto free_aead; + err = crypto_aead_setauthsize(tfm, mic_len); + if (err) + goto free_aead; + + return tfm; + +free_aead: + crypto_free_aead(tfm); + return ERR_PTR(err); +} + +void aead_key_free(struct crypto_aead *tfm) +{ + crypto_free_aead(tfm); +} diff --git a/net/mac80211/aead_api.h b/net/mac80211/aead_api.h new file mode 100644 index 0000000000..7d463b8092 --- /dev/null +++ b/net/mac80211/aead_api.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _AEAD_API_H +#define _AEAD_API_H + +#include <crypto/aead.h> +#include <linux/crypto.h> + +struct crypto_aead * +aead_key_setup_encrypt(const char *alg, const u8 key[], + size_t key_len, size_t mic_len); + +int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, + size_t aad_len, u8 *data, + size_t data_len, u8 *mic); + +int aead_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, + size_t aad_len, u8 *data, + size_t data_len, u8 *mic); + +void aead_key_free(struct crypto_aead *tfm); + +#endif /* _AEAD_API_H */ diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h new file mode 100644 index 0000000000..96256193cf --- /dev/null +++ b/net/mac80211/aes_ccm.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2003-2004, Instant802 Networks, Inc. + * Copyright 2006, Devicescape Software, Inc. + */ + +#ifndef AES_CCM_H +#define AES_CCM_H + +#include "aead_api.h" + +#define CCM_AAD_LEN 32 + +static inline struct crypto_aead * +ieee80211_aes_key_setup_encrypt(const u8 key[], size_t key_len, size_t mic_len) +{ + return aead_key_setup_encrypt("ccm(aes)", key, key_len, mic_len); +} + +static inline int +ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, + u8 *b_0, u8 *aad, u8 *data, + size_t data_len, u8 *mic) +{ + return aead_encrypt(tfm, b_0, aad + 2, + be16_to_cpup((__be16 *)aad), + data, data_len, mic); +} + +static inline int +ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, + u8 *b_0, u8 *aad, u8 *data, + size_t data_len, u8 *mic) +{ + return aead_decrypt(tfm, b_0, aad + 2, + be16_to_cpup((__be16 *)aad), + data, data_len, mic); +} + +static inline void ieee80211_aes_key_free(struct crypto_aead *tfm) +{ + return aead_key_free(tfm); +} + +#endif /* AES_CCM_H */ diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c new file mode 100644 index 0000000000..48c04f89de --- /dev/null +++ b/net/mac80211/aes_cmac.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AES-128-CMAC with TLen 16 for IEEE 802.11w BIP + * Copyright 2008, Jouni Malinen <j@w1.fi> + * Copyright (C) 2020 Intel Corporation + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/crypto.h> +#include <linux/export.h> +#include <linux/err.h> +#include <crypto/aes.h> + +#include <net/mac80211.h> +#include "key.h" +#include "aes_cmac.h" + +#define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */ +#define CMAC_TLEN_256 16 /* CMAC TLen = 128 bits (16 octets) */ +#define AAD_LEN 20 + +static const u8 zero[CMAC_TLEN_256]; + +void ieee80211_aes_cmac(struct crypto_shash *tfm, const u8 *aad, + const u8 *data, size_t data_len, u8 *mic) +{ + 
SHASH_DESC_ON_STACK(desc, tfm); + u8 out[AES_BLOCK_SIZE]; + const __le16 *fc; + + desc->tfm = tfm; + + crypto_shash_init(desc); + crypto_shash_update(desc, aad, AAD_LEN); + fc = (const __le16 *)aad; + if (ieee80211_is_beacon(*fc)) { + /* mask Timestamp field to zero */ + crypto_shash_update(desc, zero, 8); + crypto_shash_update(desc, data + 8, data_len - 8 - CMAC_TLEN); + } else { + crypto_shash_update(desc, data, data_len - CMAC_TLEN); + } + crypto_shash_finup(desc, zero, CMAC_TLEN, out); + + memcpy(mic, out, CMAC_TLEN); +} + +void ieee80211_aes_cmac_256(struct crypto_shash *tfm, const u8 *aad, + const u8 *data, size_t data_len, u8 *mic) +{ + SHASH_DESC_ON_STACK(desc, tfm); + const __le16 *fc; + + desc->tfm = tfm; + + crypto_shash_init(desc); + crypto_shash_update(desc, aad, AAD_LEN); + fc = (const __le16 *)aad; + if (ieee80211_is_beacon(*fc)) { + /* mask Timestamp field to zero */ + crypto_shash_update(desc, zero, 8); + crypto_shash_update(desc, data + 8, + data_len - 8 - CMAC_TLEN_256); + } else { + crypto_shash_update(desc, data, data_len - CMAC_TLEN_256); + } + crypto_shash_finup(desc, zero, CMAC_TLEN_256, mic); +} + +struct crypto_shash *ieee80211_aes_cmac_key_setup(const u8 key[], + size_t key_len) +{ + struct crypto_shash *tfm; + + tfm = crypto_alloc_shash("cmac(aes)", 0, 0); + if (!IS_ERR(tfm)) { + int err = crypto_shash_setkey(tfm, key, key_len); + + if (err) { + crypto_free_shash(tfm); + return ERR_PTR(err); + } + } + + return tfm; +} + +void ieee80211_aes_cmac_key_free(struct crypto_shash *tfm) +{ + crypto_free_shash(tfm); +} diff --git a/net/mac80211/aes_cmac.h b/net/mac80211/aes_cmac.h new file mode 100644 index 0000000000..76817446fb --- /dev/null +++ b/net/mac80211/aes_cmac.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008, Jouni Malinen <j@w1.fi> + */ + +#ifndef AES_CMAC_H +#define AES_CMAC_H + +#include <linux/crypto.h> +#include <crypto/hash.h> + +struct crypto_shash *ieee80211_aes_cmac_key_setup(const u8 key[], + size_t key_len); +void ieee80211_aes_cmac(struct crypto_shash *tfm, const u8 *aad, + const u8 *data, size_t data_len, u8 *mic); +void ieee80211_aes_cmac_256(struct crypto_shash *tfm, const u8 *aad, + const u8 *data, size_t data_len, u8 *mic); +void ieee80211_aes_cmac_key_free(struct crypto_shash *tfm); + +#endif /* AES_CMAC_H */ diff --git a/net/mac80211/aes_gcm.h b/net/mac80211/aes_gcm.h new file mode 100644 index 0000000000..b14093b2f7 --- /dev/null +++ b/net/mac80211/aes_gcm.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2014-2015, Qualcomm Atheros, Inc. 
+ */ + +#ifndef AES_GCM_H +#define AES_GCM_H + +#include "aead_api.h" + +#define GCM_AAD_LEN 32 + +static inline int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, + u8 *j_0, u8 *aad, u8 *data, + size_t data_len, u8 *mic) +{ + return aead_encrypt(tfm, j_0, aad + 2, + be16_to_cpup((__be16 *)aad), + data, data_len, mic); +} + +static inline int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, + u8 *j_0, u8 *aad, u8 *data, + size_t data_len, u8 *mic) +{ + return aead_decrypt(tfm, j_0, aad + 2, + be16_to_cpup((__be16 *)aad), + data, data_len, mic); +} + +static inline struct crypto_aead * +ieee80211_aes_gcm_key_setup_encrypt(const u8 key[], size_t key_len) +{ + return aead_key_setup_encrypt("gcm(aes)", key, + key_len, IEEE80211_GCMP_MIC_LEN); +} + +static inline void ieee80211_aes_gcm_key_free(struct crypto_aead *tfm) +{ + return aead_key_free(tfm); +} + +#endif /* AES_GCM_H */ diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c new file mode 100644 index 0000000000..512cab073f --- /dev/null +++ b/net/mac80211/aes_gmac.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AES-GMAC for IEEE 802.11 BIP-GMAC-128 and BIP-GMAC-256 + * Copyright 2015, Qualcomm Atheros, Inc. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/err.h> +#include <crypto/aead.h> +#include <crypto/aes.h> + +#include <net/mac80211.h> +#include "key.h" +#include "aes_gmac.h" + +int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, + const u8 *data, size_t data_len, u8 *mic) +{ + struct scatterlist sg[5]; + u8 *zero, *__aad, iv[AES_BLOCK_SIZE]; + struct aead_request *aead_req; + int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm); + const __le16 *fc; + int ret; + + if (data_len < GMAC_MIC_LEN) + return -EINVAL; + + aead_req = kzalloc(reqsize + GMAC_MIC_LEN + GMAC_AAD_LEN, GFP_ATOMIC); + if (!aead_req) + return -ENOMEM; + + zero = (u8 *)aead_req + reqsize; + __aad = zero + GMAC_MIC_LEN; + memcpy(__aad, aad, GMAC_AAD_LEN); + + fc = (const __le16 *)aad; + if (ieee80211_is_beacon(*fc)) { + /* mask Timestamp field to zero */ + sg_init_table(sg, 5); + sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN); + sg_set_buf(&sg[1], zero, 8); + sg_set_buf(&sg[2], data + 8, data_len - 8 - GMAC_MIC_LEN); + sg_set_buf(&sg[3], zero, GMAC_MIC_LEN); + sg_set_buf(&sg[4], mic, GMAC_MIC_LEN); + } else { + sg_init_table(sg, 4); + sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN); + sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN); + sg_set_buf(&sg[2], zero, GMAC_MIC_LEN); + sg_set_buf(&sg[3], mic, GMAC_MIC_LEN); + } + + memcpy(iv, nonce, GMAC_NONCE_LEN); + memset(iv + GMAC_NONCE_LEN, 0, sizeof(iv) - GMAC_NONCE_LEN); + iv[AES_BLOCK_SIZE - 1] = 0x01; + + aead_request_set_tfm(aead_req, tfm); + aead_request_set_crypt(aead_req, sg, sg, 0, iv); + aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len); + + ret = crypto_aead_encrypt(aead_req); + kfree_sensitive(aead_req); + + return ret; +} + +struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[], + size_t key_len) +{ + struct crypto_aead *tfm; + int err; + + tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + return tfm; + + err = crypto_aead_setkey(tfm, key, key_len); + if (!err) + err = crypto_aead_setauthsize(tfm, GMAC_MIC_LEN); + if (!err) + return tfm; + + crypto_free_aead(tfm); + return ERR_PTR(err); +} + +void ieee80211_aes_gmac_key_free(struct crypto_aead *tfm) +{ + crypto_free_aead(tfm); +} diff --git a/net/mac80211/aes_gmac.h b/net/mac80211/aes_gmac.h new file mode 100644 index 
0000000000..c739356bae --- /dev/null +++ b/net/mac80211/aes_gmac.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2015, Qualcomm Atheros, Inc. + */ + +#ifndef AES_GMAC_H +#define AES_GMAC_H + +#include <linux/crypto.h> + +#define GMAC_AAD_LEN 20 +#define GMAC_MIC_LEN 16 +#define GMAC_NONCE_LEN 12 + +struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[], + size_t key_len); +int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, + const u8 *data, size_t data_len, u8 *mic); +void ieee80211_aes_gmac_key_free(struct crypto_aead *tfm); + +#endif /* AES_GMAC_H */ diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c new file mode 100644 index 0000000000..c6fa532304 --- /dev/null +++ b/net/mac80211/agg-rx.c @@ -0,0 +1,543 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HT handling + * + * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2007, Michael Wu <flamingice@sourmilk.net> + * Copyright 2007-2010, Intel Corporation + * Copyright(c) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2022 Intel Corporation + */ + +/** + * DOC: RX A-MPDU aggregation + * + * Aggregation on the RX side requires only implementing the + * @ampdu_action callback that is invoked to start/stop any + * block-ack sessions for RX aggregation. + * + * When RX aggregation is started by the peer, the driver is + * notified via @ampdu_action function, with the + * %IEEE80211_AMPDU_RX_START action, and may reject the request + * in which case a negative response is sent to the peer, if it + * accepts it a positive response is sent. + * + * While the session is active, the device/driver are required + * to de-aggregate frames and pass them up one by one to mac80211, + * which will handle the reorder buffer. + * + * When the aggregation session is stopped again by the peer or + * ourselves, the driver's @ampdu_action function will be called + * with the action %IEEE80211_AMPDU_RX_STOP. In this case, the + * call must not fail. + */ + +#include <linux/ieee80211.h> +#include <linux/slab.h> +#include <linux/export.h> +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "driver-ops.h" + +static void ieee80211_free_tid_rx(struct rcu_head *h) +{ + struct tid_ampdu_rx *tid_rx = + container_of(h, struct tid_ampdu_rx, rcu_head); + int i; + + for (i = 0; i < tid_rx->buf_size; i++) + __skb_queue_purge(&tid_rx->reorder_buf[i]); + kfree(tid_rx->reorder_buf); + kfree(tid_rx->reorder_time); + kfree(tid_rx); +} + +void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, + u16 initiator, u16 reason, bool tx) +{ + struct ieee80211_local *local = sta->local; + struct tid_ampdu_rx *tid_rx; + struct ieee80211_ampdu_params params = { + .sta = &sta->sta, + .action = IEEE80211_AMPDU_RX_STOP, + .tid = tid, + .amsdu = false, + .timeout = 0, + .ssn = 0, + }; + + lockdep_assert_held(&sta->ampdu_mlme.mtx); + + tid_rx = rcu_dereference_protected(sta->ampdu_mlme.tid_rx[tid], + lockdep_is_held(&sta->ampdu_mlme.mtx)); + + if (!test_bit(tid, sta->ampdu_mlme.agg_session_valid)) + return; + + RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL); + __clear_bit(tid, sta->ampdu_mlme.agg_session_valid); + + ht_dbg(sta->sdata, + "Rx BA session stop requested for %pM tid %u %s reason: %d\n", + sta->sta.addr, tid, + initiator == WLAN_BACK_RECIPIENT ? 
"recipient" : "initiator", + (int)reason); + + if (drv_ampdu_action(local, sta->sdata, ¶ms)) + sdata_info(sta->sdata, + "HW problem - can not stop rx aggregation for %pM tid %d\n", + sta->sta.addr, tid); + + /* check if this is a self generated aggregation halt */ + if (initiator == WLAN_BACK_RECIPIENT && tx) + ieee80211_send_delba(sta->sdata, sta->sta.addr, + tid, WLAN_BACK_RECIPIENT, reason); + + /* + * return here in case tid_rx is not assigned - which will happen if + * IEEE80211_HW_SUPPORTS_REORDERING_BUFFER is set. + */ + if (!tid_rx) + return; + + del_timer_sync(&tid_rx->session_timer); + + /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */ + spin_lock_bh(&tid_rx->reorder_lock); + tid_rx->removed = true; + spin_unlock_bh(&tid_rx->reorder_lock); + del_timer_sync(&tid_rx->reorder_timer); + + call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); +} + +void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, + u16 initiator, u16 reason, bool tx) +{ + mutex_lock(&sta->ampdu_mlme.mtx); + ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, tx); + mutex_unlock(&sta->ampdu_mlme.mtx); +} + +void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap, + const u8 *addr) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct sta_info *sta; + int i; + + rcu_read_lock(); + sta = sta_info_get_bss(sdata, addr); + if (!sta) { + rcu_read_unlock(); + return; + } + + for (i = 0; i < IEEE80211_NUM_TIDS; i++) + if (ba_rx_bitmap & BIT(i)) + set_bit(i, sta->ampdu_mlme.tid_rx_stop_requested); + + ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work); + rcu_read_unlock(); +} +EXPORT_SYMBOL(ieee80211_stop_rx_ba_session); + +/* + * After accepting the AddBA Request we activated a timer, + * resetting it after each frame that arrives from the originator. 
+ */ +static void sta_rx_agg_session_timer_expired(struct timer_list *t) +{ + struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, session_timer); + struct sta_info *sta = tid_rx->sta; + u8 tid = tid_rx->tid; + unsigned long timeout; + + timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout); + if (time_is_after_jiffies(timeout)) { + mod_timer(&tid_rx->session_timer, timeout); + return; + } + + ht_dbg(sta->sdata, "RX session timer expired on %pM tid %d\n", + sta->sta.addr, tid); + + set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired); + ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work); +} + +static void sta_rx_agg_reorder_timer_expired(struct timer_list *t) +{ + struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, reorder_timer); + + rcu_read_lock(); + ieee80211_release_reorder_timeout(tid_rx->sta, tid_rx->tid); + rcu_read_unlock(); +} + +static void ieee80211_add_addbaext(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, + const struct ieee80211_addba_ext_ie *req, + u16 buf_size) +{ + struct ieee80211_addba_ext_ie *resp; + u8 *pos; + + pos = skb_put_zero(skb, 2 + sizeof(struct ieee80211_addba_ext_ie)); + *pos++ = WLAN_EID_ADDBA_EXT; + *pos++ = sizeof(struct ieee80211_addba_ext_ie); + resp = (struct ieee80211_addba_ext_ie *)pos; + resp->data = req->data & IEEE80211_ADDBA_EXT_NO_FRAG; + + resp->data |= u8_encode_bits(buf_size >> IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT, + IEEE80211_ADDBA_EXT_BUF_SIZE_MASK); +} + +static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid, + u8 dialog_token, u16 status, u16 policy, + u16 buf_size, u16 timeout, + const struct ieee80211_addba_ext_ie *addbaext) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt; + bool amsdu = ieee80211_hw_check(&local->hw, SUPPORTS_AMSDU_IN_AMPDU); + u16 capab; + + skb = dev_alloc_skb(sizeof(*mgmt) + + 2 + sizeof(struct ieee80211_addba_ext_ie) + + local->hw.extra_tx_headroom); + if (!skb) + return; + + skb_reserve(skb, local->hw.extra_tx_headroom); + mgmt = skb_put_zero(skb, 24); + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + if (sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + sdata->vif.type == NL80211_IFTYPE_MESH_POINT) + memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); + else if (sdata->vif.type == NL80211_IFTYPE_STATION) + memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); + else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) + memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN); + + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + + skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp)); + mgmt->u.action.category = WLAN_CATEGORY_BACK; + mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP; + mgmt->u.action.u.addba_resp.dialog_token = dialog_token; + + capab = u16_encode_bits(amsdu, IEEE80211_ADDBA_PARAM_AMSDU_MASK); + capab |= u16_encode_bits(policy, IEEE80211_ADDBA_PARAM_POLICY_MASK); + capab |= u16_encode_bits(tid, IEEE80211_ADDBA_PARAM_TID_MASK); + capab |= u16_encode_bits(buf_size, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK); + + mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab); + mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); + mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); + + if (sta->sta.deflink.he_cap.has_he && addbaext) + ieee80211_add_addbaext(sdata, skb, addbaext, buf_size); + + ieee80211_tx_skb(sdata, skb); +} + +void 
___ieee80211_start_rx_ba_session(struct sta_info *sta, + u8 dialog_token, u16 timeout, + u16 start_seq_num, u16 ba_policy, u16 tid, + u16 buf_size, bool tx, bool auto_seq, + const struct ieee80211_addba_ext_ie *addbaext) +{ + struct ieee80211_local *local = sta->sdata->local; + struct tid_ampdu_rx *tid_agg_rx; + struct ieee80211_ampdu_params params = { + .sta = &sta->sta, + .action = IEEE80211_AMPDU_RX_START, + .tid = tid, + .amsdu = false, + .timeout = timeout, + .ssn = start_seq_num, + }; + int i, ret = -EOPNOTSUPP; + u16 status = WLAN_STATUS_REQUEST_DECLINED; + u16 max_buf_size; + + if (tid >= IEEE80211_FIRST_TSPEC_TSID) { + ht_dbg(sta->sdata, + "STA %pM requests BA session on unsupported tid %d\n", + sta->sta.addr, tid); + goto end; + } + + if (!sta->sta.deflink.ht_cap.ht_supported && + !sta->sta.deflink.he_cap.has_he) { + ht_dbg(sta->sdata, + "STA %pM erroneously requests BA session on tid %d w/o HT\n", + sta->sta.addr, tid); + /* send a response anyway, it's an error case if we get here */ + goto end; + } + + if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) { + ht_dbg(sta->sdata, + "Suspend in progress - Denying ADDBA request (%pM tid %d)\n", + sta->sta.addr, tid); + goto end; + } + + if (sta->sta.deflink.eht_cap.has_eht) + max_buf_size = IEEE80211_MAX_AMPDU_BUF_EHT; + else if (sta->sta.deflink.he_cap.has_he) + max_buf_size = IEEE80211_MAX_AMPDU_BUF_HE; + else + max_buf_size = IEEE80211_MAX_AMPDU_BUF_HT; + + /* sanity check for incoming parameters: + * check if configuration can support the BA policy + * and if buffer size does not exceeds max value */ + /* XXX: check own ht delayed BA capability?? */ + if (((ba_policy != 1) && + (!(sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) || + (buf_size > max_buf_size)) { + status = WLAN_STATUS_INVALID_QOS_PARAM; + ht_dbg_ratelimited(sta->sdata, + "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n", + sta->sta.addr, tid, ba_policy, buf_size); + goto end; + } + /* determine default buffer size */ + if (buf_size == 0) + buf_size = max_buf_size; + + /* make sure the size doesn't exceed the maximum supported by the hw */ + if (buf_size > sta->sta.max_rx_aggregation_subframes) + buf_size = sta->sta.max_rx_aggregation_subframes; + params.buf_size = buf_size; + + ht_dbg(sta->sdata, "AddBA Req buf_size=%d for %pM\n", + buf_size, sta->sta.addr); + + /* examine state machine */ + lockdep_assert_held(&sta->ampdu_mlme.mtx); + + if (test_bit(tid, sta->ampdu_mlme.agg_session_valid)) { + if (sta->ampdu_mlme.tid_rx_token[tid] == dialog_token) { + struct tid_ampdu_rx *tid_rx; + + ht_dbg_ratelimited(sta->sdata, + "updated AddBA Req from %pM on tid %u\n", + sta->sta.addr, tid); + /* We have no API to update the timeout value in the + * driver so reject the timeout update if the timeout + * changed. If it did not change, i.e., no real update, + * just reply with success. 
+ */ + rcu_read_lock(); + tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); + if (tid_rx && tid_rx->timeout == timeout) + status = WLAN_STATUS_SUCCESS; + else + status = WLAN_STATUS_REQUEST_DECLINED; + rcu_read_unlock(); + goto end; + } + + ht_dbg_ratelimited(sta->sdata, + "unexpected AddBA Req from %pM on tid %u\n", + sta->sta.addr, tid); + + /* delete existing Rx BA session on the same tid */ + ___ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, + WLAN_STATUS_UNSPECIFIED_QOS, + false); + } + + if (ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) { + ret = drv_ampdu_action(local, sta->sdata, ¶ms); + ht_dbg(sta->sdata, + "Rx A-MPDU request on %pM tid %d result %d\n", + sta->sta.addr, tid, ret); + if (!ret) + status = WLAN_STATUS_SUCCESS; + goto end; + } + + /* prepare A-MPDU MLME for Rx aggregation */ + tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL); + if (!tid_agg_rx) + goto end; + + spin_lock_init(&tid_agg_rx->reorder_lock); + + /* rx timer */ + timer_setup(&tid_agg_rx->session_timer, + sta_rx_agg_session_timer_expired, TIMER_DEFERRABLE); + + /* rx reorder timer */ + timer_setup(&tid_agg_rx->reorder_timer, + sta_rx_agg_reorder_timer_expired, 0); + + /* prepare reordering buffer */ + tid_agg_rx->reorder_buf = + kcalloc(buf_size, sizeof(struct sk_buff_head), GFP_KERNEL); + tid_agg_rx->reorder_time = + kcalloc(buf_size, sizeof(unsigned long), GFP_KERNEL); + if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) { + kfree(tid_agg_rx->reorder_buf); + kfree(tid_agg_rx->reorder_time); + kfree(tid_agg_rx); + goto end; + } + + for (i = 0; i < buf_size; i++) + __skb_queue_head_init(&tid_agg_rx->reorder_buf[i]); + + ret = drv_ampdu_action(local, sta->sdata, ¶ms); + ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n", + sta->sta.addr, tid, ret); + if (ret) { + kfree(tid_agg_rx->reorder_buf); + kfree(tid_agg_rx->reorder_time); + kfree(tid_agg_rx); + goto end; + } + + /* update data */ + tid_agg_rx->ssn = start_seq_num; + tid_agg_rx->head_seq_num = start_seq_num; + tid_agg_rx->buf_size = buf_size; + tid_agg_rx->timeout = timeout; + tid_agg_rx->stored_mpdu_num = 0; + tid_agg_rx->auto_seq = auto_seq; + tid_agg_rx->started = false; + tid_agg_rx->reorder_buf_filtered = 0; + tid_agg_rx->tid = tid; + tid_agg_rx->sta = sta; + status = WLAN_STATUS_SUCCESS; + + /* activate it for RX */ + rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx); + + if (timeout) { + mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout)); + tid_agg_rx->last_rx = jiffies; + } + +end: + if (status == WLAN_STATUS_SUCCESS) { + __set_bit(tid, sta->ampdu_mlme.agg_session_valid); + __clear_bit(tid, sta->ampdu_mlme.unexpected_agg); + sta->ampdu_mlme.tid_rx_token[tid] = dialog_token; + } + + if (tx) + ieee80211_send_addba_resp(sta, sta->sta.addr, tid, + dialog_token, status, 1, buf_size, + timeout, addbaext); +} + +static void __ieee80211_start_rx_ba_session(struct sta_info *sta, + u8 dialog_token, u16 timeout, + u16 start_seq_num, u16 ba_policy, + u16 tid, u16 buf_size, bool tx, + bool auto_seq, + const struct ieee80211_addba_ext_ie *addbaext) +{ + mutex_lock(&sta->ampdu_mlme.mtx); + ___ieee80211_start_rx_ba_session(sta, dialog_token, timeout, + start_seq_num, ba_policy, tid, + buf_size, tx, auto_seq, addbaext); + mutex_unlock(&sta->ampdu_mlme.mtx); +} + +void ieee80211_process_addba_request(struct ieee80211_local *local, + struct sta_info *sta, + struct ieee80211_mgmt *mgmt, + size_t len) +{ + u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num; + struct 
ieee802_11_elems *elems = NULL; + u8 dialog_token; + int ies_len; + + /* extract session parameters from addba request frame */ + dialog_token = mgmt->u.action.u.addba_req.dialog_token; + timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout); + start_seq_num = + le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4; + + capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); + ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1; + tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; + buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; + + ies_len = len - offsetof(struct ieee80211_mgmt, + u.action.u.addba_req.variable); + if (ies_len) { + elems = ieee802_11_parse_elems(mgmt->u.action.u.addba_req.variable, + ies_len, true, NULL); + if (!elems || elems->parse_error) + goto free; + } + + if (sta->sta.deflink.eht_cap.has_eht && elems && elems->addba_ext_ie) { + u8 buf_size_1k = u8_get_bits(elems->addba_ext_ie->data, + IEEE80211_ADDBA_EXT_BUF_SIZE_MASK); + + buf_size |= buf_size_1k << IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT; + } + + __ieee80211_start_rx_ba_session(sta, dialog_token, timeout, + start_seq_num, ba_policy, tid, + buf_size, true, false, + elems ? elems->addba_ext_ie : NULL); +free: + kfree(elems); +} + +void ieee80211_manage_rx_ba_offl(struct ieee80211_vif *vif, + const u8 *addr, unsigned int tid) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + rcu_read_lock(); + sta = sta_info_get_bss(sdata, addr); + if (!sta) + goto unlock; + + set_bit(tid, sta->ampdu_mlme.tid_rx_manage_offl); + ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); + unlock: + rcu_read_unlock(); +} +EXPORT_SYMBOL(ieee80211_manage_rx_ba_offl); + +void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif, + const u8 *addr, unsigned int tid) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + rcu_read_lock(); + sta = sta_info_get_bss(sdata, addr); + if (!sta) + goto unlock; + + set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired); + ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); + + unlock: + rcu_read_unlock(); +} +EXPORT_SYMBOL(ieee80211_rx_ba_timer_expired); diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c new file mode 100644 index 0000000000..b6b7726858 --- /dev/null +++ b/net/mac80211/agg-tx.c @@ -0,0 +1,1073 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HT handling + * + * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2007, Michael Wu <flamingice@sourmilk.net> + * Copyright 2007-2010, Intel Corporation + * Copyright(c) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018 - 2023 Intel Corporation + */ + +#include <linux/ieee80211.h> +#include <linux/slab.h> +#include <linux/export.h> +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "wme.h" + +/** + * DOC: TX A-MPDU aggregation + * + * Aggregation on the TX side requires setting the hardware flag + * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed + * packets with a flag indicating A-MPDU aggregation. The driver + * or device is responsible for actually aggregating the frames, + * as well as deciding how many and which to aggregate. 
+ * + * When TX aggregation is started by some subsystem (usually the rate + * control algorithm would be appropriate) by calling the + * ieee80211_start_tx_ba_session() function, the driver will be + * notified via its @ampdu_action function, with the + * %IEEE80211_AMPDU_TX_START action. + * + * In response to that, the driver is later required to call the + * ieee80211_start_tx_ba_cb_irqsafe() function, which will really + * start the aggregation session after the peer has also responded. + * If the peer responds negatively, the session will be stopped + * again right away. Note that it is possible for the aggregation + * session to be stopped before the driver has indicated that it + * is done setting it up, in which case it must not indicate the + * setup completion. + * + * Also note that, since we also need to wait for a response from + * the peer, the driver is notified of the completion of the + * handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the + * @ampdu_action callback. + * + * Similarly, when the aggregation session is stopped by the peer + * or something calling ieee80211_stop_tx_ba_session(), the driver's + * @ampdu_action function will be called with the action + * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail, + * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe(). + * Note that the sta can get destroyed before the BA tear down is + * complete. + */ + +static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, + const u8 *da, u16 tid, + u8 dialog_token, u16 start_seq_num, + u16 agg_size, u16 timeout) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt; + u16 capab; + + skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); + + if (!skb) + return; + + skb_reserve(skb, local->hw.extra_tx_headroom); + mgmt = skb_put_zero(skb, 24); + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + if (sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + sdata->vif.type == NL80211_IFTYPE_MESH_POINT) + memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); + else if (sdata->vif.type == NL80211_IFTYPE_STATION) + memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); + else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) + memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN); + + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + + skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req)); + + mgmt->u.action.category = WLAN_CATEGORY_BACK; + mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ; + + mgmt->u.action.u.addba_req.dialog_token = dialog_token; + capab = IEEE80211_ADDBA_PARAM_AMSDU_MASK; + capab |= IEEE80211_ADDBA_PARAM_POLICY_MASK; + capab |= u16_encode_bits(tid, IEEE80211_ADDBA_PARAM_TID_MASK); + capab |= u16_encode_bits(agg_size, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK); + + mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab); + + mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout); + mgmt->u.action.u.addba_req.start_seq_num = + cpu_to_le16(start_seq_num << 4); + + ieee80211_tx_skb_tid(sdata, skb, tid, -1); +} + +void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_bar *bar; + u16 bar_control = 0; + + skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); + if (!skb) + return; + + 
skb_reserve(skb, local->hw.extra_tx_headroom); + bar = skb_put_zero(skb, sizeof(*bar)); + bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | + IEEE80211_STYPE_BACK_REQ); + memcpy(bar->ra, ra, ETH_ALEN); + memcpy(bar->ta, sdata->vif.addr, ETH_ALEN); + bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; + bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; + bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT); + bar->control = cpu_to_le16(bar_control); + bar->start_seq_num = cpu_to_le16(ssn); + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | + IEEE80211_TX_CTL_REQ_TX_STATUS; + ieee80211_tx_skb_tid(sdata, skb, tid, -1); +} +EXPORT_SYMBOL(ieee80211_send_bar); + +void ieee80211_assign_tid_tx(struct sta_info *sta, int tid, + struct tid_ampdu_tx *tid_tx) +{ + lockdep_assert_held(&sta->ampdu_mlme.mtx); + lockdep_assert_held(&sta->lock); + rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx); +} + +/* + * When multiple aggregation sessions on multiple stations + * are being created/destroyed simultaneously, we need to + * refcount the global queue stop caused by that in order + * to not get into a situation where one of the aggregation + * setup or teardown re-enables queues before the other is + * ready to handle that. + * + * These two functions take care of this issue by keeping + * a global "agg_queue_stop" refcount. + */ +static void __acquires(agg_queue) +ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid) +{ + int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)]; + + /* we do refcounting here, so don't use the queue reason refcounting */ + + if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1) + ieee80211_stop_queue_by_reason( + &sdata->local->hw, queue, + IEEE80211_QUEUE_STOP_REASON_AGGREGATION, + false); + __acquire(agg_queue); +} + +static void __releases(agg_queue) +ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid) +{ + int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)]; + + if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0) + ieee80211_wake_queue_by_reason( + &sdata->local->hw, queue, + IEEE80211_QUEUE_STOP_REASON_AGGREGATION, + false); + __release(agg_queue); +} + +static void +ieee80211_agg_stop_txq(struct sta_info *sta, int tid) +{ + struct ieee80211_txq *txq = sta->sta.txq[tid]; + struct ieee80211_sub_if_data *sdata; + struct fq *fq; + struct txq_info *txqi; + + if (!txq) + return; + + txqi = to_txq_info(txq); + sdata = vif_to_sdata(txq->vif); + fq = &sdata->local->fq; + + /* Lock here to protect against further seqno updates on dequeue */ + spin_lock_bh(&fq->lock); + set_bit(IEEE80211_TXQ_STOP, &txqi->flags); + spin_unlock_bh(&fq->lock); +} + +static void +ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable) +{ + struct ieee80211_txq *txq = sta->sta.txq[tid]; + struct txq_info *txqi; + + lockdep_assert_held(&sta->ampdu_mlme.mtx); + + if (!txq) + return; + + txqi = to_txq_info(txq); + + if (enable) + set_bit(IEEE80211_TXQ_AMPDU, &txqi->flags); + else + clear_bit(IEEE80211_TXQ_AMPDU, &txqi->flags); + + clear_bit(IEEE80211_TXQ_STOP, &txqi->flags); + local_bh_disable(); + rcu_read_lock(); + schedule_and_wake_txq(sta->sdata->local, txqi); + rcu_read_unlock(); + local_bh_enable(); +} + +/* + * splice packets from the STA's pending to the local pending, + * requires a call to ieee80211_agg_splice_finish later + */ +static void __acquires(agg_queue) +ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata, + struct tid_ampdu_tx 
*tid_tx, u16 tid) +{ + struct ieee80211_local *local = sdata->local; + int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)]; + unsigned long flags; + + ieee80211_stop_queue_agg(sdata, tid); + + if (WARN(!tid_tx, + "TID %d gone but expected when splicing aggregates from the pending queue\n", + tid)) + return; + + if (!skb_queue_empty(&tid_tx->pending)) { + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + /* copy over remaining packets */ + skb_queue_splice_tail_init(&tid_tx->pending, + &local->pending[queue]); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + } +} + +static void __releases(agg_queue) +ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid) +{ + ieee80211_wake_queue_agg(sdata, tid); +} + +static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid) +{ + struct tid_ampdu_tx *tid_tx; + + lockdep_assert_held(&sta->ampdu_mlme.mtx); + lockdep_assert_held(&sta->lock); + + tid_tx = rcu_dereference_protected_tid_tx(sta, tid); + + /* + * When we get here, the TX path will not be lockless any more wrt. + * aggregation, since the OPERATIONAL bit has long been cleared. + * Thus it will block on getting the lock, if it occurs. So if we + * stop the queue now, we will not get any more packets, and any + * that might be being processed will wait for us here, thereby + * guaranteeing that no packets go to the tid_tx pending queue any + * more. + */ + + ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid); + + /* future packets must not find the tid_tx struct any more */ + ieee80211_assign_tid_tx(sta, tid, NULL); + + ieee80211_agg_splice_finish(sta->sdata, tid); + + kfree_rcu(tid_tx, rcu_head); +} + +int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, + enum ieee80211_agg_stop_reason reason) +{ + struct ieee80211_local *local = sta->local; + struct tid_ampdu_tx *tid_tx; + struct ieee80211_ampdu_params params = { + .sta = &sta->sta, + .tid = tid, + .buf_size = 0, + .amsdu = false, + .timeout = 0, + .ssn = 0, + }; + int ret; + + lockdep_assert_held(&sta->ampdu_mlme.mtx); + + switch (reason) { + case AGG_STOP_DECLINED: + case AGG_STOP_LOCAL_REQUEST: + case AGG_STOP_PEER_REQUEST: + params.action = IEEE80211_AMPDU_TX_STOP_CONT; + break; + case AGG_STOP_DESTROY_STA: + params.action = IEEE80211_AMPDU_TX_STOP_FLUSH; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + spin_lock_bh(&sta->lock); + + /* free struct pending for start, if present */ + tid_tx = sta->ampdu_mlme.tid_start_tx[tid]; + kfree(tid_tx); + sta->ampdu_mlme.tid_start_tx[tid] = NULL; + + tid_tx = rcu_dereference_protected_tid_tx(sta, tid); + if (!tid_tx) { + spin_unlock_bh(&sta->lock); + return -ENOENT; + } + + /* + * if we're already stopping ignore any new requests to stop + * unless we're destroying it in which case notify the driver + */ + if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { + spin_unlock_bh(&sta->lock); + if (reason != AGG_STOP_DESTROY_STA) + return -EALREADY; + params.action = IEEE80211_AMPDU_TX_STOP_FLUSH_CONT; + ret = drv_ampdu_action(local, sta->sdata, ¶ms); + WARN_ON_ONCE(ret); + return 0; + } + + if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { + /* not even started yet! 
*/ + ieee80211_assign_tid_tx(sta, tid, NULL); + spin_unlock_bh(&sta->lock); + kfree_rcu(tid_tx, rcu_head); + return 0; + } + + set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); + + ieee80211_agg_stop_txq(sta, tid); + + spin_unlock_bh(&sta->lock); + + ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n", + sta->sta.addr, tid); + + del_timer_sync(&tid_tx->addba_resp_timer); + del_timer_sync(&tid_tx->session_timer); + + /* + * After this packets are no longer handed right through + * to the driver but are put onto tid_tx->pending instead, + * with locking to ensure proper access. + */ + clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); + + /* + * There might be a few packets being processed right now (on + * another CPU) that have already gotten past the aggregation + * check when it was still OPERATIONAL and consequently have + * IEEE80211_TX_CTL_AMPDU set. In that case, this code might + * call into the driver at the same time or even before the + * TX paths calls into it, which could confuse the driver. + * + * Wait for all currently running TX paths to finish before + * telling the driver. New packets will not go through since + * the aggregation session is no longer OPERATIONAL. + */ + if (!local->in_reconfig) + synchronize_net(); + + tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ? + WLAN_BACK_RECIPIENT : + WLAN_BACK_INITIATOR; + tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST; + + ret = drv_ampdu_action(local, sta->sdata, ¶ms); + + /* HW shall not deny going back to legacy */ + if (WARN_ON(ret)) { + /* + * We may have pending packets get stuck in this case... + * Not bothering with a workaround for now. + */ + } + + /* + * In the case of AGG_STOP_DESTROY_STA, the driver won't + * necessarily call ieee80211_stop_tx_ba_cb(), so this may + * seem like we can leave the tid_tx data pending forever. + * This is true, in a way, but "forever" is only until the + * station struct is actually destroyed. In the meantime, + * leaving it around ensures that we don't transmit packets + * to the driver on this TID which might confuse it. + */ + + return 0; +} + +/* + * After sending add Block Ack request we activated a timer until + * add Block Ack response will arrive from the recipient. + * If this timer expires sta_addba_resp_timer_expired will be executed. 
+ */ +static void sta_addba_resp_timer_expired(struct timer_list *t) +{ + struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, addba_resp_timer); + struct sta_info *sta = tid_tx->sta; + u8 tid = tid_tx->tid; + + /* check if the TID waits for addBA response */ + if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) { + ht_dbg(sta->sdata, + "timer expired on %pM tid %d not expecting addBA response\n", + sta->sta.addr, tid); + return; + } + + ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n", + sta->sta.addr, tid); + + ieee80211_stop_tx_ba_session(&sta->sta, tid); +} + +static void ieee80211_send_addba_with_timeout(struct sta_info *sta, + struct tid_ampdu_tx *tid_tx) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sta->local; + u8 tid = tid_tx->tid; + u16 buf_size; + + if (WARN_ON_ONCE(test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state) || + test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))) + return; + + lockdep_assert_held(&sta->ampdu_mlme.mtx); + + /* activate the timer for the recipient's addBA response */ + mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL); + ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n", + sta->sta.addr, tid); + + spin_lock_bh(&sta->lock); + sta->ampdu_mlme.last_addba_req_time[tid] = jiffies; + sta->ampdu_mlme.addba_req_num[tid]++; + spin_unlock_bh(&sta->lock); + + if (sta->sta.deflink.he_cap.has_he) { + buf_size = local->hw.max_tx_aggregation_subframes; + } else { + /* + * We really should use what the driver told us it will + * transmit as the maximum, but certain APs (e.g. the + * LinkSys WRT120N with FW v1.0.07 build 002 Jun 18 2012) + * will crash when we use a lower number. + */ + buf_size = IEEE80211_MAX_AMPDU_BUF_HT; + } + + /* send AddBA request */ + ieee80211_send_addba_request(sdata, sta->sta.addr, tid, + tid_tx->dialog_token, tid_tx->ssn, + buf_size, tid_tx->timeout); + + WARN_ON(test_and_set_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state)); +} + +void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) +{ + struct tid_ampdu_tx *tid_tx; + struct ieee80211_local *local = sta->local; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_ampdu_params params = { + .sta = &sta->sta, + .action = IEEE80211_AMPDU_TX_START, + .tid = tid, + .buf_size = 0, + .amsdu = false, + .timeout = 0, + }; + int ret; + + tid_tx = rcu_dereference_protected_tid_tx(sta, tid); + + /* + * Start queuing up packets for this aggregation session. + * We're going to release them once the driver is OK with + * that. + */ + clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); + + /* + * Make sure no packets are being processed. This ensures that + * we have a valid starting sequence number and that in-flight + * packets have been flushed out and no packets for this TID + * will go into the driver during the ampdu_action call. + */ + synchronize_net(); + + sdata = sta->sdata; + params.ssn = sta->tid_seq[tid] >> 4; + ret = drv_ampdu_action(local, sdata, ¶ms); + tid_tx->ssn = params.ssn; + if (ret == IEEE80211_AMPDU_TX_START_DELAY_ADDBA) { + return; + } else if (ret == IEEE80211_AMPDU_TX_START_IMMEDIATE) { + /* + * We didn't send the request yet, so don't need to check + * here if we already got a response, just mark as driver + * ready immediately. 
+ */ + set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state); + } else if (ret) { + if (!sdata) + return; + + ht_dbg(sdata, + "BA request denied - HW unavailable for %pM tid %d\n", + sta->sta.addr, tid); + spin_lock_bh(&sta->lock); + ieee80211_agg_splice_packets(sdata, tid_tx, tid); + ieee80211_assign_tid_tx(sta, tid, NULL); + ieee80211_agg_splice_finish(sdata, tid); + spin_unlock_bh(&sta->lock); + + ieee80211_agg_start_txq(sta, tid, false); + + kfree_rcu(tid_tx, rcu_head); + return; + } + + ieee80211_send_addba_with_timeout(sta, tid_tx); +} + +void ieee80211_refresh_tx_agg_session_timer(struct ieee80211_sta *pubsta, + u16 tid) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct tid_ampdu_tx *tid_tx; + + if (WARN_ON_ONCE(tid >= IEEE80211_NUM_TIDS)) + return; + + tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); + if (!tid_tx) + return; + + tid_tx->last_tx = jiffies; +} +EXPORT_SYMBOL(ieee80211_refresh_tx_agg_session_timer); + +/* + * After accepting the AddBA Response we activated a timer, + * resetting it after each frame that we send. + */ +static void sta_tx_agg_session_timer_expired(struct timer_list *t) +{ + struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, session_timer); + struct sta_info *sta = tid_tx->sta; + u8 tid = tid_tx->tid; + unsigned long timeout; + + if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { + return; + } + + timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout); + if (time_is_after_jiffies(timeout)) { + mod_timer(&tid_tx->session_timer, timeout); + return; + } + + ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n", + sta->sta.addr, tid); + + ieee80211_stop_tx_ba_session(&sta->sta, tid); +} + +int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, + u16 timeout) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + struct tid_ampdu_tx *tid_tx; + int ret = 0; + + trace_api_start_tx_ba_session(pubsta, tid); + + if (WARN(sta->reserved_tid == tid, + "Requested to start BA session on reserved tid=%d", tid)) + return -EINVAL; + + if (!pubsta->deflink.ht_cap.ht_supported && + sta->sdata->vif.bss_conf.chandef.chan->band != NL80211_BAND_6GHZ) + return -EINVAL; + + if (WARN_ON_ONCE(!local->ops->ampdu_action)) + return -EINVAL; + + if ((tid >= IEEE80211_NUM_TIDS) || + !ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) || + ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) + return -EINVAL; + + if (WARN_ON(tid >= IEEE80211_FIRST_TSPEC_TSID)) + return -EINVAL; + + ht_dbg(sdata, "Open BA session requested for %pM tid %u\n", + pubsta->addr, tid); + + if (sdata->vif.type != NL80211_IFTYPE_STATION && + sdata->vif.type != NL80211_IFTYPE_MESH_POINT && + sdata->vif.type != NL80211_IFTYPE_AP_VLAN && + sdata->vif.type != NL80211_IFTYPE_AP && + sdata->vif.type != NL80211_IFTYPE_ADHOC) + return -EINVAL; + + if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) { + ht_dbg(sdata, + "BA sessions blocked - Denying BA session request %pM tid %d\n", + sta->sta.addr, tid); + return -EINVAL; + } + + if (test_sta_flag(sta, WLAN_STA_MFP) && + !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { + ht_dbg(sdata, + "MFP STA not authorized - deny BA session request %pM tid %d\n", + sta->sta.addr, tid); + return -EINVAL; + } + + /* + * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a + * member of an IBSS, and has no other existing Block Ack agreement + * with the recipient STA, then the initiating STA shall transmit a 
+ * Probe Request frame to the recipient STA and shall not transmit an + * ADDBA Request frame unless it receives a Probe Response frame + * from the recipient within dot11ADDBAFailureTimeout. + * + * The probe request mechanism for ADDBA is currently not implemented, + * but we only build up Block Ack session with HT STAs. This information + * is set when we receive a bss info from a probe response or a beacon. + */ + if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC && + !sta->sta.deflink.ht_cap.ht_supported) { + ht_dbg(sdata, + "BA request denied - IBSS STA %pM does not advertise HT support\n", + pubsta->addr); + return -EINVAL; + } + + spin_lock_bh(&sta->lock); + + /* we have tried too many times, receiver does not want A-MPDU */ + if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { + ret = -EBUSY; + goto err_unlock_sta; + } + + /* + * if we have tried more than HT_AGG_BURST_RETRIES times we + * will spread our requests in time to avoid stalling connection + * for too long + */ + if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES && + time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] + + HT_AGG_RETRIES_PERIOD)) { + ht_dbg(sdata, + "BA request denied - %d failed requests on %pM tid %u\n", + sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid); + ret = -EBUSY; + goto err_unlock_sta; + } + + tid_tx = rcu_dereference_protected_tid_tx(sta, tid); + /* check if the TID is not in aggregation flow already */ + if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) { + ht_dbg(sdata, + "BA request denied - session is not idle on %pM tid %u\n", + sta->sta.addr, tid); + ret = -EAGAIN; + goto err_unlock_sta; + } + + /* prepare A-MPDU MLME for Tx aggregation */ + tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); + if (!tid_tx) { + ret = -ENOMEM; + goto err_unlock_sta; + } + + skb_queue_head_init(&tid_tx->pending); + __set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); + + tid_tx->timeout = timeout; + tid_tx->sta = sta; + tid_tx->tid = tid; + + /* response timer */ + timer_setup(&tid_tx->addba_resp_timer, sta_addba_resp_timer_expired, 0); + + /* tx timer */ + timer_setup(&tid_tx->session_timer, + sta_tx_agg_session_timer_expired, TIMER_DEFERRABLE); + + /* assign a dialog token */ + sta->ampdu_mlme.dialog_token_allocator++; + tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator; + + /* + * Finally, assign it to the start array; the work item will + * collect it and move it to the normal array. + */ + sta->ampdu_mlme.tid_start_tx[tid] = tid_tx; + + ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); + + /* this flow continues off the work */ + err_unlock_sta: + spin_unlock_bh(&sta->lock); + return ret; +} +EXPORT_SYMBOL(ieee80211_start_tx_ba_session); + +static void ieee80211_agg_tx_operational(struct ieee80211_local *local, + struct sta_info *sta, u16 tid) +{ + struct tid_ampdu_tx *tid_tx; + struct ieee80211_ampdu_params params = { + .sta = &sta->sta, + .action = IEEE80211_AMPDU_TX_OPERATIONAL, + .tid = tid, + .timeout = 0, + .ssn = 0, + }; + + lockdep_assert_held(&sta->ampdu_mlme.mtx); + + tid_tx = rcu_dereference_protected_tid_tx(sta, tid); + params.buf_size = tid_tx->buf_size; + params.amsdu = tid_tx->amsdu; + + ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n", + sta->sta.addr, tid); + + drv_ampdu_action(local, sta->sdata, ¶ms); + + /* + * synchronize with TX path, while splicing the TX path + * should block so it won't put more packets onto pending. 
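+ * The TX path takes sta->lock before queueing frames onto
+ * tid_tx->pending, so holding it across the splice below keeps the
+ * pending queue consistent.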
+ */ + spin_lock_bh(&sta->lock); + + ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid); + /* + * Now mark as operational. This will be visible + * in the TX path, and lets it go lock-free in + * the common case. + */ + set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); + ieee80211_agg_splice_finish(sta->sdata, tid); + + spin_unlock_bh(&sta->lock); + + ieee80211_agg_start_txq(sta, tid, true); +} + +void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid, + struct tid_ampdu_tx *tid_tx) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + + lockdep_assert_held(&sta->ampdu_mlme.mtx); + + if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))) + return; + + if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state) || + test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state)) + return; + + if (!test_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state)) { + ieee80211_send_addba_with_timeout(sta, tid_tx); + /* RESPONSE_RECEIVED state whould trigger the flow again */ + return; + } + + if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) + ieee80211_agg_tx_operational(local, sta, tid); +} + +static struct tid_ampdu_tx * +ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata, + const u8 *ra, u16 tid, struct sta_info **sta) +{ + struct tid_ampdu_tx *tid_tx; + + if (tid >= IEEE80211_NUM_TIDS) { + ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", + tid, IEEE80211_NUM_TIDS); + return NULL; + } + + *sta = sta_info_get_bss(sdata, ra); + if (!*sta) { + ht_dbg(sdata, "Could not find station: %pM\n", ra); + return NULL; + } + + tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]); + + if (WARN_ON(!tid_tx)) + ht_dbg(sdata, "addBA was not requested!\n"); + + return tid_tx; +} + +void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, + const u8 *ra, u16 tid) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + struct tid_ampdu_tx *tid_tx; + + trace_api_start_tx_ba_cb(sdata, ra, tid); + + rcu_read_lock(); + tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta); + if (!tid_tx) + goto out; + + set_bit(HT_AGG_STATE_START_CB, &tid_tx->state); + ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); + out: + rcu_read_unlock(); +} +EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); + +int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, + enum ieee80211_agg_stop_reason reason) +{ + int ret; + + mutex_lock(&sta->ampdu_mlme.mtx); + + ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason); + + mutex_unlock(&sta->ampdu_mlme.mtx); + + return ret; +} + +int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + struct tid_ampdu_tx *tid_tx; + int ret = 0; + + trace_api_stop_tx_ba_session(pubsta, tid); + + if (!local->ops->ampdu_action) + return -EINVAL; + + if (tid >= IEEE80211_NUM_TIDS) + return -EINVAL; + + spin_lock_bh(&sta->lock); + tid_tx = rcu_dereference_protected_tid_tx(sta, tid); + + if (!tid_tx) { + ret = -ENOENT; + goto unlock; + } + + WARN(sta->reserved_tid == tid, + "Requested to stop BA session on reserved tid=%d", tid); + + if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { + /* already in progress stopping it */ + ret = 0; + goto unlock; + } + + set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state); + ieee80211_queue_work(&local->hw, 
&sta->ampdu_mlme.work); + + unlock: + spin_unlock_bh(&sta->lock); + return ret; +} +EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); + +void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid, + struct tid_ampdu_tx *tid_tx) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + bool send_delba = false; + bool start_txq = false; + + ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", + sta->sta.addr, tid); + + spin_lock_bh(&sta->lock); + + if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { + ht_dbg(sdata, + "unexpected callback to A-MPDU stop for %pM tid %d\n", + sta->sta.addr, tid); + goto unlock_sta; + } + + if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop) + send_delba = true; + + ieee80211_remove_tid_tx(sta, tid); + start_txq = true; + + unlock_sta: + spin_unlock_bh(&sta->lock); + + if (start_txq) + ieee80211_agg_start_txq(sta, tid, false); + + if (send_delba) + ieee80211_send_delba(sdata, sta->sta.addr, tid, + WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); +} + +void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, + const u8 *ra, u16 tid) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + struct tid_ampdu_tx *tid_tx; + + trace_api_stop_tx_ba_cb(sdata, ra, tid); + + rcu_read_lock(); + tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta); + if (!tid_tx) + goto out; + + set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state); + ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); + out: + rcu_read_unlock(); +} +EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); + + +void ieee80211_process_addba_resp(struct ieee80211_local *local, + struct sta_info *sta, + struct ieee80211_mgmt *mgmt, + size_t len) +{ + struct tid_ampdu_tx *tid_tx; + struct ieee80211_txq *txq; + u16 capab, tid, buf_size; + bool amsdu; + + capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); + amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK; + tid = u16_get_bits(capab, IEEE80211_ADDBA_PARAM_TID_MASK); + buf_size = u16_get_bits(capab, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK); + buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes); + + txq = sta->sta.txq[tid]; + if (!amsdu && txq) + set_bit(IEEE80211_TXQ_NO_AMSDU, &to_txq_info(txq)->flags); + + mutex_lock(&sta->ampdu_mlme.mtx); + + tid_tx = rcu_dereference_protected_tid_tx(sta, tid); + if (!tid_tx) + goto out; + + if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) { + ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n", + sta->sta.addr, tid); + goto out; + } + + del_timer_sync(&tid_tx->addba_resp_timer); + + ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n", + sta->sta.addr, tid); + + /* + * addba_resp_timer may have fired before we got here, and + * caused WANT_STOP to be set. If the stop then was already + * processed further, STOPPING might be set. + */ + if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) || + test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { + ht_dbg(sta->sdata, + "got addBA resp for %pM tid %d but we already gave up\n", + sta->sta.addr, tid); + goto out; + } + + /* + * IEEE 802.11-2007 7.3.1.14: + * In an ADDBA Response frame, when the Status Code field + * is set to 0, the Buffer Size subfield is set to a value + * of at least 1. 
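+ * A success status combined with a zero buffer size is therefore
+ * treated as a decline below.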
+ */ + if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) + == WLAN_STATUS_SUCCESS && buf_size) { + if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED, + &tid_tx->state)) { + /* ignore duplicate response */ + goto out; + } + + tid_tx->buf_size = buf_size; + tid_tx->amsdu = amsdu; + + if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)) + ieee80211_agg_tx_operational(local, sta, tid); + + sta->ampdu_mlme.addba_req_num[tid] = 0; + + tid_tx->timeout = + le16_to_cpu(mgmt->u.action.u.addba_resp.timeout); + + if (tid_tx->timeout) { + mod_timer(&tid_tx->session_timer, + TU_TO_EXP_TIME(tid_tx->timeout)); + tid_tx->last_tx = jiffies; + } + + } else { + ___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED); + } + + out: + mutex_unlock(&sta->ampdu_mlme.mtx); +} diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c new file mode 100644 index 0000000000..e8ebd343e2 --- /dev/null +++ b/net/mac80211/airtime.c @@ -0,0 +1,711 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (C) 2019 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2021-2022 Intel Corporation + */ + +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "sta_info.h" + +#define AVG_PKT_SIZE 1024 + +/* Number of bits for an average sized packet */ +#define MCS_NBITS (AVG_PKT_SIZE << 3) + +/* Number of kilo-symbols (symbols * 1024) for a packet with (bps) bits per + * symbol. We use k-symbols to avoid rounding in the _TIME macros below. + */ +#define MCS_N_KSYMS(bps) DIV_ROUND_UP(MCS_NBITS << 10, (bps)) + +/* Transmission time (in 1024 * usec) for a packet containing (ksyms) * 1024 + * symbols. + */ +#define MCS_SYMBOL_TIME(sgi, ksyms) \ + (sgi ? \ + ((ksyms) * 4 * 18) / 20 : /* 3.6 us per sym */ \ + ((ksyms) * 4) /* 4.0 us per sym */ \ + ) + +/* Transmit duration for the raw data part of an average sized packet */ +#define MCS_DURATION(streams, sgi, bps) \ + ((u32)MCS_SYMBOL_TIME(sgi, MCS_N_KSYMS((streams) * (bps)))) + +#define MCS_DURATION_S(shift, streams, sgi, bps) \ + ((u16)((MCS_DURATION(streams, sgi, bps) >> shift))) + +/* These should match the values in enum nl80211_he_gi */ +#define HE_GI_08 0 +#define HE_GI_16 1 +#define HE_GI_32 2 + +/* Transmission time (1024 usec) for a packet containing (ksyms) * k-symbols */ +#define HE_SYMBOL_TIME(gi, ksyms) \ + (gi == HE_GI_08 ? \ + ((ksyms) * 16 * 17) / 20 : /* 13.6 us per sym */ \ + (gi == HE_GI_16 ? 
\ + ((ksyms) * 16 * 18) / 20 : /* 14.4 us per sym */ \ + ((ksyms) * 16) /* 16.0 us per sym */ \ + )) + +/* Transmit duration for the raw data part of an average sized packet */ +#define HE_DURATION(streams, gi, bps) \ + ((u32)HE_SYMBOL_TIME(gi, MCS_N_KSYMS((streams) * (bps)))) + +#define HE_DURATION_S(shift, streams, gi, bps) \ + (HE_DURATION(streams, gi, bps) >> shift) + +#define BW_20 0 +#define BW_40 1 +#define BW_80 2 +#define BW_160 3 + +/* + * Define group sort order: HT40 -> SGI -> #streams + */ +#define IEEE80211_MAX_STREAMS 4 +#define IEEE80211_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */ +#define IEEE80211_VHT_STREAM_GROUPS 8 /* BW(=4) * SGI(=2) */ + +#define IEEE80211_HE_MAX_STREAMS 8 + +#define IEEE80211_HT_GROUPS_NB (IEEE80211_MAX_STREAMS * \ + IEEE80211_HT_STREAM_GROUPS) +#define IEEE80211_VHT_GROUPS_NB (IEEE80211_MAX_STREAMS * \ + IEEE80211_VHT_STREAM_GROUPS) + +#define IEEE80211_HT_GROUP_0 0 +#define IEEE80211_VHT_GROUP_0 (IEEE80211_HT_GROUP_0 + IEEE80211_HT_GROUPS_NB) +#define IEEE80211_HE_GROUP_0 (IEEE80211_VHT_GROUP_0 + IEEE80211_VHT_GROUPS_NB) + +#define MCS_GROUP_RATES 12 + +#define HT_GROUP_IDX(_streams, _sgi, _ht40) \ + IEEE80211_HT_GROUP_0 + \ + IEEE80211_MAX_STREAMS * 2 * _ht40 + \ + IEEE80211_MAX_STREAMS * _sgi + \ + _streams - 1 + +#define _MAX(a, b) (((a)>(b))?(a):(b)) + +#define GROUP_SHIFT(duration) \ + _MAX(0, 16 - __builtin_clz(duration)) + +/* MCS rate information for an MCS group */ +#define __MCS_GROUP(_streams, _sgi, _ht40, _s) \ + [HT_GROUP_IDX(_streams, _sgi, _ht40)] = { \ + .shift = _s, \ + .duration = { \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 54 : 26), \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 108 : 52), \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 162 : 78), \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 216 : 104), \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 324 : 156), \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 432 : 208), \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 486 : 234), \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 540 : 260) \ + } \ +} + +#define MCS_GROUP_SHIFT(_streams, _sgi, _ht40) \ + GROUP_SHIFT(MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26)) + +#define MCS_GROUP(_streams, _sgi, _ht40) \ + __MCS_GROUP(_streams, _sgi, _ht40, \ + MCS_GROUP_SHIFT(_streams, _sgi, _ht40)) + +#define VHT_GROUP_IDX(_streams, _sgi, _bw) \ + (IEEE80211_VHT_GROUP_0 + \ + IEEE80211_MAX_STREAMS * 2 * (_bw) + \ + IEEE80211_MAX_STREAMS * (_sgi) + \ + (_streams) - 1) + +#define BW2VBPS(_bw, r4, r3, r2, r1) \ + (_bw == BW_160 ? r4 : _bw == BW_80 ? r3 : _bw == BW_40 ? 
r2 : r1) + +#define __VHT_GROUP(_streams, _sgi, _bw, _s) \ + [VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \ + .shift = _s, \ + .duration = { \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 234, 117, 54, 26)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 468, 234, 108, 52)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 702, 351, 162, 78)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 936, 468, 216, 104)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 1404, 702, 324, 156)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 1872, 936, 432, 208)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 2106, 1053, 486, 234)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 2340, 1170, 540, 260)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 2808, 1404, 648, 312)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 3120, 1560, 720, 346)) \ + } \ +} + +#define VHT_GROUP_SHIFT(_streams, _sgi, _bw) \ + GROUP_SHIFT(MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 243, 117, 54, 26))) + +#define VHT_GROUP(_streams, _sgi, _bw) \ + __VHT_GROUP(_streams, _sgi, _bw, \ + VHT_GROUP_SHIFT(_streams, _sgi, _bw)) + + +#define HE_GROUP_IDX(_streams, _gi, _bw) \ + (IEEE80211_HE_GROUP_0 + \ + IEEE80211_HE_MAX_STREAMS * 3 * (_bw) + \ + IEEE80211_HE_MAX_STREAMS * (_gi) + \ + (_streams) - 1) + +#define __HE_GROUP(_streams, _gi, _bw, _s) \ + [HE_GROUP_IDX(_streams, _gi, _bw)] = { \ + .shift = _s, \ + .duration = { \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 979, 489, 230, 115)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 1958, 979, 475, 230)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 2937, 1468, 705, 345)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 3916, 1958, 936, 475)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 5875, 2937, 1411, 705)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 7833, 3916, 1872, 936)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 8827, 4406, 2102, 1051)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 9806, 4896, 2347, 1166)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 11764, 5875, 2808, 1411)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 13060, 6523, 3124, 1555)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 14702, 7344, 3513, 1756)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 16329, 8164, 3902, 1944)) \ + } \ +} + +#define HE_GROUP_SHIFT(_streams, _gi, _bw) \ + GROUP_SHIFT(HE_DURATION(_streams, _gi, \ + BW2VBPS(_bw, 979, 489, 230, 115))) + +#define HE_GROUP(_streams, _gi, _bw) \ + __HE_GROUP(_streams, _gi, _bw, \ + HE_GROUP_SHIFT(_streams, _gi, _bw)) +struct mcs_group { + u8 shift; + u16 duration[MCS_GROUP_RATES]; +}; + +static const struct mcs_group airtime_mcs_groups[] = { + MCS_GROUP(1, 0, BW_20), + MCS_GROUP(2, 0, BW_20), + MCS_GROUP(3, 0, BW_20), + MCS_GROUP(4, 0, BW_20), + + MCS_GROUP(1, 1, BW_20), + MCS_GROUP(2, 1, BW_20), + MCS_GROUP(3, 1, BW_20), + MCS_GROUP(4, 1, BW_20), + + MCS_GROUP(1, 0, BW_40), + MCS_GROUP(2, 0, BW_40), + MCS_GROUP(3, 0, BW_40), + MCS_GROUP(4, 0, BW_40), + + MCS_GROUP(1, 1, BW_40), + MCS_GROUP(2, 1, BW_40), + MCS_GROUP(3, 1, BW_40), + MCS_GROUP(4, 1, BW_40), + + VHT_GROUP(1, 0, BW_20), + VHT_GROUP(2, 0, BW_20), + VHT_GROUP(3, 0, BW_20), + VHT_GROUP(4, 0, BW_20), + + VHT_GROUP(1, 1, BW_20), + VHT_GROUP(2, 1, BW_20), + VHT_GROUP(3, 1, BW_20), + VHT_GROUP(4, 1, BW_20), + + VHT_GROUP(1, 0, BW_40), + VHT_GROUP(2, 0, BW_40), + VHT_GROUP(3, 0, BW_40), + VHT_GROUP(4, 0, BW_40), 
+ + VHT_GROUP(1, 1, BW_40), + VHT_GROUP(2, 1, BW_40), + VHT_GROUP(3, 1, BW_40), + VHT_GROUP(4, 1, BW_40), + + VHT_GROUP(1, 0, BW_80), + VHT_GROUP(2, 0, BW_80), + VHT_GROUP(3, 0, BW_80), + VHT_GROUP(4, 0, BW_80), + + VHT_GROUP(1, 1, BW_80), + VHT_GROUP(2, 1, BW_80), + VHT_GROUP(3, 1, BW_80), + VHT_GROUP(4, 1, BW_80), + + VHT_GROUP(1, 0, BW_160), + VHT_GROUP(2, 0, BW_160), + VHT_GROUP(3, 0, BW_160), + VHT_GROUP(4, 0, BW_160), + + VHT_GROUP(1, 1, BW_160), + VHT_GROUP(2, 1, BW_160), + VHT_GROUP(3, 1, BW_160), + VHT_GROUP(4, 1, BW_160), + + HE_GROUP(1, HE_GI_08, BW_20), + HE_GROUP(2, HE_GI_08, BW_20), + HE_GROUP(3, HE_GI_08, BW_20), + HE_GROUP(4, HE_GI_08, BW_20), + HE_GROUP(5, HE_GI_08, BW_20), + HE_GROUP(6, HE_GI_08, BW_20), + HE_GROUP(7, HE_GI_08, BW_20), + HE_GROUP(8, HE_GI_08, BW_20), + + HE_GROUP(1, HE_GI_16, BW_20), + HE_GROUP(2, HE_GI_16, BW_20), + HE_GROUP(3, HE_GI_16, BW_20), + HE_GROUP(4, HE_GI_16, BW_20), + HE_GROUP(5, HE_GI_16, BW_20), + HE_GROUP(6, HE_GI_16, BW_20), + HE_GROUP(7, HE_GI_16, BW_20), + HE_GROUP(8, HE_GI_16, BW_20), + + HE_GROUP(1, HE_GI_32, BW_20), + HE_GROUP(2, HE_GI_32, BW_20), + HE_GROUP(3, HE_GI_32, BW_20), + HE_GROUP(4, HE_GI_32, BW_20), + HE_GROUP(5, HE_GI_32, BW_20), + HE_GROUP(6, HE_GI_32, BW_20), + HE_GROUP(7, HE_GI_32, BW_20), + HE_GROUP(8, HE_GI_32, BW_20), + + HE_GROUP(1, HE_GI_08, BW_40), + HE_GROUP(2, HE_GI_08, BW_40), + HE_GROUP(3, HE_GI_08, BW_40), + HE_GROUP(4, HE_GI_08, BW_40), + HE_GROUP(5, HE_GI_08, BW_40), + HE_GROUP(6, HE_GI_08, BW_40), + HE_GROUP(7, HE_GI_08, BW_40), + HE_GROUP(8, HE_GI_08, BW_40), + + HE_GROUP(1, HE_GI_16, BW_40), + HE_GROUP(2, HE_GI_16, BW_40), + HE_GROUP(3, HE_GI_16, BW_40), + HE_GROUP(4, HE_GI_16, BW_40), + HE_GROUP(5, HE_GI_16, BW_40), + HE_GROUP(6, HE_GI_16, BW_40), + HE_GROUP(7, HE_GI_16, BW_40), + HE_GROUP(8, HE_GI_16, BW_40), + + HE_GROUP(1, HE_GI_32, BW_40), + HE_GROUP(2, HE_GI_32, BW_40), + HE_GROUP(3, HE_GI_32, BW_40), + HE_GROUP(4, HE_GI_32, BW_40), + HE_GROUP(5, HE_GI_32, BW_40), + HE_GROUP(6, HE_GI_32, BW_40), + HE_GROUP(7, HE_GI_32, BW_40), + HE_GROUP(8, HE_GI_32, BW_40), + + HE_GROUP(1, HE_GI_08, BW_80), + HE_GROUP(2, HE_GI_08, BW_80), + HE_GROUP(3, HE_GI_08, BW_80), + HE_GROUP(4, HE_GI_08, BW_80), + HE_GROUP(5, HE_GI_08, BW_80), + HE_GROUP(6, HE_GI_08, BW_80), + HE_GROUP(7, HE_GI_08, BW_80), + HE_GROUP(8, HE_GI_08, BW_80), + + HE_GROUP(1, HE_GI_16, BW_80), + HE_GROUP(2, HE_GI_16, BW_80), + HE_GROUP(3, HE_GI_16, BW_80), + HE_GROUP(4, HE_GI_16, BW_80), + HE_GROUP(5, HE_GI_16, BW_80), + HE_GROUP(6, HE_GI_16, BW_80), + HE_GROUP(7, HE_GI_16, BW_80), + HE_GROUP(8, HE_GI_16, BW_80), + + HE_GROUP(1, HE_GI_32, BW_80), + HE_GROUP(2, HE_GI_32, BW_80), + HE_GROUP(3, HE_GI_32, BW_80), + HE_GROUP(4, HE_GI_32, BW_80), + HE_GROUP(5, HE_GI_32, BW_80), + HE_GROUP(6, HE_GI_32, BW_80), + HE_GROUP(7, HE_GI_32, BW_80), + HE_GROUP(8, HE_GI_32, BW_80), + + HE_GROUP(1, HE_GI_08, BW_160), + HE_GROUP(2, HE_GI_08, BW_160), + HE_GROUP(3, HE_GI_08, BW_160), + HE_GROUP(4, HE_GI_08, BW_160), + HE_GROUP(5, HE_GI_08, BW_160), + HE_GROUP(6, HE_GI_08, BW_160), + HE_GROUP(7, HE_GI_08, BW_160), + HE_GROUP(8, HE_GI_08, BW_160), + + HE_GROUP(1, HE_GI_16, BW_160), + HE_GROUP(2, HE_GI_16, BW_160), + HE_GROUP(3, HE_GI_16, BW_160), + HE_GROUP(4, HE_GI_16, BW_160), + HE_GROUP(5, HE_GI_16, BW_160), + HE_GROUP(6, HE_GI_16, BW_160), + HE_GROUP(7, HE_GI_16, BW_160), + HE_GROUP(8, HE_GI_16, BW_160), + + HE_GROUP(1, HE_GI_32, BW_160), + HE_GROUP(2, HE_GI_32, BW_160), + HE_GROUP(3, HE_GI_32, BW_160), + HE_GROUP(4, HE_GI_32, BW_160), + HE_GROUP(5, HE_GI_32, 
BW_160), + HE_GROUP(6, HE_GI_32, BW_160), + HE_GROUP(7, HE_GI_32, BW_160), + HE_GROUP(8, HE_GI_32, BW_160), +}; + +static u32 +ieee80211_calc_legacy_rate_duration(u16 bitrate, bool short_pre, + bool cck, int len) +{ + u32 duration; + + if (cck) { + duration = 144 + 48; /* preamble + PLCP */ + if (short_pre) + duration >>= 1; + + duration += 10; /* SIFS */ + } else { + duration = 20 + 16; /* premable + SIFS */ + } + + len <<= 3; + duration += (len * 10) / bitrate; + + return duration; +} + +static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw, + struct ieee80211_rx_status *status, + u32 *overhead) +{ + bool sgi = status->enc_flags & RX_ENC_FLAG_SHORT_GI; + int bw, streams; + int group, idx; + u32 duration; + + switch (status->bw) { + case RATE_INFO_BW_20: + bw = BW_20; + break; + case RATE_INFO_BW_40: + bw = BW_40; + break; + case RATE_INFO_BW_80: + bw = BW_80; + break; + case RATE_INFO_BW_160: + bw = BW_160; + break; + default: + WARN_ON_ONCE(1); + return 0; + } + + switch (status->encoding) { + case RX_ENC_VHT: + streams = status->nss; + idx = status->rate_idx; + group = VHT_GROUP_IDX(streams, sgi, bw); + break; + case RX_ENC_HT: + streams = ((status->rate_idx >> 3) & 3) + 1; + idx = status->rate_idx & 7; + group = HT_GROUP_IDX(streams, sgi, bw); + break; + case RX_ENC_HE: + streams = status->nss; + idx = status->rate_idx; + group = HE_GROUP_IDX(streams, status->he_gi, bw); + break; + default: + WARN_ON_ONCE(1); + return 0; + } + + if (WARN_ON_ONCE((status->encoding != RX_ENC_HE && streams > 4) || + (status->encoding == RX_ENC_HE && streams > 8))) + return 0; + + if (idx >= MCS_GROUP_RATES) + return 0; + + duration = airtime_mcs_groups[group].duration[idx]; + duration <<= airtime_mcs_groups[group].shift; + *overhead = 36 + (streams << 2); + + return duration; +} + + +u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw, + struct ieee80211_rx_status *status, + int len) +{ + struct ieee80211_supported_band *sband; + u32 duration, overhead = 0; + + if (status->encoding == RX_ENC_LEGACY) { + const struct ieee80211_rate *rate; + bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE; + bool cck; + + /* on 60GHz or sub-1GHz band, there are no legacy rates */ + if (WARN_ON_ONCE(status->band == NL80211_BAND_60GHZ || + status->band == NL80211_BAND_S1GHZ)) + return 0; + + sband = hw->wiphy->bands[status->band]; + if (!sband || status->rate_idx >= sband->n_bitrates) + return 0; + + rate = &sband->bitrates[status->rate_idx]; + cck = rate->flags & IEEE80211_RATE_MANDATORY_B; + + return ieee80211_calc_legacy_rate_duration(rate->bitrate, sp, + cck, len); + } + + duration = ieee80211_get_rate_duration(hw, status, &overhead); + if (!duration) + return 0; + + duration *= len; + duration /= AVG_PKT_SIZE; + duration /= 1024; + + return duration + overhead; +} +EXPORT_SYMBOL_GPL(ieee80211_calc_rx_airtime); + +static bool ieee80211_fill_rate_info(struct ieee80211_hw *hw, + struct ieee80211_rx_status *stat, u8 band, + struct rate_info *ri) +{ + struct ieee80211_supported_band *sband = hw->wiphy->bands[band]; + int i; + + if (!ri || !sband) + return false; + + stat->bw = ri->bw; + stat->nss = ri->nss; + stat->rate_idx = ri->mcs; + + if (ri->flags & RATE_INFO_FLAGS_HE_MCS) + stat->encoding = RX_ENC_HE; + else if (ri->flags & RATE_INFO_FLAGS_VHT_MCS) + stat->encoding = RX_ENC_VHT; + else if (ri->flags & RATE_INFO_FLAGS_MCS) + stat->encoding = RX_ENC_HT; + else + stat->encoding = RX_ENC_LEGACY; + + if (ri->flags & RATE_INFO_FLAGS_SHORT_GI) + stat->enc_flags |= RX_ENC_FLAG_SHORT_GI; + + stat->he_gi = 
ri->he_gi; + + if (stat->encoding != RX_ENC_LEGACY) + return true; + + stat->rate_idx = 0; + for (i = 0; i < sband->n_bitrates; i++) { + if (ri->legacy != sband->bitrates[i].bitrate) + continue; + + stat->rate_idx = i; + return true; + } + + return false; +} + +static int ieee80211_fill_rx_status(struct ieee80211_rx_status *stat, + struct ieee80211_hw *hw, + struct ieee80211_tx_rate *rate, + struct rate_info *ri, u8 band, int len) +{ + memset(stat, 0, sizeof(*stat)); + stat->band = band; + + if (ieee80211_fill_rate_info(hw, stat, band, ri)) + return 0; + + if (rate->idx < 0 || !rate->count) + return -1; + + if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) + stat->bw = RATE_INFO_BW_160; + else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + stat->bw = RATE_INFO_BW_80; + else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + stat->bw = RATE_INFO_BW_40; + else + stat->bw = RATE_INFO_BW_20; + + stat->enc_flags = 0; + if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + stat->enc_flags |= RX_ENC_FLAG_SHORTPRE; + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + stat->enc_flags |= RX_ENC_FLAG_SHORT_GI; + + stat->rate_idx = rate->idx; + if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { + stat->encoding = RX_ENC_VHT; + stat->rate_idx = ieee80211_rate_get_vht_mcs(rate); + stat->nss = ieee80211_rate_get_vht_nss(rate); + } else if (rate->flags & IEEE80211_TX_RC_MCS) { + stat->encoding = RX_ENC_HT; + } else { + stat->encoding = RX_ENC_LEGACY; + } + + return 0; +} + +static u32 ieee80211_calc_tx_airtime_rate(struct ieee80211_hw *hw, + struct ieee80211_tx_rate *rate, + struct rate_info *ri, + u8 band, int len) +{ + struct ieee80211_rx_status stat; + + if (ieee80211_fill_rx_status(&stat, hw, rate, ri, band, len)) + return 0; + + return ieee80211_calc_rx_airtime(hw, &stat, len); +} + +u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw, + struct ieee80211_tx_info *info, + int len) +{ + u32 duration = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) { + struct ieee80211_tx_rate *rate = &info->status.rates[i]; + u32 cur_duration; + + cur_duration = ieee80211_calc_tx_airtime_rate(hw, rate, NULL, + info->band, len); + if (!cur_duration) + break; + + duration += cur_duration * rate->count; + } + + return duration; +} +EXPORT_SYMBOL_GPL(ieee80211_calc_tx_airtime); + +u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *pubsta, + int len, bool ampdu) +{ + struct ieee80211_supported_band *sband; + struct ieee80211_chanctx_conf *conf; + int rateidx, shift = 0; + bool cck, short_pream; + u32 basic_rates; + u8 band = 0; + u16 rate; + + len += 38; /* Ethernet header length */ + + conf = rcu_dereference(vif->bss_conf.chanctx_conf); + if (conf) { + band = conf->def.chan->band; + shift = ieee80211_chandef_get_shift(&conf->def); + } + + if (pubsta) { + struct sta_info *sta = container_of(pubsta, struct sta_info, + sta); + struct ieee80211_rx_status stat; + struct ieee80211_tx_rate *tx_rate = &sta->deflink.tx_stats.last_rate; + struct rate_info *ri = &sta->deflink.tx_stats.last_rate_info; + u32 duration, overhead; + u8 agg_shift; + + if (ieee80211_fill_rx_status(&stat, hw, tx_rate, ri, band, len)) + return 0; + + if (stat.encoding == RX_ENC_LEGACY || !ampdu) + return ieee80211_calc_rx_airtime(hw, &stat, len); + + duration = ieee80211_get_rate_duration(hw, &stat, &overhead); + /* + * Assume that HT/VHT transmission on any AC except VO will + * use aggregation. 
Since we don't have reliable reporting + * of aggregation length, assume an average size based on the + * tx rate. + * This will not be very accurate, but much better than simply + * assuming un-aggregated tx in all cases. + */ + if (duration > 400 * 1024) /* <= VHT20 MCS2 1S */ + agg_shift = 1; + else if (duration > 250 * 1024) /* <= VHT20 MCS3 1S or MCS1 2S */ + agg_shift = 2; + else if (duration > 150 * 1024) /* <= VHT20 MCS5 1S or MCS2 2S */ + agg_shift = 3; + else if (duration > 70 * 1024) /* <= VHT20 MCS5 2S */ + agg_shift = 4; + else if (stat.encoding != RX_ENC_HE || + duration > 20 * 1024) /* <= HE40 MCS6 2S */ + agg_shift = 5; + else + agg_shift = 6; + + duration *= len; + duration /= AVG_PKT_SIZE; + duration /= 1024; + duration += (overhead >> agg_shift); + + return max_t(u32, duration, 4); + } + + if (!conf) + return 0; + + /* No station to get latest rate from, so calculate the worst-case + * duration using the lowest configured basic rate. + */ + sband = hw->wiphy->bands[band]; + + basic_rates = vif->bss_conf.basic_rates; + short_pream = vif->bss_conf.use_short_preamble; + + rateidx = basic_rates ? ffs(basic_rates) - 1 : 0; + rate = sband->bitrates[rateidx].bitrate << shift; + cck = sband->bitrates[rateidx].flags & IEEE80211_RATE_MANDATORY_B; + + return ieee80211_calc_legacy_rate_duration(rate, short_pream, cck, len); +} diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c new file mode 100644 index 0000000000..f7cb50b0dd --- /dev/null +++ b/net/mac80211/cfg.c @@ -0,0 +1,5143 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * mac80211 configuration hooks for cfg80211 + * + * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2022 Intel Corporation + */ + +#include <linux/ieee80211.h> +#include <linux/nl80211.h> +#include <linux/rtnetlink.h> +#include <linux/slab.h> +#include <net/net_namespace.h> +#include <linux/rcupdate.h> +#include <linux/fips.h> +#include <linux/if_ether.h> +#include <net/cfg80211.h> +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" +#include "mesh.h" +#include "wme.h" + +static struct ieee80211_link_data * +ieee80211_link_or_deflink(struct ieee80211_sub_if_data *sdata, int link_id, + bool require_valid) +{ + struct ieee80211_link_data *link; + + if (link_id < 0) { + /* + * For keys, if sdata is not an MLD, we might not use + * the return value at all (if it's not a pairwise key), + * so in that case (require_valid==false) don't error. 
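+ * For an MLD there is no single link to fall back to, so a negative
+ * link_id is only accepted when require_valid is false; otherwise
+ * -EINVAL is returned below.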
+ */ + if (require_valid && ieee80211_vif_is_mld(&sdata->vif)) + return ERR_PTR(-EINVAL); + + return &sdata->deflink; + } + + link = sdata_dereference(sdata->link[link_id], sdata); + if (!link) + return ERR_PTR(-ENOLINK); + return link; +} + +static void ieee80211_set_mu_mimo_follow(struct ieee80211_sub_if_data *sdata, + struct vif_params *params) +{ + bool mu_mimo_groups = false; + bool mu_mimo_follow = false; + + if (params->vht_mumimo_groups) { + u64 membership; + + BUILD_BUG_ON(sizeof(membership) != WLAN_MEMBERSHIP_LEN); + + memcpy(sdata->vif.bss_conf.mu_group.membership, + params->vht_mumimo_groups, WLAN_MEMBERSHIP_LEN); + memcpy(sdata->vif.bss_conf.mu_group.position, + params->vht_mumimo_groups + WLAN_MEMBERSHIP_LEN, + WLAN_USER_POSITION_LEN); + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_MU_GROUPS); + /* don't care about endianness - just check for 0 */ + memcpy(&membership, params->vht_mumimo_groups, + WLAN_MEMBERSHIP_LEN); + mu_mimo_groups = membership != 0; + } + + if (params->vht_mumimo_follow_addr) { + mu_mimo_follow = + is_valid_ether_addr(params->vht_mumimo_follow_addr); + ether_addr_copy(sdata->u.mntr.mu_follow_addr, + params->vht_mumimo_follow_addr); + } + + sdata->vif.bss_conf.mu_mimo_owner = mu_mimo_groups || mu_mimo_follow; +} + +static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata, + struct vif_params *params) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_sub_if_data *monitor_sdata; + + /* check flags first */ + if (params->flags && ieee80211_sdata_running(sdata)) { + u32 mask = MONITOR_FLAG_COOK_FRAMES | MONITOR_FLAG_ACTIVE; + + /* + * Prohibit MONITOR_FLAG_COOK_FRAMES and + * MONITOR_FLAG_ACTIVE to be changed while the + * interface is up. + * Else we would need to add a lot of cruft + * to update everything: + * cooked_mntrs, monitor and all fif_* counters + * reconfigure hardware + */ + if ((params->flags & mask) != (sdata->u.mntr.flags & mask)) + return -EBUSY; + } + + /* also validate MU-MIMO change */ + monitor_sdata = wiphy_dereference(local->hw.wiphy, + local->monitor_sdata); + + if (!monitor_sdata && + (params->vht_mumimo_groups || params->vht_mumimo_follow_addr)) + return -EOPNOTSUPP; + + /* apply all changes now - no failures allowed */ + + if (monitor_sdata) + ieee80211_set_mu_mimo_follow(monitor_sdata, params); + + if (params->flags) { + if (ieee80211_sdata_running(sdata)) { + ieee80211_adjust_monitor_flags(sdata, -1); + sdata->u.mntr.flags = params->flags; + ieee80211_adjust_monitor_flags(sdata, 1); + + ieee80211_configure_filter(local); + } else { + /* + * Because the interface is down, ieee80211_do_stop + * and ieee80211_do_open take care of "everything" + * mentioned in the comment above. 
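+ * The new flags are simply stored here and take effect the next
+ * time the interface is brought up.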
+ */ + sdata->u.mntr.flags = params->flags; + } + } + + return 0; +} + +static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata, + struct cfg80211_mbssid_config params, + struct ieee80211_bss_conf *link_conf) +{ + struct ieee80211_sub_if_data *tx_sdata; + + sdata->vif.mbssid_tx_vif = NULL; + link_conf->bssid_index = 0; + link_conf->nontransmitted = false; + link_conf->ema_ap = false; + link_conf->bssid_indicator = 0; + + if (sdata->vif.type != NL80211_IFTYPE_AP || !params.tx_wdev) + return -EINVAL; + + tx_sdata = IEEE80211_WDEV_TO_SUB_IF(params.tx_wdev); + if (!tx_sdata) + return -EINVAL; + + if (tx_sdata == sdata) { + sdata->vif.mbssid_tx_vif = &sdata->vif; + } else { + sdata->vif.mbssid_tx_vif = &tx_sdata->vif; + link_conf->nontransmitted = true; + link_conf->bssid_index = params.index; + } + if (params.ema) + link_conf->ema_ap = true; + + return 0; +} + +static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy, + const char *name, + unsigned char name_assign_type, + enum nl80211_iftype type, + struct vif_params *params) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct wireless_dev *wdev; + struct ieee80211_sub_if_data *sdata; + int err; + + err = ieee80211_if_add(local, name, name_assign_type, &wdev, type, params); + if (err) + return ERR_PTR(err); + + sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + + if (type == NL80211_IFTYPE_MONITOR) { + err = ieee80211_set_mon_options(sdata, params); + if (err) { + ieee80211_if_remove(sdata); + return NULL; + } + } + + return wdev; +} + +static int ieee80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev) +{ + ieee80211_if_remove(IEEE80211_WDEV_TO_SUB_IF(wdev)); + + return 0; +} + +static int ieee80211_change_iface(struct wiphy *wiphy, + struct net_device *dev, + enum nl80211_iftype type, + struct vif_params *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + int ret; + + ret = ieee80211_if_change_type(sdata, type); + if (ret) + return ret; + + if (type == NL80211_IFTYPE_AP_VLAN && params->use_4addr == 0) { + RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); + ieee80211_check_fast_rx_iface(sdata); + } else if (type == NL80211_IFTYPE_STATION && params->use_4addr >= 0) { + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + if (params->use_4addr == ifmgd->use_4addr) + return 0; + + /* FIXME: no support for 4-addr MLO yet */ + if (ieee80211_vif_is_mld(&sdata->vif)) + return -EOPNOTSUPP; + + sdata->u.mgd.use_4addr = params->use_4addr; + if (!ifmgd->associated) + return 0; + + mutex_lock(&local->sta_mtx); + sta = sta_info_get(sdata, sdata->deflink.u.mgd.bssid); + if (sta) + drv_sta_set_4addr(local, sdata, &sta->sta, + params->use_4addr); + mutex_unlock(&local->sta_mtx); + + if (params->use_4addr) + ieee80211_send_4addr_nullfunc(local, sdata); + } + + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { + ret = ieee80211_set_mon_options(sdata, params); + if (ret) + return ret; + } + + return 0; +} + +static int ieee80211_start_p2p_device(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + int ret; + + mutex_lock(&sdata->local->chanctx_mtx); + ret = ieee80211_check_combinations(sdata, NULL, 0, 0); + mutex_unlock(&sdata->local->chanctx_mtx); + if (ret < 0) + return ret; + + return ieee80211_do_open(wdev, true); +} + +static void ieee80211_stop_p2p_device(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + 
ieee80211_sdata_stop(IEEE80211_WDEV_TO_SUB_IF(wdev)); +} + +static int ieee80211_start_nan(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct cfg80211_nan_conf *conf) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + int ret; + + mutex_lock(&sdata->local->chanctx_mtx); + ret = ieee80211_check_combinations(sdata, NULL, 0, 0); + mutex_unlock(&sdata->local->chanctx_mtx); + if (ret < 0) + return ret; + + ret = ieee80211_do_open(wdev, true); + if (ret) + return ret; + + ret = drv_start_nan(sdata->local, sdata, conf); + if (ret) + ieee80211_sdata_stop(sdata); + + sdata->u.nan.conf = *conf; + + return ret; +} + +static void ieee80211_stop_nan(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + + drv_stop_nan(sdata->local, sdata); + ieee80211_sdata_stop(sdata); +} + +static int ieee80211_nan_change_conf(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct cfg80211_nan_conf *conf, + u32 changes) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + struct cfg80211_nan_conf new_conf; + int ret = 0; + + if (sdata->vif.type != NL80211_IFTYPE_NAN) + return -EOPNOTSUPP; + + if (!ieee80211_sdata_running(sdata)) + return -ENETDOWN; + + new_conf = sdata->u.nan.conf; + + if (changes & CFG80211_NAN_CONF_CHANGED_PREF) + new_conf.master_pref = conf->master_pref; + + if (changes & CFG80211_NAN_CONF_CHANGED_BANDS) + new_conf.bands = conf->bands; + + ret = drv_nan_change_conf(sdata->local, sdata, &new_conf, changes); + if (!ret) + sdata->u.nan.conf = new_conf; + + return ret; +} + +static int ieee80211_add_nan_func(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct cfg80211_nan_func *nan_func) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + int ret; + + if (sdata->vif.type != NL80211_IFTYPE_NAN) + return -EOPNOTSUPP; + + if (!ieee80211_sdata_running(sdata)) + return -ENETDOWN; + + spin_lock_bh(&sdata->u.nan.func_lock); + + ret = idr_alloc(&sdata->u.nan.function_inst_ids, + nan_func, 1, sdata->local->hw.max_nan_de_entries + 1, + GFP_ATOMIC); + spin_unlock_bh(&sdata->u.nan.func_lock); + + if (ret < 0) + return ret; + + nan_func->instance_id = ret; + + WARN_ON(nan_func->instance_id == 0); + + ret = drv_add_nan_func(sdata->local, sdata, nan_func); + if (ret) { + spin_lock_bh(&sdata->u.nan.func_lock); + idr_remove(&sdata->u.nan.function_inst_ids, + nan_func->instance_id); + spin_unlock_bh(&sdata->u.nan.func_lock); + } + + return ret; +} + +static struct cfg80211_nan_func * +ieee80211_find_nan_func_by_cookie(struct ieee80211_sub_if_data *sdata, + u64 cookie) +{ + struct cfg80211_nan_func *func; + int id; + + lockdep_assert_held(&sdata->u.nan.func_lock); + + idr_for_each_entry(&sdata->u.nan.function_inst_ids, func, id) { + if (func->cookie == cookie) + return func; + } + + return NULL; +} + +static void ieee80211_del_nan_func(struct wiphy *wiphy, + struct wireless_dev *wdev, u64 cookie) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + struct cfg80211_nan_func *func; + u8 instance_id = 0; + + if (sdata->vif.type != NL80211_IFTYPE_NAN || + !ieee80211_sdata_running(sdata)) + return; + + spin_lock_bh(&sdata->u.nan.func_lock); + + func = ieee80211_find_nan_func_by_cookie(sdata, cookie); + if (func) + instance_id = func->instance_id; + + spin_unlock_bh(&sdata->u.nan.func_lock); + + if (instance_id) + drv_del_nan_func(sdata->local, sdata, instance_id); +} + +static int ieee80211_set_noack_map(struct wiphy *wiphy, + struct 
net_device *dev, + u16 noack_map) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + sdata->noack_map = noack_map; + + ieee80211_check_fast_xmit_iface(sdata); + + return 0; +} + +static int ieee80211_set_tx(struct ieee80211_sub_if_data *sdata, + const u8 *mac_addr, u8 key_idx) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_key *key; + struct sta_info *sta; + int ret = -EINVAL; + + if (!wiphy_ext_feature_isset(local->hw.wiphy, + NL80211_EXT_FEATURE_EXT_KEY_ID)) + return -EINVAL; + + sta = sta_info_get_bss(sdata, mac_addr); + + if (!sta) + return -EINVAL; + + if (sta->ptk_idx == key_idx) + return 0; + + mutex_lock(&local->key_mtx); + key = key_mtx_dereference(local, sta->ptk[key_idx]); + + if (key && key->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX) + ret = ieee80211_set_tx_key(key); + + mutex_unlock(&local->key_mtx); + return ret; +} + +static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, + int link_id, u8 key_idx, bool pairwise, + const u8 *mac_addr, struct key_params *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_link_data *link = + ieee80211_link_or_deflink(sdata, link_id, false); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta = NULL; + struct ieee80211_key *key; + int err; + + if (!ieee80211_sdata_running(sdata)) + return -ENETDOWN; + + if (IS_ERR(link)) + return PTR_ERR(link); + + if (pairwise && params->mode == NL80211_KEY_SET_TX) + return ieee80211_set_tx(sdata, mac_addr, key_idx); + + /* reject WEP and TKIP keys if WEP failed to initialize */ + switch (params->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_WEP104: + if (link_id >= 0) + return -EINVAL; + if (WARN_ON_ONCE(fips_enabled)) + return -EINVAL; + break; + default: + break; + } + + key = ieee80211_key_alloc(params->cipher, key_idx, params->key_len, + params->key, params->seq_len, params->seq); + if (IS_ERR(key)) + return PTR_ERR(key); + + key->conf.link_id = link_id; + + if (pairwise) + key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE; + + if (params->mode == NL80211_KEY_NO_TX) + key->conf.flags |= IEEE80211_KEY_FLAG_NO_AUTO_TX; + + mutex_lock(&local->sta_mtx); + + if (mac_addr) { + sta = sta_info_get_bss(sdata, mac_addr); + /* + * The ASSOC test makes sure the driver is ready to + * receive the key. When wpa_supplicant has roamed + * using FT, it attempts to set the key before + * association has completed, this rejects that attempt + * so it will set the key again after association. + * + * TODO: accept the key if we have a station entry and + * add it to the device after the station. 
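+ * Until then the key is freed again and -ENOENT returned, and the
+ * supplicant sets the key once more after association completes.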
+ */ + if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) { + ieee80211_key_free_unused(key); + err = -ENOENT; + goto out_unlock; + } + } + + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED) + key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + /* Keys without a station are used for TX only */ + if (sta && test_sta_flag(sta, WLAN_STA_MFP)) + key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; + break; + case NL80211_IFTYPE_ADHOC: + /* no MFP (yet) */ + break; + case NL80211_IFTYPE_MESH_POINT: +#ifdef CONFIG_MAC80211_MESH + if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE) + key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; + break; +#endif + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_P2P_DEVICE: + case NL80211_IFTYPE_NAN: + case NL80211_IFTYPE_UNSPECIFIED: + case NUM_NL80211_IFTYPES: + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_OCB: + /* shouldn't happen */ + WARN_ON_ONCE(1); + break; + } + + err = ieee80211_key_link(key, link, sta); + /* KRACK protection, shouldn't happen but just silently accept key */ + if (err == -EALREADY) + err = 0; + + out_unlock: + mutex_unlock(&local->sta_mtx); + + return err; +} + +static struct ieee80211_key * +ieee80211_lookup_key(struct ieee80211_sub_if_data *sdata, int link_id, + u8 key_idx, bool pairwise, const u8 *mac_addr) +{ + struct ieee80211_local *local __maybe_unused = sdata->local; + struct ieee80211_link_data *link = &sdata->deflink; + struct ieee80211_key *key; + + if (link_id >= 0) { + link = rcu_dereference_check(sdata->link[link_id], + lockdep_is_held(&sdata->wdev.mtx)); + if (!link) + return NULL; + } + + if (mac_addr) { + struct sta_info *sta; + struct link_sta_info *link_sta; + + sta = sta_info_get_bss(sdata, mac_addr); + if (!sta) + return NULL; + + if (link_id >= 0) { + link_sta = rcu_dereference_check(sta->link[link_id], + lockdep_is_held(&local->sta_mtx)); + if (!link_sta) + return NULL; + } else { + link_sta = &sta->deflink; + } + + if (pairwise && key_idx < NUM_DEFAULT_KEYS) + return rcu_dereference_check_key_mtx(local, + sta->ptk[key_idx]); + + if (!pairwise && + key_idx < NUM_DEFAULT_KEYS + + NUM_DEFAULT_MGMT_KEYS + + NUM_DEFAULT_BEACON_KEYS) + return rcu_dereference_check_key_mtx(local, + link_sta->gtk[key_idx]); + + return NULL; + } + + if (pairwise && key_idx < NUM_DEFAULT_KEYS) + return rcu_dereference_check_key_mtx(local, + sdata->keys[key_idx]); + + key = rcu_dereference_check_key_mtx(local, link->gtk[key_idx]); + if (key) + return key; + + /* or maybe it was a WEP key */ + if (key_idx < NUM_DEFAULT_KEYS) + return rcu_dereference_check_key_mtx(local, sdata->keys[key_idx]); + + return NULL; +} + +static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, + int link_id, u8 key_idx, bool pairwise, + const u8 *mac_addr) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct ieee80211_key *key; + int ret; + + mutex_lock(&local->sta_mtx); + mutex_lock(&local->key_mtx); + + key = ieee80211_lookup_key(sdata, link_id, key_idx, pairwise, mac_addr); + if (!key) { + ret = -ENOENT; + goto out_unlock; + } + + ieee80211_key_free(key, sdata->vif.type == NL80211_IFTYPE_STATION); + + ret = 0; + out_unlock: + mutex_unlock(&local->key_mtx); + mutex_unlock(&local->sta_mtx); + + return ret; +} + +static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, + 
int link_id, u8 key_idx, bool pairwise, + const u8 *mac_addr, void *cookie, + void (*callback)(void *cookie, + struct key_params *params)) +{ + struct ieee80211_sub_if_data *sdata; + u8 seq[6] = {0}; + struct key_params params; + struct ieee80211_key *key; + u64 pn64; + u32 iv32; + u16 iv16; + int err = -ENOENT; + struct ieee80211_key_seq kseq = {}; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + rcu_read_lock(); + + key = ieee80211_lookup_key(sdata, link_id, key_idx, pairwise, mac_addr); + if (!key) + goto out; + + memset(¶ms, 0, sizeof(params)); + + params.cipher = key->conf.cipher; + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_TKIP: + pn64 = atomic64_read(&key->conf.tx_pn); + iv32 = TKIP_PN_TO_IV32(pn64); + iv16 = TKIP_PN_TO_IV16(pn64); + + if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && + !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { + drv_get_key_seq(sdata->local, key, &kseq); + iv32 = kseq.tkip.iv32; + iv16 = kseq.tkip.iv16; + } + + seq[0] = iv16 & 0xff; + seq[1] = (iv16 >> 8) & 0xff; + seq[2] = iv32 & 0xff; + seq[3] = (iv32 >> 8) & 0xff; + seq[4] = (iv32 >> 16) & 0xff; + seq[5] = (iv32 >> 24) & 0xff; + params.seq = seq; + params.seq_len = 6; + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) != + offsetof(typeof(kseq), aes_cmac)); + fallthrough; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) != + offsetof(typeof(kseq), aes_gmac)); + fallthrough; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) != + offsetof(typeof(kseq), gcmp)); + + if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && + !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { + drv_get_key_seq(sdata->local, key, &kseq); + memcpy(seq, kseq.ccmp.pn, 6); + } else { + pn64 = atomic64_read(&key->conf.tx_pn); + seq[0] = pn64; + seq[1] = pn64 >> 8; + seq[2] = pn64 >> 16; + seq[3] = pn64 >> 24; + seq[4] = pn64 >> 32; + seq[5] = pn64 >> 40; + } + params.seq = seq; + params.seq_len = 6; + break; + default: + if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) + break; + if (WARN_ON(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) + break; + drv_get_key_seq(sdata->local, key, &kseq); + params.seq = kseq.hw.seq; + params.seq_len = kseq.hw.seq_len; + break; + } + + params.key = key->conf.key; + params.key_len = key->conf.keylen; + + callback(cookie, ¶ms); + err = 0; + + out: + rcu_read_unlock(); + return err; +} + +static int ieee80211_config_default_key(struct wiphy *wiphy, + struct net_device *dev, + int link_id, u8 key_idx, bool uni, + bool multi) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_link_data *link = + ieee80211_link_or_deflink(sdata, link_id, false); + + if (IS_ERR(link)) + return PTR_ERR(link); + + ieee80211_set_default_key(link, key_idx, uni, multi); + + return 0; +} + +static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy, + struct net_device *dev, + int link_id, u8 key_idx) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_link_data *link = + ieee80211_link_or_deflink(sdata, link_id, true); + + if (IS_ERR(link)) + return PTR_ERR(link); + + ieee80211_set_default_mgmt_key(link, key_idx); + + return 0; +} + +static int ieee80211_config_default_beacon_key(struct wiphy *wiphy, + struct net_device *dev, + int link_id, u8 key_idx) 
+{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_link_data *link = + ieee80211_link_or_deflink(sdata, link_id, true); + + if (IS_ERR(link)) + return PTR_ERR(link); + + ieee80211_set_default_beacon_key(link, key_idx); + + return 0; +} + +void sta_set_rate_info_tx(struct sta_info *sta, + const struct ieee80211_tx_rate *rate, + struct rate_info *rinfo) +{ + rinfo->flags = 0; + if (rate->flags & IEEE80211_TX_RC_MCS) { + rinfo->flags |= RATE_INFO_FLAGS_MCS; + rinfo->mcs = rate->idx; + } else if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { + rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; + rinfo->mcs = ieee80211_rate_get_vht_mcs(rate); + rinfo->nss = ieee80211_rate_get_vht_nss(rate); + } else { + struct ieee80211_supported_band *sband; + int shift = ieee80211_vif_get_shift(&sta->sdata->vif); + u16 brate; + + sband = ieee80211_get_sband(sta->sdata); + WARN_ON_ONCE(sband && !sband->bitrates); + if (sband && sband->bitrates) { + brate = sband->bitrates[rate->idx].bitrate; + rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); + } + } + if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + rinfo->bw = RATE_INFO_BW_40; + else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + rinfo->bw = RATE_INFO_BW_80; + else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) + rinfo->bw = RATE_INFO_BW_160; + else + rinfo->bw = RATE_INFO_BW_20; + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; +} + +static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *mac, struct station_info *sinfo) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + int ret = -ENOENT; + + mutex_lock(&local->sta_mtx); + + sta = sta_info_get_by_idx(sdata, idx); + if (sta) { + ret = 0; + memcpy(mac, sta->sta.addr, ETH_ALEN); + sta_set_sinfo(sta, sinfo, true); + } + + mutex_unlock(&local->sta_mtx); + + return ret; +} + +static int ieee80211_dump_survey(struct wiphy *wiphy, struct net_device *dev, + int idx, struct survey_info *survey) +{ + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + + return drv_get_survey(local, idx, survey); +} + +static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, + const u8 *mac, struct station_info *sinfo) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + int ret = -ENOENT; + + mutex_lock(&local->sta_mtx); + + sta = sta_info_get_bss(sdata, mac); + if (sta) { + ret = 0; + sta_set_sinfo(sta, sinfo, true); + } + + mutex_unlock(&local->sta_mtx); + + return ret; +} + +static int ieee80211_set_monitor_channel(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_sub_if_data *sdata; + int ret = 0; + + if (cfg80211_chandef_identical(&local->monitor_chandef, chandef)) + return 0; + + if (local->use_chanctx) { + sdata = wiphy_dereference(local->hw.wiphy, + local->monitor_sdata); + if (sdata) { + sdata_lock(sdata); + mutex_lock(&local->mtx); + ieee80211_link_release_channel(&sdata->deflink); + ret = ieee80211_link_use_channel(&sdata->deflink, + chandef, + IEEE80211_CHANCTX_EXCLUSIVE); + mutex_unlock(&local->mtx); + sdata_unlock(sdata); + } + } else { + mutex_lock(&local->mtx); + if (local->open_count == local->monitors) { + local->_oper_chandef = *chandef; + ieee80211_hw_config(local, 0); + } + mutex_unlock(&local->mtx); + } 
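+ /* Only remember the new monitor channel if switching succeeded. */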
+ + if (ret == 0) + local->monitor_chandef = *chandef; + + return ret; +} + +static int +ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata, + const u8 *resp, size_t resp_len, + const struct ieee80211_csa_settings *csa, + const struct ieee80211_color_change_settings *cca, + struct ieee80211_link_data *link) +{ + struct probe_resp *new, *old; + + if (!resp || !resp_len) + return 1; + + old = sdata_dereference(link->u.ap.probe_resp, sdata); + + new = kzalloc(sizeof(struct probe_resp) + resp_len, GFP_KERNEL); + if (!new) + return -ENOMEM; + + new->len = resp_len; + memcpy(new->data, resp, resp_len); + + if (csa) + memcpy(new->cntdwn_counter_offsets, csa->counter_offsets_presp, + csa->n_counter_offsets_presp * + sizeof(new->cntdwn_counter_offsets[0])); + else if (cca) + new->cntdwn_counter_offsets[0] = cca->counter_offset_presp; + + rcu_assign_pointer(link->u.ap.probe_resp, new); + if (old) + kfree_rcu(old, rcu_head); + + return 0; +} + +static int ieee80211_set_fils_discovery(struct ieee80211_sub_if_data *sdata, + struct cfg80211_fils_discovery *params, + struct ieee80211_link_data *link, + struct ieee80211_bss_conf *link_conf) +{ + struct fils_discovery_data *new, *old = NULL; + struct ieee80211_fils_discovery *fd; + + if (!params->tmpl || !params->tmpl_len) + return -EINVAL; + + fd = &link_conf->fils_discovery; + fd->min_interval = params->min_interval; + fd->max_interval = params->max_interval; + + old = sdata_dereference(link->u.ap.fils_discovery, sdata); + new = kzalloc(sizeof(*new) + params->tmpl_len, GFP_KERNEL); + if (!new) + return -ENOMEM; + new->len = params->tmpl_len; + memcpy(new->data, params->tmpl, params->tmpl_len); + rcu_assign_pointer(link->u.ap.fils_discovery, new); + + if (old) + kfree_rcu(old, rcu_head); + + return 0; +} + +static int +ieee80211_set_unsol_bcast_probe_resp(struct ieee80211_sub_if_data *sdata, + struct cfg80211_unsol_bcast_probe_resp *params, + struct ieee80211_link_data *link, + struct ieee80211_bss_conf *link_conf) +{ + struct unsol_bcast_probe_resp_data *new, *old = NULL; + + if (!params->tmpl || !params->tmpl_len) + return -EINVAL; + + old = sdata_dereference(link->u.ap.unsol_bcast_probe_resp, sdata); + new = kzalloc(sizeof(*new) + params->tmpl_len, GFP_KERNEL); + if (!new) + return -ENOMEM; + new->len = params->tmpl_len; + memcpy(new->data, params->tmpl, params->tmpl_len); + rcu_assign_pointer(link->u.ap.unsol_bcast_probe_resp, new); + + if (old) + kfree_rcu(old, rcu_head); + + link_conf->unsol_bcast_probe_resp_interval = params->interval; + + return 0; +} + +static int ieee80211_set_ftm_responder_params( + struct ieee80211_sub_if_data *sdata, + const u8 *lci, size_t lci_len, + const u8 *civicloc, size_t civicloc_len, + struct ieee80211_bss_conf *link_conf) +{ + struct ieee80211_ftm_responder_params *new, *old; + u8 *pos; + int len; + + if (!lci_len && !civicloc_len) + return 0; + + old = link_conf->ftmr_params; + len = lci_len + civicloc_len; + + new = kzalloc(sizeof(*new) + len, GFP_KERNEL); + if (!new) + return -ENOMEM; + + pos = (u8 *)(new + 1); + if (lci_len) { + new->lci_len = lci_len; + new->lci = pos; + memcpy(pos, lci, lci_len); + pos += lci_len; + } + + if (civicloc_len) { + new->civicloc_len = civicloc_len; + new->civicloc = pos; + memcpy(pos, civicloc, civicloc_len); + pos += civicloc_len; + } + + link_conf->ftmr_params = new; + kfree(old); + + return 0; +} + +static int +ieee80211_copy_mbssid_beacon(u8 *pos, struct cfg80211_mbssid_elems *dst, + struct cfg80211_mbssid_elems *src) +{ + int i, offset = 0; + + for (i = 0; i < 
src->cnt; i++) { + memcpy(pos + offset, src->elem[i].data, src->elem[i].len); + dst->elem[i].len = src->elem[i].len; + dst->elem[i].data = pos + offset; + offset += dst->elem[i].len; + } + dst->cnt = src->cnt; + + return offset; +} + +static int +ieee80211_copy_rnr_beacon(u8 *pos, struct cfg80211_rnr_elems *dst, + struct cfg80211_rnr_elems *src) +{ + int i, offset = 0; + + for (i = 0; i < src->cnt; i++) { + memcpy(pos + offset, src->elem[i].data, src->elem[i].len); + dst->elem[i].len = src->elem[i].len; + dst->elem[i].data = pos + offset; + offset += dst->elem[i].len; + } + dst->cnt = src->cnt; + + return offset; +} + +static int +ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link, + struct cfg80211_beacon_data *params, + const struct ieee80211_csa_settings *csa, + const struct ieee80211_color_change_settings *cca, + u64 *changed) +{ + struct cfg80211_mbssid_elems *mbssid = NULL; + struct cfg80211_rnr_elems *rnr = NULL; + struct beacon_data *new, *old; + int new_head_len, new_tail_len; + int size, err; + u64 _changed = BSS_CHANGED_BEACON; + struct ieee80211_bss_conf *link_conf = link->conf; + + old = sdata_dereference(link->u.ap.beacon, sdata); + + /* Need to have a beacon head if we don't have one yet */ + if (!params->head && !old) + return -EINVAL; + + /* new or old head? */ + if (params->head) + new_head_len = params->head_len; + else + new_head_len = old->head_len; + + /* new or old tail? */ + if (params->tail || !old) + /* params->tail_len will be zero for !params->tail */ + new_tail_len = params->tail_len; + else + new_tail_len = old->tail_len; + + size = sizeof(*new) + new_head_len + new_tail_len; + + /* new or old multiple BSSID elements? */ + if (params->mbssid_ies) { + mbssid = params->mbssid_ies; + size += struct_size(new->mbssid_ies, elem, mbssid->cnt); + if (params->rnr_ies) { + rnr = params->rnr_ies; + size += struct_size(new->rnr_ies, elem, rnr->cnt); + } + size += ieee80211_get_mbssid_beacon_len(mbssid, rnr, + mbssid->cnt); + } else if (old && old->mbssid_ies) { + mbssid = old->mbssid_ies; + size += struct_size(new->mbssid_ies, elem, mbssid->cnt); + if (old && old->rnr_ies) { + rnr = old->rnr_ies; + size += struct_size(new->rnr_ies, elem, rnr->cnt); + } + size += ieee80211_get_mbssid_beacon_len(mbssid, rnr, + mbssid->cnt); + } + + new = kzalloc(size, GFP_KERNEL); + if (!new) + return -ENOMEM; + + /* start filling the new info now */ + + /* + * pointers go into the block we allocated, + * memory is | beacon_data | head | tail | mbssid_ies | rnr_ies + */ + new->head = ((u8 *) new) + sizeof(*new); + new->tail = new->head + new_head_len; + new->head_len = new_head_len; + new->tail_len = new_tail_len; + /* copy in optional mbssid_ies */ + if (mbssid) { + u8 *pos = new->tail + new->tail_len; + + new->mbssid_ies = (void *)pos; + pos += struct_size(new->mbssid_ies, elem, mbssid->cnt); + pos += ieee80211_copy_mbssid_beacon(pos, new->mbssid_ies, + mbssid); + if (rnr) { + new->rnr_ies = (void *)pos; + pos += struct_size(new->rnr_ies, elem, rnr->cnt); + ieee80211_copy_rnr_beacon(pos, new->rnr_ies, rnr); + } + /* update bssid_indicator */ + link_conf->bssid_indicator = + ilog2(__roundup_pow_of_two(mbssid->cnt + 1)); + } + + if (csa) { + new->cntdwn_current_counter = csa->count; + memcpy(new->cntdwn_counter_offsets, csa->counter_offsets_beacon, + csa->n_counter_offsets_beacon * + sizeof(new->cntdwn_counter_offsets[0])); + } else if (cca) { + new->cntdwn_current_counter = cca->count; + new->cntdwn_counter_offsets[0] = 
cca->counter_offset_beacon; + } + + /* copy in head */ + if (params->head) + memcpy(new->head, params->head, new_head_len); + else + memcpy(new->head, old->head, new_head_len); + + /* copy in optional tail */ + if (params->tail) + memcpy(new->tail, params->tail, new_tail_len); + else + if (old) + memcpy(new->tail, old->tail, new_tail_len); + + err = ieee80211_set_probe_resp(sdata, params->probe_resp, + params->probe_resp_len, csa, cca, link); + if (err < 0) { + kfree(new); + return err; + } + if (err == 0) + _changed |= BSS_CHANGED_AP_PROBE_RESP; + + if (params->ftm_responder != -1) { + link_conf->ftm_responder = params->ftm_responder; + err = ieee80211_set_ftm_responder_params(sdata, + params->lci, + params->lci_len, + params->civicloc, + params->civicloc_len, + link_conf); + + if (err < 0) { + kfree(new); + return err; + } + + _changed |= BSS_CHANGED_FTM_RESPONDER; + } + + rcu_assign_pointer(link->u.ap.beacon, new); + sdata->u.ap.active = true; + + if (old) + kfree_rcu(old, rcu_head); + + *changed |= _changed; + return 0; +} + +static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ap_settings *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct beacon_data *old; + struct ieee80211_sub_if_data *vlan; + u64 changed = BSS_CHANGED_BEACON_INT | + BSS_CHANGED_BEACON_ENABLED | + BSS_CHANGED_BEACON | + BSS_CHANGED_P2P_PS | + BSS_CHANGED_TXPOWER | + BSS_CHANGED_TWT; + int i, err; + int prev_beacon_int; + unsigned int link_id = params->beacon.link_id; + struct ieee80211_link_data *link; + struct ieee80211_bss_conf *link_conf; + + link = sdata_dereference(sdata->link[link_id], sdata); + if (!link) + return -ENOLINK; + + link_conf = link->conf; + + old = sdata_dereference(link->u.ap.beacon, sdata); + if (old) + return -EALREADY; + + if (params->smps_mode != NL80211_SMPS_OFF) + return -ENOTSUPP; + + link->smps_mode = IEEE80211_SMPS_OFF; + + link->needed_rx_chains = sdata->local->rx_chains; + + prev_beacon_int = link_conf->beacon_int; + link_conf->beacon_int = params->beacon_interval; + + if (params->ht_cap) + link_conf->ht_ldpc = + params->ht_cap->cap_info & + cpu_to_le16(IEEE80211_HT_CAP_LDPC_CODING); + + if (params->vht_cap) { + link_conf->vht_ldpc = + params->vht_cap->vht_cap_info & + cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC); + link_conf->vht_su_beamformer = + params->vht_cap->vht_cap_info & + cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE); + link_conf->vht_su_beamformee = + params->vht_cap->vht_cap_info & + cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE); + link_conf->vht_mu_beamformer = + params->vht_cap->vht_cap_info & + cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE); + link_conf->vht_mu_beamformee = + params->vht_cap->vht_cap_info & + cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); + } + + if (params->he_cap && params->he_oper) { + link_conf->he_support = true; + link_conf->htc_trig_based_pkt_ext = + le32_get_bits(params->he_oper->he_oper_params, + IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK); + link_conf->frame_time_rts_th = + le32_get_bits(params->he_oper->he_oper_params, + IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK); + changed |= BSS_CHANGED_HE_OBSS_PD; + + if (params->beacon.he_bss_color.enabled) + changed |= BSS_CHANGED_HE_BSS_COLOR; + } + + if (params->he_cap) { + link_conf->he_ldpc = + params->he_cap->phy_cap_info[1] & + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD; + link_conf->he_su_beamformer = + params->he_cap->phy_cap_info[3] & + 
IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER; + link_conf->he_su_beamformee = + params->he_cap->phy_cap_info[4] & + IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE; + link_conf->he_mu_beamformer = + params->he_cap->phy_cap_info[4] & + IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; + link_conf->he_full_ul_mumimo = + params->he_cap->phy_cap_info[2] & + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO; + } + + if (params->eht_cap) { + if (!link_conf->he_support) + return -EOPNOTSUPP; + + link_conf->eht_support = true; + link_conf->eht_puncturing = params->punct_bitmap; + changed |= BSS_CHANGED_EHT_PUNCTURING; + + link_conf->eht_su_beamformer = + params->eht_cap->fixed.phy_cap_info[0] & + IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER; + link_conf->eht_su_beamformee = + params->eht_cap->fixed.phy_cap_info[0] & + IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE; + link_conf->eht_mu_beamformer = + params->eht_cap->fixed.phy_cap_info[7] & + (IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ); + } else { + link_conf->eht_su_beamformer = false; + link_conf->eht_su_beamformee = false; + link_conf->eht_mu_beamformer = false; + } + + if (sdata->vif.type == NL80211_IFTYPE_AP && + params->mbssid_config.tx_wdev) { + err = ieee80211_set_ap_mbssid_options(sdata, + params->mbssid_config, + link_conf); + if (err) + return err; + } + + mutex_lock(&local->mtx); + err = ieee80211_link_use_channel(link, &params->chandef, + IEEE80211_CHANCTX_SHARED); + if (!err) + ieee80211_link_copy_chanctx_to_vlans(link, false); + mutex_unlock(&local->mtx); + if (err) { + link_conf->beacon_int = prev_beacon_int; + return err; + } + + /* + * Apply control port protocol, this allows us to + * not encrypt dynamic WEP control frames. + */ + sdata->control_port_protocol = params->crypto.control_port_ethertype; + sdata->control_port_no_encrypt = params->crypto.control_port_no_encrypt; + sdata->control_port_over_nl80211 = + params->crypto.control_port_over_nl80211; + sdata->control_port_no_preauth = + params->crypto.control_port_no_preauth; + + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { + vlan->control_port_protocol = + params->crypto.control_port_ethertype; + vlan->control_port_no_encrypt = + params->crypto.control_port_no_encrypt; + vlan->control_port_over_nl80211 = + params->crypto.control_port_over_nl80211; + vlan->control_port_no_preauth = + params->crypto.control_port_no_preauth; + } + + link_conf->dtim_period = params->dtim_period; + link_conf->enable_beacon = true; + link_conf->allow_p2p_go_ps = sdata->vif.p2p; + link_conf->twt_responder = params->twt_responder; + link_conf->he_obss_pd = params->he_obss_pd; + link_conf->he_bss_color = params->beacon.he_bss_color; + sdata->vif.cfg.s1g = params->chandef.chan->band == + NL80211_BAND_S1GHZ; + + sdata->vif.cfg.ssid_len = params->ssid_len; + if (params->ssid_len) + memcpy(sdata->vif.cfg.ssid, params->ssid, + params->ssid_len); + link_conf->hidden_ssid = + (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE); + + memset(&link_conf->p2p_noa_attr, 0, + sizeof(link_conf->p2p_noa_attr)); + link_conf->p2p_noa_attr.oppps_ctwindow = + params->p2p_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK; + if (params->p2p_opp_ps) + link_conf->p2p_noa_attr.oppps_ctwindow |= + IEEE80211_P2P_OPPPS_ENABLE_BIT; + + sdata->beacon_rate_set = false; + if (wiphy_ext_feature_isset(local->hw.wiphy, + NL80211_EXT_FEATURE_BEACON_RATE_LEGACY)) { + for (i = 0; i < NUM_NL80211_BANDS; i++) { + sdata->beacon_rateidx_mask[i] = + params->beacon_rate.control[i].legacy; + if 
(sdata->beacon_rateidx_mask[i]) + sdata->beacon_rate_set = true; + } + } + + if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) + link_conf->beacon_tx_rate = params->beacon_rate; + + err = ieee80211_assign_beacon(sdata, link, &params->beacon, NULL, NULL, + &changed); + if (err < 0) + goto error; + + if (params->fils_discovery.max_interval) { + err = ieee80211_set_fils_discovery(sdata, + &params->fils_discovery, + link, link_conf); + if (err < 0) + goto error; + changed |= BSS_CHANGED_FILS_DISCOVERY; + } + + if (params->unsol_bcast_probe_resp.interval) { + err = ieee80211_set_unsol_bcast_probe_resp(sdata, + &params->unsol_bcast_probe_resp, + link, link_conf); + if (err < 0) + goto error; + changed |= BSS_CHANGED_UNSOL_BCAST_PROBE_RESP; + } + + err = drv_start_ap(sdata->local, sdata, link_conf); + if (err) { + old = sdata_dereference(link->u.ap.beacon, sdata); + + if (old) + kfree_rcu(old, rcu_head); + RCU_INIT_POINTER(link->u.ap.beacon, NULL); + sdata->u.ap.active = false; + goto error; + } + + ieee80211_recalc_dtim(local, sdata); + ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_SSID); + ieee80211_link_info_change_notify(sdata, link, changed); + + netif_carrier_on(dev); + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) + netif_carrier_on(vlan->dev); + + return 0; + +error: + mutex_lock(&local->mtx); + ieee80211_link_release_channel(link); + mutex_unlock(&local->mtx); + + return err; +} + +static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_beacon_data *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_link_data *link; + struct beacon_data *old; + int err; + struct ieee80211_bss_conf *link_conf; + u64 changed = 0; + + sdata_assert_lock(sdata); + + link = sdata_dereference(sdata->link[params->link_id], sdata); + if (!link) + return -ENOLINK; + + link_conf = link->conf; + + /* don't allow changing the beacon while a countdown is in place - offset + * of channel switch counter may change + */ + if (link_conf->csa_active || link_conf->color_change_active) + return -EBUSY; + + old = sdata_dereference(link->u.ap.beacon, sdata); + if (!old) + return -ENOENT; + + err = ieee80211_assign_beacon(sdata, link, params, NULL, NULL, + &changed); + if (err < 0) + return err; + + if (params->he_bss_color_valid && + params->he_bss_color.enabled != link_conf->he_bss_color.enabled) { + link_conf->he_bss_color.enabled = params->he_bss_color.enabled; + changed |= BSS_CHANGED_HE_BSS_COLOR; + } + + ieee80211_link_info_change_notify(sdata, link, changed); + return 0; +} + +static void ieee80211_free_next_beacon(struct ieee80211_link_data *link) +{ + if (!link->u.ap.next_beacon) + return; + + kfree(link->u.ap.next_beacon->mbssid_ies); + kfree(link->u.ap.next_beacon->rnr_ies); + kfree(link->u.ap.next_beacon); + link->u.ap.next_beacon = NULL; +} + +static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev, + unsigned int link_id) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_sub_if_data *vlan; + struct ieee80211_local *local = sdata->local; + struct beacon_data *old_beacon; + struct probe_resp *old_probe_resp; + struct fils_discovery_data *old_fils_discovery; + struct unsol_bcast_probe_resp_data *old_unsol_bcast_probe_resp; + struct cfg80211_chan_def chandef; + struct ieee80211_link_data *link = + sdata_dereference(sdata->link[link_id], sdata); + struct ieee80211_bss_conf *link_conf = link->conf; + + sdata_assert_lock(sdata); + + old_beacon = 
sdata_dereference(link->u.ap.beacon, sdata); + if (!old_beacon) + return -ENOENT; + old_probe_resp = sdata_dereference(link->u.ap.probe_resp, + sdata); + old_fils_discovery = sdata_dereference(link->u.ap.fils_discovery, + sdata); + old_unsol_bcast_probe_resp = + sdata_dereference(link->u.ap.unsol_bcast_probe_resp, + sdata); + + /* abort any running channel switch or color change */ + mutex_lock(&local->mtx); + link_conf->csa_active = false; + link_conf->color_change_active = false; + if (link->csa_block_tx) { + ieee80211_wake_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_CSA); + link->csa_block_tx = false; + } + + mutex_unlock(&local->mtx); + + ieee80211_free_next_beacon(link); + + /* turn off carrier for this interface and dependent VLANs */ + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) + netif_carrier_off(vlan->dev); + netif_carrier_off(dev); + + /* remove beacon and probe response */ + sdata->u.ap.active = false; + RCU_INIT_POINTER(link->u.ap.beacon, NULL); + RCU_INIT_POINTER(link->u.ap.probe_resp, NULL); + RCU_INIT_POINTER(link->u.ap.fils_discovery, NULL); + RCU_INIT_POINTER(link->u.ap.unsol_bcast_probe_resp, NULL); + kfree_rcu(old_beacon, rcu_head); + if (old_probe_resp) + kfree_rcu(old_probe_resp, rcu_head); + if (old_fils_discovery) + kfree_rcu(old_fils_discovery, rcu_head); + if (old_unsol_bcast_probe_resp) + kfree_rcu(old_unsol_bcast_probe_resp, rcu_head); + + kfree(link_conf->ftmr_params); + link_conf->ftmr_params = NULL; + + sdata->vif.mbssid_tx_vif = NULL; + link_conf->bssid_index = 0; + link_conf->nontransmitted = false; + link_conf->ema_ap = false; + link_conf->bssid_indicator = 0; + + __sta_info_flush(sdata, true); + ieee80211_free_keys(sdata, true); + + link_conf->enable_beacon = false; + sdata->beacon_rate_set = false; + sdata->vif.cfg.ssid_len = 0; + clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); + ieee80211_link_info_change_notify(sdata, link, + BSS_CHANGED_BEACON_ENABLED); + + if (sdata->wdev.cac_started) { + chandef = link_conf->chandef; + cancel_delayed_work_sync(&link->dfs_cac_timer_work); + cfg80211_cac_event(sdata->dev, &chandef, + NL80211_RADAR_CAC_ABORTED, + GFP_KERNEL); + } + + drv_stop_ap(sdata->local, sdata, link_conf); + + /* free all potentially still buffered bcast frames */ + local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf); + ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf); + + mutex_lock(&local->mtx); + ieee80211_link_copy_chanctx_to_vlans(link, true); + ieee80211_link_release_channel(link); + mutex_unlock(&local->mtx); + + return 0; +} + +static int sta_apply_auth_flags(struct ieee80211_local *local, + struct sta_info *sta, + u32 mask, u32 set) +{ + int ret; + + if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED) && + set & BIT(NL80211_STA_FLAG_AUTHENTICATED) && + !test_sta_flag(sta, WLAN_STA_AUTH)) { + ret = sta_info_move_state(sta, IEEE80211_STA_AUTH); + if (ret) + return ret; + } + + if (mask & BIT(NL80211_STA_FLAG_ASSOCIATED) && + set & BIT(NL80211_STA_FLAG_ASSOCIATED) && + !test_sta_flag(sta, WLAN_STA_ASSOC)) { + /* + * When peer becomes associated, init rate control as + * well. Some drivers require rate control initialized + * before drv_sta_state() is called. 
+ */ + if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) + rate_control_rate_init(sta); + + ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); + if (ret) + return ret; + } + + if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) { + if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) + ret = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); + else if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); + else + ret = 0; + if (ret) + return ret; + } + + if (mask & BIT(NL80211_STA_FLAG_ASSOCIATED) && + !(set & BIT(NL80211_STA_FLAG_ASSOCIATED)) && + test_sta_flag(sta, WLAN_STA_ASSOC)) { + ret = sta_info_move_state(sta, IEEE80211_STA_AUTH); + if (ret) + return ret; + } + + if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED) && + !(set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) && + test_sta_flag(sta, WLAN_STA_AUTH)) { + ret = sta_info_move_state(sta, IEEE80211_STA_NONE); + if (ret) + return ret; + } + + return 0; +} + +static void sta_apply_mesh_params(struct ieee80211_local *local, + struct sta_info *sta, + struct station_parameters *params) +{ +#ifdef CONFIG_MAC80211_MESH + struct ieee80211_sub_if_data *sdata = sta->sdata; + u64 changed = 0; + + if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) { + switch (params->plink_state) { + case NL80211_PLINK_ESTAB: + if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) + changed = mesh_plink_inc_estab_count(sdata); + sta->mesh->plink_state = params->plink_state; + sta->mesh->aid = params->peer_aid; + + ieee80211_mps_sta_status_update(sta); + changed |= ieee80211_mps_set_sta_local_pm(sta, + sdata->u.mesh.mshcfg.power_mode); + + ewma_mesh_tx_rate_avg_init(&sta->mesh->tx_rate_avg); + /* init at low value */ + ewma_mesh_tx_rate_avg_add(&sta->mesh->tx_rate_avg, 10); + + break; + case NL80211_PLINK_LISTEN: + case NL80211_PLINK_BLOCKED: + case NL80211_PLINK_OPN_SNT: + case NL80211_PLINK_OPN_RCVD: + case NL80211_PLINK_CNF_RCVD: + case NL80211_PLINK_HOLDING: + if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) + changed = mesh_plink_dec_estab_count(sdata); + sta->mesh->plink_state = params->plink_state; + + ieee80211_mps_sta_status_update(sta); + changed |= ieee80211_mps_set_sta_local_pm(sta, + NL80211_MESH_POWER_UNKNOWN); + break; + default: + /* nothing */ + break; + } + } + + switch (params->plink_action) { + case NL80211_PLINK_ACTION_NO_ACTION: + /* nothing */ + break; + case NL80211_PLINK_ACTION_OPEN: + changed |= mesh_plink_open(sta); + break; + case NL80211_PLINK_ACTION_BLOCK: + changed |= mesh_plink_block(sta); + break; + } + + if (params->local_pm) + changed |= ieee80211_mps_set_sta_local_pm(sta, + params->local_pm); + + ieee80211_mbss_info_change_notify(sdata, changed); +#endif +} + +static int sta_link_apply_parameters(struct ieee80211_local *local, + struct sta_info *sta, bool new_link, + struct link_station_parameters *params) +{ + int ret = 0; + struct ieee80211_supported_band *sband; + struct ieee80211_sub_if_data *sdata = sta->sdata; + u32 link_id = params->link_id < 0 ? 0 : params->link_id; + struct ieee80211_link_data *link = + sdata_dereference(sdata->link[link_id], sdata); + struct link_sta_info *link_sta = + rcu_dereference_protected(sta->link[link_id], + lockdep_is_held(&local->sta_mtx)); + + /* + * If there are no changes, then accept a link that exist, + * unless it's a new link. 
+ */ + if (params->link_id >= 0 && !new_link && + !params->link_mac && !params->txpwr_set && + !params->supported_rates_len && + !params->ht_capa && !params->vht_capa && + !params->he_capa && !params->eht_capa && + !params->opmode_notif_used) + return 0; + + if (!link || !link_sta) + return -EINVAL; + + sband = ieee80211_get_link_sband(link); + if (!sband) + return -EINVAL; + + if (params->link_mac) { + if (new_link) { + memcpy(link_sta->addr, params->link_mac, ETH_ALEN); + memcpy(link_sta->pub->addr, params->link_mac, ETH_ALEN); + } else if (!ether_addr_equal(link_sta->addr, + params->link_mac)) { + return -EINVAL; + } + } else if (new_link) { + return -EINVAL; + } + + if (params->txpwr_set) { + link_sta->pub->txpwr.type = params->txpwr.type; + if (params->txpwr.type == NL80211_TX_POWER_LIMITED) + link_sta->pub->txpwr.power = params->txpwr.power; + ret = drv_sta_set_txpwr(local, sdata, sta); + if (ret) + return ret; + } + + if (params->supported_rates && + params->supported_rates_len) { + ieee80211_parse_bitrates(link->conf->chandef.width, + sband, params->supported_rates, + params->supported_rates_len, + &link_sta->pub->supp_rates[sband->band]); + } + + if (params->ht_capa) + ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, + params->ht_capa, link_sta); + + /* VHT can override some HT caps such as the A-MSDU max length */ + if (params->vht_capa) + ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, + params->vht_capa, NULL, + link_sta); + + if (params->he_capa) + ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, + (void *)params->he_capa, + params->he_capa_len, + (void *)params->he_6ghz_capa, + link_sta); + + if (params->he_capa && params->eht_capa) + ieee80211_eht_cap_ie_to_sta_eht_cap(sdata, sband, + (u8 *)params->he_capa, + params->he_capa_len, + params->eht_capa, + params->eht_capa_len, + link_sta); + + if (params->opmode_notif_used) { + /* returned value is only needed for rc update, but the + * rc isn't initialized here yet, so ignore it + */ + __ieee80211_vht_handle_opmode(sdata, link_sta, + params->opmode_notif, + sband->band); + } + + return ret; +} + +static int sta_apply_parameters(struct ieee80211_local *local, + struct sta_info *sta, + struct station_parameters *params) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + u32 mask, set; + int ret = 0; + + mask = params->sta_flags_mask; + set = params->sta_flags_set; + + if (ieee80211_vif_is_mesh(&sdata->vif)) { + /* + * In mesh mode, ASSOCIATED isn't part of the nl80211 + * API but must follow AUTHENTICATED for driver state. 
+ */ + if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) + mask |= BIT(NL80211_STA_FLAG_ASSOCIATED); + if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) + set |= BIT(NL80211_STA_FLAG_ASSOCIATED); + } else if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { + /* + * TDLS -- everything follows authorized, but + * only becoming authorized is possible, not + * going back + */ + if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) { + set |= BIT(NL80211_STA_FLAG_AUTHENTICATED) | + BIT(NL80211_STA_FLAG_ASSOCIATED); + mask |= BIT(NL80211_STA_FLAG_AUTHENTICATED) | + BIT(NL80211_STA_FLAG_ASSOCIATED); + } + } + + if (mask & BIT(NL80211_STA_FLAG_WME) && + local->hw.queues >= IEEE80211_NUM_ACS) + sta->sta.wme = set & BIT(NL80211_STA_FLAG_WME); + + /* auth flags will be set later for TDLS, + * and for unassociated stations that move to associated */ + if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER) && + !((mask & BIT(NL80211_STA_FLAG_ASSOCIATED)) && + (set & BIT(NL80211_STA_FLAG_ASSOCIATED)))) { + ret = sta_apply_auth_flags(local, sta, mask, set); + if (ret) + return ret; + } + + if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) { + if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) + set_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE); + else + clear_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE); + } + + if (mask & BIT(NL80211_STA_FLAG_MFP)) { + sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP)); + if (set & BIT(NL80211_STA_FLAG_MFP)) + set_sta_flag(sta, WLAN_STA_MFP); + else + clear_sta_flag(sta, WLAN_STA_MFP); + } + + if (mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) { + if (set & BIT(NL80211_STA_FLAG_TDLS_PEER)) + set_sta_flag(sta, WLAN_STA_TDLS_PEER); + else + clear_sta_flag(sta, WLAN_STA_TDLS_PEER); + } + + /* mark TDLS channel switch support, if the AP allows it */ + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && + !sdata->deflink.u.mgd.tdls_chan_switch_prohibited && + params->ext_capab_len >= 4 && + params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) + set_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH); + + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && + !sdata->u.mgd.tdls_wider_bw_prohibited && + ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) && + params->ext_capab_len >= 8 && + params->ext_capab[7] & WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) + set_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW); + + if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) { + sta->sta.uapsd_queues = params->uapsd_queues; + sta->sta.max_sp = params->max_sp; + } + + ieee80211_sta_set_max_amsdu_subframes(sta, params->ext_capab, + params->ext_capab_len); + + /* + * cfg80211 validates this (1-2007) and allows setting the AID + * only when creating a new station entry + */ + if (params->aid) + sta->sta.aid = params->aid; + + /* + * Some of the following updates would be racy if called on an + * existing station, via ieee80211_change_station(). However, + * all such changes are rejected by cfg80211 except for updates + * changing the supported rates on an existing but not yet used + * TDLS peer. 
+ */ + + if (params->listen_interval >= 0) + sta->listen_interval = params->listen_interval; + + ret = sta_link_apply_parameters(local, sta, false, + &params->link_sta_params); + if (ret) + return ret; + + if (params->support_p2p_ps >= 0) + sta->sta.support_p2p_ps = params->support_p2p_ps; + + if (ieee80211_vif_is_mesh(&sdata->vif)) + sta_apply_mesh_params(local, sta, params); + + if (params->airtime_weight) + sta->airtime_weight = params->airtime_weight; + + /* set the STA state after all sta info from usermode has been set */ + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) || + set & BIT(NL80211_STA_FLAG_ASSOCIATED)) { + ret = sta_apply_auth_flags(local, sta, mask, set); + if (ret) + return ret; + } + + /* Mark the STA as MLO if MLD MAC address is available */ + if (params->link_sta_params.mld_mac) + sta->sta.mlo = true; + + return 0; +} + +static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, + const u8 *mac, + struct station_parameters *params) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct sta_info *sta; + struct ieee80211_sub_if_data *sdata; + int err; + + if (params->vlan) { + sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); + + if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && + sdata->vif.type != NL80211_IFTYPE_AP) + return -EINVAL; + } else + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (ether_addr_equal(mac, sdata->vif.addr)) + return -EINVAL; + + if (!is_valid_ether_addr(mac)) + return -EINVAL; + + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) && + sdata->vif.type == NL80211_IFTYPE_STATION && + !sdata->u.mgd.associated) + return -EINVAL; + + /* + * If we have a link ID, it can be a non-MLO station on an AP MLD, + * but we need to have a link_mac in that case as well, so use the + * STA's MAC address in that case. + */ + if (params->link_sta_params.link_id >= 0) + sta = sta_info_alloc_with_link(sdata, mac, + params->link_sta_params.link_id, + params->link_sta_params.link_mac ?: mac, + GFP_KERNEL); + else + sta = sta_info_alloc(sdata, mac, GFP_KERNEL); + + if (!sta) + return -ENOMEM; + + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) + sta->sta.tdls = true; + + /* Though the mutex is not needed here (since the station is not + * visible yet), sta_apply_parameters (and inner functions) require + * the mutex due to other paths. 
+ */ + mutex_lock(&local->sta_mtx); + err = sta_apply_parameters(local, sta, params); + mutex_unlock(&local->sta_mtx); + if (err) { + sta_info_free(local, sta); + return err; + } + + /* + * for TDLS and for unassociated station, rate control should be + * initialized only when rates are known and station is marked + * authorized/associated + */ + if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER) && + test_sta_flag(sta, WLAN_STA_ASSOC)) + rate_control_rate_init(sta); + + return sta_info_insert(sta); +} + +static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev, + struct station_del_parameters *params) +{ + struct ieee80211_sub_if_data *sdata; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (params->mac) + return sta_info_destroy_addr_bss(sdata, params->mac); + + sta_info_flush(sdata); + return 0; +} + +static int ieee80211_change_station(struct wiphy *wiphy, + struct net_device *dev, const u8 *mac, + struct station_parameters *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = wiphy_priv(wiphy); + struct sta_info *sta; + struct ieee80211_sub_if_data *vlansdata; + enum cfg80211_station_type statype; + int err; + + mutex_lock(&local->sta_mtx); + + sta = sta_info_get_bss(sdata, mac); + if (!sta) { + err = -ENOENT; + goto out_err; + } + + switch (sdata->vif.type) { + case NL80211_IFTYPE_MESH_POINT: + if (sdata->u.mesh.user_mpm) + statype = CFG80211_STA_MESH_PEER_USER; + else + statype = CFG80211_STA_MESH_PEER_KERNEL; + break; + case NL80211_IFTYPE_ADHOC: + statype = CFG80211_STA_IBSS; + break; + case NL80211_IFTYPE_STATION: + if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { + statype = CFG80211_STA_AP_STA; + break; + } + if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + statype = CFG80211_STA_TDLS_PEER_ACTIVE; + else + statype = CFG80211_STA_TDLS_PEER_SETUP; + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + if (test_sta_flag(sta, WLAN_STA_ASSOC)) + statype = CFG80211_STA_AP_CLIENT; + else + statype = CFG80211_STA_AP_CLIENT_UNASSOC; + break; + default: + err = -EOPNOTSUPP; + goto out_err; + } + + err = cfg80211_check_station_change(wiphy, params, statype); + if (err) + goto out_err; + + if (params->vlan && params->vlan != sta->sdata->dev) { + vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); + + if (params->vlan->ieee80211_ptr->use_4addr) { + if (vlansdata->u.vlan.sta) { + err = -EBUSY; + goto out_err; + } + + rcu_assign_pointer(vlansdata->u.vlan.sta, sta); + __ieee80211_check_fast_rx_iface(vlansdata); + drv_sta_set_4addr(local, sta->sdata, &sta->sta, true); + } + + if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + sta->sdata->u.vlan.sta) { + ieee80211_clear_fast_rx(sta); + RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL); + } + + if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + ieee80211_vif_dec_num_mcast(sta->sdata); + + sta->sdata = vlansdata; + ieee80211_check_fast_xmit(sta); + + if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { + ieee80211_vif_inc_num_mcast(sta->sdata); + cfg80211_send_layer2_update(sta->sdata->dev, + sta->sta.addr); + } + } + + /* we use sta_info_get_bss() so this might be different */ + if (sdata != sta->sdata) { + mutex_lock_nested(&sta->sdata->wdev.mtx, 1); + err = sta_apply_parameters(local, sta, params); + mutex_unlock(&sta->sdata->wdev.mtx); + } else { + err = sta_apply_parameters(local, sta, params); + } + if (err) + goto out_err; + + mutex_unlock(&local->sta_mtx); + + if (sdata->vif.type == NL80211_IFTYPE_STATION && + params->sta_flags_mask & 
BIT(NL80211_STA_FLAG_AUTHORIZED)) { + ieee80211_recalc_ps(local); + ieee80211_recalc_ps_vif(sdata); + } + + return 0; +out_err: + mutex_unlock(&local->sta_mtx); + return err; +} + +#ifdef CONFIG_MAC80211_MESH +static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, + const u8 *dst, const u8 *next_hop) +{ + struct ieee80211_sub_if_data *sdata; + struct mesh_path *mpath; + struct sta_info *sta; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + rcu_read_lock(); + sta = sta_info_get(sdata, next_hop); + if (!sta) { + rcu_read_unlock(); + return -ENOENT; + } + + mpath = mesh_path_add(sdata, dst); + if (IS_ERR(mpath)) { + rcu_read_unlock(); + return PTR_ERR(mpath); + } + + mesh_path_fix_nexthop(mpath, sta); + + rcu_read_unlock(); + return 0; +} + +static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, + const u8 *dst) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (dst) + return mesh_path_del(sdata, dst); + + mesh_path_flush_by_iface(sdata); + return 0; +} + +static int ieee80211_change_mpath(struct wiphy *wiphy, struct net_device *dev, + const u8 *dst, const u8 *next_hop) +{ + struct ieee80211_sub_if_data *sdata; + struct mesh_path *mpath; + struct sta_info *sta; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + rcu_read_lock(); + + sta = sta_info_get(sdata, next_hop); + if (!sta) { + rcu_read_unlock(); + return -ENOENT; + } + + mpath = mesh_path_lookup(sdata, dst); + if (!mpath) { + rcu_read_unlock(); + return -ENOENT; + } + + mesh_path_fix_nexthop(mpath, sta); + + rcu_read_unlock(); + return 0; +} + +static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, + struct mpath_info *pinfo) +{ + struct sta_info *next_hop_sta = rcu_dereference(mpath->next_hop); + + if (next_hop_sta) + memcpy(next_hop, next_hop_sta->sta.addr, ETH_ALEN); + else + eth_zero_addr(next_hop); + + memset(pinfo, 0, sizeof(*pinfo)); + + pinfo->generation = mpath->sdata->u.mesh.mesh_paths_generation; + + pinfo->filled = MPATH_INFO_FRAME_QLEN | + MPATH_INFO_SN | + MPATH_INFO_METRIC | + MPATH_INFO_EXPTIME | + MPATH_INFO_DISCOVERY_TIMEOUT | + MPATH_INFO_DISCOVERY_RETRIES | + MPATH_INFO_FLAGS | + MPATH_INFO_HOP_COUNT | + MPATH_INFO_PATH_CHANGE; + + pinfo->frame_qlen = mpath->frame_queue.qlen; + pinfo->sn = mpath->sn; + pinfo->metric = mpath->metric; + if (time_before(jiffies, mpath->exp_time)) + pinfo->exptime = jiffies_to_msecs(mpath->exp_time - jiffies); + pinfo->discovery_timeout = + jiffies_to_msecs(mpath->discovery_timeout); + pinfo->discovery_retries = mpath->discovery_retries; + if (mpath->flags & MESH_PATH_ACTIVE) + pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE; + if (mpath->flags & MESH_PATH_RESOLVING) + pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING; + if (mpath->flags & MESH_PATH_SN_VALID) + pinfo->flags |= NL80211_MPATH_FLAG_SN_VALID; + if (mpath->flags & MESH_PATH_FIXED) + pinfo->flags |= NL80211_MPATH_FLAG_FIXED; + if (mpath->flags & MESH_PATH_RESOLVED) + pinfo->flags |= NL80211_MPATH_FLAG_RESOLVED; + pinfo->hop_count = mpath->hop_count; + pinfo->path_change_count = mpath->path_change_count; +} + +static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev, + u8 *dst, u8 *next_hop, struct mpath_info *pinfo) + +{ + struct ieee80211_sub_if_data *sdata; + struct mesh_path *mpath; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + rcu_read_lock(); + mpath = mesh_path_lookup(sdata, dst); + if (!mpath) { + rcu_read_unlock(); + return -ENOENT; + } + memcpy(dst, mpath->dst, ETH_ALEN); + mpath_set_pinfo(mpath, next_hop, pinfo); + rcu_read_unlock(); 
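+ /* dst, next_hop and pinfo were copied while the RCU read lock was held, so the mesh_path could not be freed under us */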
+ return 0; +} + +static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *dst, u8 *next_hop, + struct mpath_info *pinfo) +{ + struct ieee80211_sub_if_data *sdata; + struct mesh_path *mpath; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + rcu_read_lock(); + mpath = mesh_path_lookup_by_idx(sdata, idx); + if (!mpath) { + rcu_read_unlock(); + return -ENOENT; + } + memcpy(dst, mpath->dst, ETH_ALEN); + mpath_set_pinfo(mpath, next_hop, pinfo); + rcu_read_unlock(); + return 0; +} + +static void mpp_set_pinfo(struct mesh_path *mpath, u8 *mpp, + struct mpath_info *pinfo) +{ + memset(pinfo, 0, sizeof(*pinfo)); + memcpy(mpp, mpath->mpp, ETH_ALEN); + + pinfo->generation = mpath->sdata->u.mesh.mpp_paths_generation; +} + +static int ieee80211_get_mpp(struct wiphy *wiphy, struct net_device *dev, + u8 *dst, u8 *mpp, struct mpath_info *pinfo) + +{ + struct ieee80211_sub_if_data *sdata; + struct mesh_path *mpath; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + rcu_read_lock(); + mpath = mpp_path_lookup(sdata, dst); + if (!mpath) { + rcu_read_unlock(); + return -ENOENT; + } + memcpy(dst, mpath->dst, ETH_ALEN); + mpp_set_pinfo(mpath, mpp, pinfo); + rcu_read_unlock(); + return 0; +} + +static int ieee80211_dump_mpp(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *dst, u8 *mpp, + struct mpath_info *pinfo) +{ + struct ieee80211_sub_if_data *sdata; + struct mesh_path *mpath; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + rcu_read_lock(); + mpath = mpp_path_lookup_by_idx(sdata, idx); + if (!mpath) { + rcu_read_unlock(); + return -ENOENT; + } + memcpy(dst, mpath->dst, ETH_ALEN); + mpp_set_pinfo(mpath, mpp, pinfo); + rcu_read_unlock(); + return 0; +} + +static int ieee80211_get_mesh_config(struct wiphy *wiphy, + struct net_device *dev, + struct mesh_config *conf) +{ + struct ieee80211_sub_if_data *sdata; + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + memcpy(conf, &(sdata->u.mesh.mshcfg), sizeof(struct mesh_config)); + return 0; +} + +static inline bool _chg_mesh_attr(enum nl80211_meshconf_params parm, u32 mask) +{ + return (mask >> (parm-1)) & 0x1; +} + +static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, + const struct mesh_setup *setup) +{ + u8 *new_ie; + struct ieee80211_sub_if_data *sdata = container_of(ifmsh, + struct ieee80211_sub_if_data, u.mesh); + int i; + + /* allocate information elements */ + new_ie = NULL; + + if (setup->ie_len) { + new_ie = kmemdup(setup->ie, setup->ie_len, + GFP_KERNEL); + if (!new_ie) + return -ENOMEM; + } + ifmsh->ie_len = setup->ie_len; + ifmsh->ie = new_ie; + + /* now copy the rest of the setup parameters */ + ifmsh->mesh_id_len = setup->mesh_id_len; + memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len); + ifmsh->mesh_sp_id = setup->sync_method; + ifmsh->mesh_pp_id = setup->path_sel_proto; + ifmsh->mesh_pm_id = setup->path_metric; + ifmsh->user_mpm = setup->user_mpm; + ifmsh->mesh_auth_id = setup->auth_id; + ifmsh->security = IEEE80211_MESH_SEC_NONE; + ifmsh->userspace_handles_dfs = setup->userspace_handles_dfs; + if (setup->is_authenticated) + ifmsh->security |= IEEE80211_MESH_SEC_AUTHED; + if (setup->is_secure) + ifmsh->security |= IEEE80211_MESH_SEC_SECURED; + + /* mcast rate setting in Mesh Node */ + memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate, + sizeof(setup->mcast_rate)); + sdata->vif.bss_conf.basic_rates = setup->basic_rates; + + sdata->vif.bss_conf.beacon_int = setup->beacon_interval; + sdata->vif.bss_conf.dtim_period = setup->dtim_period; + + sdata->beacon_rate_set = false; + if 
(wiphy_ext_feature_isset(sdata->local->hw.wiphy, + NL80211_EXT_FEATURE_BEACON_RATE_LEGACY)) { + for (i = 0; i < NUM_NL80211_BANDS; i++) { + sdata->beacon_rateidx_mask[i] = + setup->beacon_rate.control[i].legacy; + if (sdata->beacon_rateidx_mask[i]) + sdata->beacon_rate_set = true; + } + } + + return 0; +} + +static int ieee80211_update_mesh_config(struct wiphy *wiphy, + struct net_device *dev, u32 mask, + const struct mesh_config *nconf) +{ + struct mesh_config *conf; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_if_mesh *ifmsh; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + ifmsh = &sdata->u.mesh; + + /* Set the config options which we are interested in setting */ + conf = &(sdata->u.mesh.mshcfg); + if (_chg_mesh_attr(NL80211_MESHCONF_RETRY_TIMEOUT, mask)) + conf->dot11MeshRetryTimeout = nconf->dot11MeshRetryTimeout; + if (_chg_mesh_attr(NL80211_MESHCONF_CONFIRM_TIMEOUT, mask)) + conf->dot11MeshConfirmTimeout = nconf->dot11MeshConfirmTimeout; + if (_chg_mesh_attr(NL80211_MESHCONF_HOLDING_TIMEOUT, mask)) + conf->dot11MeshHoldingTimeout = nconf->dot11MeshHoldingTimeout; + if (_chg_mesh_attr(NL80211_MESHCONF_MAX_PEER_LINKS, mask)) + conf->dot11MeshMaxPeerLinks = nconf->dot11MeshMaxPeerLinks; + if (_chg_mesh_attr(NL80211_MESHCONF_MAX_RETRIES, mask)) + conf->dot11MeshMaxRetries = nconf->dot11MeshMaxRetries; + if (_chg_mesh_attr(NL80211_MESHCONF_TTL, mask)) + conf->dot11MeshTTL = nconf->dot11MeshTTL; + if (_chg_mesh_attr(NL80211_MESHCONF_ELEMENT_TTL, mask)) + conf->element_ttl = nconf->element_ttl; + if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask)) { + if (ifmsh->user_mpm) + return -EBUSY; + conf->auto_open_plinks = nconf->auto_open_plinks; + } + if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask)) + conf->dot11MeshNbrOffsetMaxNeighbor = + nconf->dot11MeshNbrOffsetMaxNeighbor; + if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, mask)) + conf->dot11MeshHWMPmaxPREQretries = + nconf->dot11MeshHWMPmaxPREQretries; + if (_chg_mesh_attr(NL80211_MESHCONF_PATH_REFRESH_TIME, mask)) + conf->path_refresh_time = nconf->path_refresh_time; + if (_chg_mesh_attr(NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, mask)) + conf->min_discovery_timeout = nconf->min_discovery_timeout; + if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, mask)) + conf->dot11MeshHWMPactivePathTimeout = + nconf->dot11MeshHWMPactivePathTimeout; + if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, mask)) + conf->dot11MeshHWMPpreqMinInterval = + nconf->dot11MeshHWMPpreqMinInterval; + if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, mask)) + conf->dot11MeshHWMPperrMinInterval = + nconf->dot11MeshHWMPperrMinInterval; + if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, + mask)) + conf->dot11MeshHWMPnetDiameterTraversalTime = + nconf->dot11MeshHWMPnetDiameterTraversalTime; + if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ROOTMODE, mask)) { + conf->dot11MeshHWMPRootMode = nconf->dot11MeshHWMPRootMode; + ieee80211_mesh_root_setup(ifmsh); + } + if (_chg_mesh_attr(NL80211_MESHCONF_GATE_ANNOUNCEMENTS, mask)) { + /* our current gate announcement implementation rides on root + * announcements, so require this ifmsh to also be a root node + * */ + if (nconf->dot11MeshGateAnnouncementProtocol && + !(conf->dot11MeshHWMPRootMode > IEEE80211_ROOTMODE_ROOT)) { + conf->dot11MeshHWMPRootMode = IEEE80211_PROACTIVE_RANN; + ieee80211_mesh_root_setup(ifmsh); + } + conf->dot11MeshGateAnnouncementProtocol = + nconf->dot11MeshGateAnnouncementProtocol; + } + if 
(_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask)) + conf->dot11MeshHWMPRannInterval = + nconf->dot11MeshHWMPRannInterval; + if (_chg_mesh_attr(NL80211_MESHCONF_FORWARDING, mask)) + conf->dot11MeshForwarding = nconf->dot11MeshForwarding; + if (_chg_mesh_attr(NL80211_MESHCONF_RSSI_THRESHOLD, mask)) { + /* our RSSI threshold implementation is supported only for + * devices that report signal in dBm. + */ + if (!ieee80211_hw_check(&sdata->local->hw, SIGNAL_DBM)) + return -ENOTSUPP; + conf->rssi_threshold = nconf->rssi_threshold; + } + if (_chg_mesh_attr(NL80211_MESHCONF_HT_OPMODE, mask)) { + conf->ht_opmode = nconf->ht_opmode; + sdata->vif.bss_conf.ht_operation_mode = nconf->ht_opmode; + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_HT); + } + if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, mask)) + conf->dot11MeshHWMPactivePathToRootTimeout = + nconf->dot11MeshHWMPactivePathToRootTimeout; + if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ROOT_INTERVAL, mask)) + conf->dot11MeshHWMProotInterval = + nconf->dot11MeshHWMProotInterval; + if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, mask)) + conf->dot11MeshHWMPconfirmationInterval = + nconf->dot11MeshHWMPconfirmationInterval; + if (_chg_mesh_attr(NL80211_MESHCONF_POWER_MODE, mask)) { + conf->power_mode = nconf->power_mode; + ieee80211_mps_local_status_update(sdata); + } + if (_chg_mesh_attr(NL80211_MESHCONF_AWAKE_WINDOW, mask)) + conf->dot11MeshAwakeWindowDuration = + nconf->dot11MeshAwakeWindowDuration; + if (_chg_mesh_attr(NL80211_MESHCONF_PLINK_TIMEOUT, mask)) + conf->plink_timeout = nconf->plink_timeout; + if (_chg_mesh_attr(NL80211_MESHCONF_CONNECTED_TO_GATE, mask)) + conf->dot11MeshConnectedToMeshGate = + nconf->dot11MeshConnectedToMeshGate; + if (_chg_mesh_attr(NL80211_MESHCONF_NOLEARN, mask)) + conf->dot11MeshNolearn = nconf->dot11MeshNolearn; + if (_chg_mesh_attr(NL80211_MESHCONF_CONNECTED_TO_AS, mask)) + conf->dot11MeshConnectedToAuthServer = + nconf->dot11MeshConnectedToAuthServer; + ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON); + return 0; +} + +static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev, + const struct mesh_config *conf, + const struct mesh_setup *setup) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + int err; + + memcpy(&ifmsh->mshcfg, conf, sizeof(struct mesh_config)); + err = copy_mesh_setup(ifmsh, setup); + if (err) + return err; + + sdata->control_port_over_nl80211 = setup->control_port_over_nl80211; + + /* can mesh use other SMPS modes? 
*/ + sdata->deflink.smps_mode = IEEE80211_SMPS_OFF; + sdata->deflink.needed_rx_chains = sdata->local->rx_chains; + + mutex_lock(&sdata->local->mtx); + err = ieee80211_link_use_channel(&sdata->deflink, &setup->chandef, + IEEE80211_CHANCTX_SHARED); + mutex_unlock(&sdata->local->mtx); + if (err) + return err; + + return ieee80211_start_mesh(sdata); +} + +static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + ieee80211_stop_mesh(sdata); + mutex_lock(&sdata->local->mtx); + ieee80211_link_release_channel(&sdata->deflink); + kfree(sdata->u.mesh.ie); + mutex_unlock(&sdata->local->mtx); + + return 0; +} +#endif + +static int ieee80211_change_bss(struct wiphy *wiphy, + struct net_device *dev, + struct bss_parameters *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_link_data *link; + struct ieee80211_supported_band *sband; + u64 changed = 0; + + link = ieee80211_link_or_deflink(sdata, params->link_id, true); + if (IS_ERR(link)) + return PTR_ERR(link); + + if (!sdata_dereference(link->u.ap.beacon, sdata)) + return -ENOENT; + + sband = ieee80211_get_link_sband(link); + if (!sband) + return -EINVAL; + + if (params->basic_rates) { + if (!ieee80211_parse_bitrates(link->conf->chandef.width, + wiphy->bands[sband->band], + params->basic_rates, + params->basic_rates_len, + &link->conf->basic_rates)) + return -EINVAL; + changed |= BSS_CHANGED_BASIC_RATES; + ieee80211_check_rate_mask(link); + } + + if (params->use_cts_prot >= 0) { + link->conf->use_cts_prot = params->use_cts_prot; + changed |= BSS_CHANGED_ERP_CTS_PROT; + } + if (params->use_short_preamble >= 0) { + link->conf->use_short_preamble = params->use_short_preamble; + changed |= BSS_CHANGED_ERP_PREAMBLE; + } + + if (!link->conf->use_short_slot && + (sband->band == NL80211_BAND_5GHZ || + sband->band == NL80211_BAND_6GHZ)) { + link->conf->use_short_slot = true; + changed |= BSS_CHANGED_ERP_SLOT; + } + + if (params->use_short_slot_time >= 0) { + link->conf->use_short_slot = params->use_short_slot_time; + changed |= BSS_CHANGED_ERP_SLOT; + } + + if (params->ap_isolate >= 0) { + if (params->ap_isolate) + sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS; + else + sdata->flags &= ~IEEE80211_SDATA_DONT_BRIDGE_PACKETS; + ieee80211_check_fast_rx_iface(sdata); + } + + if (params->ht_opmode >= 0) { + link->conf->ht_operation_mode = (u16)params->ht_opmode; + changed |= BSS_CHANGED_HT; + } + + if (params->p2p_ctwindow >= 0) { + link->conf->p2p_noa_attr.oppps_ctwindow &= + ~IEEE80211_P2P_OPPPS_CTWINDOW_MASK; + link->conf->p2p_noa_attr.oppps_ctwindow |= + params->p2p_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK; + changed |= BSS_CHANGED_P2P_PS; + } + + if (params->p2p_opp_ps > 0) { + link->conf->p2p_noa_attr.oppps_ctwindow |= + IEEE80211_P2P_OPPPS_ENABLE_BIT; + changed |= BSS_CHANGED_P2P_PS; + } else if (params->p2p_opp_ps == 0) { + link->conf->p2p_noa_attr.oppps_ctwindow &= + ~IEEE80211_P2P_OPPPS_ENABLE_BIT; + changed |= BSS_CHANGED_P2P_PS; + } + + ieee80211_link_info_change_notify(sdata, link, changed); + + return 0; +} + +static int ieee80211_set_txq_params(struct wiphy *wiphy, + struct net_device *dev, + struct ieee80211_txq_params *params) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_link_data *link = + ieee80211_link_or_deflink(sdata, params->link_id, true); + struct ieee80211_tx_queue_params p; + + if 
(!local->ops->conf_tx) + return -EOPNOTSUPP; + + if (local->hw.queues < IEEE80211_NUM_ACS) + return -EOPNOTSUPP; + + if (IS_ERR(link)) + return PTR_ERR(link); + + memset(&p, 0, sizeof(p)); + p.aifs = params->aifs; + p.cw_max = params->cwmax; + p.cw_min = params->cwmin; + p.txop = params->txop; + + /* + * Setting tx queue params disables u-apsd because it's only + * called in master mode. + */ + p.uapsd = false; + + ieee80211_regulatory_limit_wmm_params(sdata, &p, params->ac); + + link->tx_conf[params->ac] = p; + if (drv_conf_tx(local, link, params->ac, &p)) { + wiphy_debug(local->hw.wiphy, + "failed to set TX queue parameters for AC %d\n", + params->ac); + return -EINVAL; + } + + ieee80211_link_info_change_notify(sdata, link, + BSS_CHANGED_QOS); + + return 0; +} + +#ifdef CONFIG_PM +static int ieee80211_suspend(struct wiphy *wiphy, + struct cfg80211_wowlan *wowlan) +{ + return __ieee80211_suspend(wiphy_priv(wiphy), wowlan); +} + +static int ieee80211_resume(struct wiphy *wiphy) +{ + return __ieee80211_resume(wiphy_priv(wiphy)); +} +#else +#define ieee80211_suspend NULL +#define ieee80211_resume NULL +#endif + +static int ieee80211_scan(struct wiphy *wiphy, + struct cfg80211_scan_request *req) +{ + struct ieee80211_sub_if_data *sdata; + + sdata = IEEE80211_WDEV_TO_SUB_IF(req->wdev); + + switch (ieee80211_vif_type_p2p(&sdata->vif)) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_DEVICE: + break; + case NL80211_IFTYPE_P2P_GO: + if (sdata->local->ops->hw_scan) + break; + /* + * FIXME: implement NoA while scanning in software, + * for now fall through to allow scanning only when + * beaconing hasn't been configured yet + */ + fallthrough; + case NL80211_IFTYPE_AP: + /* + * If the scan has been forced (and the driver supports + * forcing), don't care about being beaconing already. + * This will create problems to the attached stations (e.g. 
all + * the frames sent while scanning on other channel will be + * lost) + */ + if (sdata->deflink.u.ap.beacon && + (!(wiphy->features & NL80211_FEATURE_AP_SCAN) || + !(req->flags & NL80211_SCAN_FLAG_AP))) + return -EOPNOTSUPP; + break; + case NL80211_IFTYPE_NAN: + default: + return -EOPNOTSUPP; + } + + return ieee80211_request_scan(sdata, req); +} + +static void ieee80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev) +{ + ieee80211_scan_cancel(wiphy_priv(wiphy)); +} + +static int +ieee80211_sched_scan_start(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_sched_scan_request *req) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (!sdata->local->ops->sched_scan_start) + return -EOPNOTSUPP; + + return ieee80211_request_sched_scan_start(sdata, req); +} + +static int +ieee80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, + u64 reqid) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + if (!local->ops->sched_scan_stop) + return -EOPNOTSUPP; + + return ieee80211_request_sched_scan_stop(local); +} + +static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_auth_request *req) +{ + return ieee80211_mgd_auth(IEEE80211_DEV_TO_SUB_IF(dev), req); +} + +static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_assoc_request *req) +{ + return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req); +} + +static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_deauth_request *req) +{ + return ieee80211_mgd_deauth(IEEE80211_DEV_TO_SUB_IF(dev), req); +} + +static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_disassoc_request *req) +{ + return ieee80211_mgd_disassoc(IEEE80211_DEV_TO_SUB_IF(dev), req); +} + +static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ibss_params *params) +{ + return ieee80211_ibss_join(IEEE80211_DEV_TO_SUB_IF(dev), params); +} + +static int ieee80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) +{ + return ieee80211_ibss_leave(IEEE80211_DEV_TO_SUB_IF(dev)); +} + +static int ieee80211_join_ocb(struct wiphy *wiphy, struct net_device *dev, + struct ocb_setup *setup) +{ + return ieee80211_ocb_join(IEEE80211_DEV_TO_SUB_IF(dev), setup); +} + +static int ieee80211_leave_ocb(struct wiphy *wiphy, struct net_device *dev) +{ + return ieee80211_ocb_leave(IEEE80211_DEV_TO_SUB_IF(dev)); +} + +static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev, + int rate[NUM_NL80211_BANDS]) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + memcpy(sdata->vif.bss_conf.mcast_rate, rate, + sizeof(int) * NUM_NL80211_BANDS); + + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_MCAST_RATE); + + return 0; +} + +static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + int err; + + if (changed & WIPHY_PARAM_FRAG_THRESHOLD) { + ieee80211_check_fast_xmit_all(local); + + err = drv_set_frag_threshold(local, wiphy->frag_threshold); + + if (err) { + ieee80211_check_fast_xmit_all(local); + return err; + } + } + + if ((changed & WIPHY_PARAM_COVERAGE_CLASS) || + (changed & WIPHY_PARAM_DYN_ACK)) { + s16 coverage_class; + + coverage_class = changed & WIPHY_PARAM_COVERAGE_CLASS ? 
+ wiphy->coverage_class : -1; + err = drv_set_coverage_class(local, coverage_class); + + if (err) + return err; + } + + if (changed & WIPHY_PARAM_RTS_THRESHOLD) { + err = drv_set_rts_threshold(local, wiphy->rts_threshold); + + if (err) + return err; + } + + if (changed & WIPHY_PARAM_RETRY_SHORT) { + if (wiphy->retry_short > IEEE80211_MAX_TX_RETRY) + return -EINVAL; + local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; + } + if (changed & WIPHY_PARAM_RETRY_LONG) { + if (wiphy->retry_long > IEEE80211_MAX_TX_RETRY) + return -EINVAL; + local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; + } + if (changed & + (WIPHY_PARAM_RETRY_SHORT | WIPHY_PARAM_RETRY_LONG)) + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS); + + if (changed & (WIPHY_PARAM_TXQ_LIMIT | + WIPHY_PARAM_TXQ_MEMORY_LIMIT | + WIPHY_PARAM_TXQ_QUANTUM)) + ieee80211_txq_set_params(local); + + return 0; +} + +static int ieee80211_set_tx_power(struct wiphy *wiphy, + struct wireless_dev *wdev, + enum nl80211_tx_power_setting type, int mbm) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_sub_if_data *sdata; + enum nl80211_tx_power_setting txp_type = type; + bool update_txp_type = false; + bool has_monitor = false; + + if (wdev) { + sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { + sdata = wiphy_dereference(local->hw.wiphy, + local->monitor_sdata); + if (!sdata) + return -EOPNOTSUPP; + } + + switch (type) { + case NL80211_TX_POWER_AUTOMATIC: + sdata->deflink.user_power_level = + IEEE80211_UNSET_POWER_LEVEL; + txp_type = NL80211_TX_POWER_LIMITED; + break; + case NL80211_TX_POWER_LIMITED: + case NL80211_TX_POWER_FIXED: + if (mbm < 0 || (mbm % 100)) + return -EOPNOTSUPP; + sdata->deflink.user_power_level = MBM_TO_DBM(mbm); + break; + } + + if (txp_type != sdata->vif.bss_conf.txpower_type) { + update_txp_type = true; + sdata->vif.bss_conf.txpower_type = txp_type; + } + + ieee80211_recalc_txpower(sdata, update_txp_type); + + return 0; + } + + switch (type) { + case NL80211_TX_POWER_AUTOMATIC: + local->user_power_level = IEEE80211_UNSET_POWER_LEVEL; + txp_type = NL80211_TX_POWER_LIMITED; + break; + case NL80211_TX_POWER_LIMITED: + case NL80211_TX_POWER_FIXED: + if (mbm < 0 || (mbm % 100)) + return -EOPNOTSUPP; + local->user_power_level = MBM_TO_DBM(mbm); + break; + } + + mutex_lock(&local->iflist_mtx); + list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { + has_monitor = true; + continue; + } + sdata->deflink.user_power_level = local->user_power_level; + if (txp_type != sdata->vif.bss_conf.txpower_type) + update_txp_type = true; + sdata->vif.bss_conf.txpower_type = txp_type; + } + list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) + continue; + ieee80211_recalc_txpower(sdata, update_txp_type); + } + mutex_unlock(&local->iflist_mtx); + + if (has_monitor) { + sdata = wiphy_dereference(local->hw.wiphy, + local->monitor_sdata); + if (sdata) { + sdata->deflink.user_power_level = local->user_power_level; + if (txp_type != sdata->vif.bss_conf.txpower_type) + update_txp_type = true; + sdata->vif.bss_conf.txpower_type = txp_type; + + ieee80211_recalc_txpower(sdata, update_txp_type); + } + } + + return 0; +} + +static int ieee80211_get_tx_power(struct wiphy *wiphy, + struct wireless_dev *wdev, + int *dbm) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + + if 
(local->ops->get_txpower) + return drv_get_txpower(local, sdata, dbm); + + if (!local->use_chanctx) + *dbm = local->hw.conf.power_level; + else + *dbm = sdata->vif.bss_conf.txpower; + + /* INT_MIN indicates no power level was set yet */ + if (*dbm == INT_MIN) + return -EINVAL; + + return 0; +} + +static void ieee80211_rfkill_poll(struct wiphy *wiphy) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + drv_rfkill_poll(local); +} + +#ifdef CONFIG_NL80211_TESTMODE +static int ieee80211_testmode_cmd(struct wiphy *wiphy, + struct wireless_dev *wdev, + void *data, int len) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_vif *vif = NULL; + + if (!local->ops->testmode_cmd) + return -EOPNOTSUPP; + + if (wdev) { + struct ieee80211_sub_if_data *sdata; + + sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + if (sdata->flags & IEEE80211_SDATA_IN_DRIVER) + vif = &sdata->vif; + } + + return local->ops->testmode_cmd(&local->hw, vif, data, len); +} + +static int ieee80211_testmode_dump(struct wiphy *wiphy, + struct sk_buff *skb, + struct netlink_callback *cb, + void *data, int len) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + if (!local->ops->testmode_dump) + return -EOPNOTSUPP; + + return local->ops->testmode_dump(&local->hw, skb, cb, data, len); +} +#endif + +int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link, + enum ieee80211_smps_mode smps_mode) +{ + const u8 *ap; + enum ieee80211_smps_mode old_req; + int err; + struct sta_info *sta; + bool tdls_peer_found = false; + + lockdep_assert_held(&sdata->wdev.mtx); + + if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION)) + return -EINVAL; + + old_req = link->u.mgd.req_smps; + link->u.mgd.req_smps = smps_mode; + + if (old_req == smps_mode && + smps_mode != IEEE80211_SMPS_AUTOMATIC) + return 0; + + /* + * If not associated, or current association is not an HT + * association, there's no need to do anything, just store + * the new value until we associate. 
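+	 * (The NL80211_CHAN_WIDTH_20_NOHT check below is what identifies a
+	 * non-HT association here.)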
+ */ + if (!sdata->u.mgd.associated || + link->conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT) + return 0; + + ap = link->u.mgd.bssid; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { + if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded || + !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + continue; + + tdls_peer_found = true; + break; + } + rcu_read_unlock(); + + if (smps_mode == IEEE80211_SMPS_AUTOMATIC) { + if (tdls_peer_found || !sdata->u.mgd.powersave) + smps_mode = IEEE80211_SMPS_OFF; + else + smps_mode = IEEE80211_SMPS_DYNAMIC; + } + + /* send SM PS frame to AP */ + err = ieee80211_send_smps_action(sdata, smps_mode, + ap, ap); + if (err) + link->u.mgd.req_smps = old_req; + else if (smps_mode != IEEE80211_SMPS_OFF && tdls_peer_found) + ieee80211_teardown_tdls_peers(sdata); + + return err; +} + +static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, + bool enabled, int timeout) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + unsigned int link_id; + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS)) + return -EOPNOTSUPP; + + if (enabled == sdata->u.mgd.powersave && + timeout == local->dynamic_ps_forced_timeout) + return 0; + + sdata->u.mgd.powersave = enabled; + local->dynamic_ps_forced_timeout = timeout; + + /* no change, but if automatic follow powersave */ + sdata_lock(sdata); + for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { + struct ieee80211_link_data *link; + + link = sdata_dereference(sdata->link[link_id], sdata); + + if (!link) + continue; + __ieee80211_request_smps_mgd(sdata, link, + link->u.mgd.req_smps); + } + sdata_unlock(sdata); + + if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + + ieee80211_recalc_ps(local); + ieee80211_recalc_ps_vif(sdata); + ieee80211_check_fast_rx_iface(sdata); + + return 0; +} + +static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy, + struct net_device *dev, + s32 rssi_thold, u32 rssi_hyst) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_vif *vif = &sdata->vif; + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + + if (rssi_thold == bss_conf->cqm_rssi_thold && + rssi_hyst == bss_conf->cqm_rssi_hyst) + return 0; + + if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER && + !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) + return -EOPNOTSUPP; + + bss_conf->cqm_rssi_thold = rssi_thold; + bss_conf->cqm_rssi_hyst = rssi_hyst; + bss_conf->cqm_rssi_low = 0; + bss_conf->cqm_rssi_high = 0; + sdata->deflink.u.mgd.last_cqm_event_signal = 0; + + /* tell the driver upon association, unless already associated */ + if (sdata->u.mgd.associated && + sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI) + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_CQM); + + return 0; +} + +static int ieee80211_set_cqm_rssi_range_config(struct wiphy *wiphy, + struct net_device *dev, + s32 rssi_low, s32 rssi_high) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_vif *vif = &sdata->vif; + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + + if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER) + return -EOPNOTSUPP; + + bss_conf->cqm_rssi_low = rssi_low; + bss_conf->cqm_rssi_high = rssi_high; + 
bss_conf->cqm_rssi_thold = 0; + bss_conf->cqm_rssi_hyst = 0; + sdata->deflink.u.mgd.last_cqm_event_signal = 0; + + /* tell the driver upon association, unless already associated */ + if (sdata->u.mgd.associated && + sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI) + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_CQM); + + return 0; +} + +static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, + struct net_device *dev, + unsigned int link_id, + const u8 *addr, + const struct cfg80211_bitrate_mask *mask) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + int i, ret; + + if (!ieee80211_sdata_running(sdata)) + return -ENETDOWN; + + /* + * If active validate the setting and reject it if it doesn't leave + * at least one basic rate usable, since we really have to be able + * to send something, and if we're an AP we have to be able to do + * so at a basic rate so that all clients can receive it. + */ + if (rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) && + sdata->vif.bss_conf.chandef.chan) { + u32 basic_rates = sdata->vif.bss_conf.basic_rates; + enum nl80211_band band = sdata->vif.bss_conf.chandef.chan->band; + + if (!(mask->control[band].legacy & basic_rates)) + return -EINVAL; + } + + if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) { + ret = drv_set_bitrate_mask(local, sdata, mask); + if (ret) + return ret; + } + + for (i = 0; i < NUM_NL80211_BANDS; i++) { + struct ieee80211_supported_band *sband = wiphy->bands[i]; + int j; + + sdata->rc_rateidx_mask[i] = mask->control[i].legacy; + memcpy(sdata->rc_rateidx_mcs_mask[i], mask->control[i].ht_mcs, + sizeof(mask->control[i].ht_mcs)); + memcpy(sdata->rc_rateidx_vht_mcs_mask[i], + mask->control[i].vht_mcs, + sizeof(mask->control[i].vht_mcs)); + + sdata->rc_has_mcs_mask[i] = false; + sdata->rc_has_vht_mcs_mask[i] = false; + if (!sband) + continue; + + for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) { + if (sdata->rc_rateidx_mcs_mask[i][j] != 0xff) { + sdata->rc_has_mcs_mask[i] = true; + break; + } + } + + for (j = 0; j < NL80211_VHT_NSS_MAX; j++) { + if (sdata->rc_rateidx_vht_mcs_mask[i][j] != 0xffff) { + sdata->rc_has_vht_mcs_mask[i] = true; + break; + } + } + } + + return 0; +} + +static int ieee80211_start_radar_detection(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_chan_def *chandef, + u32 cac_time_ms) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + int err; + + mutex_lock(&local->mtx); + if (!list_empty(&local->roc_list) || local->scanning) { + err = -EBUSY; + goto out_unlock; + } + + /* whatever, but channel contexts should not complain about that one */ + sdata->deflink.smps_mode = IEEE80211_SMPS_OFF; + sdata->deflink.needed_rx_chains = local->rx_chains; + + err = ieee80211_link_use_channel(&sdata->deflink, chandef, + IEEE80211_CHANCTX_SHARED); + if (err) + goto out_unlock; + + ieee80211_queue_delayed_work(&sdata->local->hw, + &sdata->deflink.dfs_cac_timer_work, + msecs_to_jiffies(cac_time_ms)); + + out_unlock: + mutex_unlock(&local->mtx); + return err; +} + +static void ieee80211_end_cac(struct wiphy *wiphy, + struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + + mutex_lock(&local->mtx); + list_for_each_entry(sdata, &local->interfaces, list) { + /* it might be waiting for the local->mtx, but then + * by the time it gets 
it, sdata->wdev.cac_started + * will no longer be true + */ + cancel_delayed_work(&sdata->deflink.dfs_cac_timer_work); + + if (sdata->wdev.cac_started) { + ieee80211_link_release_channel(&sdata->deflink); + sdata->wdev.cac_started = false; + } + } + mutex_unlock(&local->mtx); +} + +static struct cfg80211_beacon_data * +cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon) +{ + struct cfg80211_beacon_data *new_beacon; + u8 *pos; + int len; + + len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len + + beacon->proberesp_ies_len + beacon->assocresp_ies_len + + beacon->probe_resp_len + beacon->lci_len + beacon->civicloc_len; + + if (beacon->mbssid_ies) + len += ieee80211_get_mbssid_beacon_len(beacon->mbssid_ies, + beacon->rnr_ies, + beacon->mbssid_ies->cnt); + + new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL); + if (!new_beacon) + return NULL; + + if (beacon->mbssid_ies && beacon->mbssid_ies->cnt) { + new_beacon->mbssid_ies = + kzalloc(struct_size(new_beacon->mbssid_ies, + elem, beacon->mbssid_ies->cnt), + GFP_KERNEL); + if (!new_beacon->mbssid_ies) { + kfree(new_beacon); + return NULL; + } + + if (beacon->rnr_ies && beacon->rnr_ies->cnt) { + new_beacon->rnr_ies = + kzalloc(struct_size(new_beacon->rnr_ies, + elem, beacon->rnr_ies->cnt), + GFP_KERNEL); + if (!new_beacon->rnr_ies) { + kfree(new_beacon->mbssid_ies); + kfree(new_beacon); + return NULL; + } + } + } + + pos = (u8 *)(new_beacon + 1); + if (beacon->head_len) { + new_beacon->head_len = beacon->head_len; + new_beacon->head = pos; + memcpy(pos, beacon->head, beacon->head_len); + pos += beacon->head_len; + } + if (beacon->tail_len) { + new_beacon->tail_len = beacon->tail_len; + new_beacon->tail = pos; + memcpy(pos, beacon->tail, beacon->tail_len); + pos += beacon->tail_len; + } + if (beacon->beacon_ies_len) { + new_beacon->beacon_ies_len = beacon->beacon_ies_len; + new_beacon->beacon_ies = pos; + memcpy(pos, beacon->beacon_ies, beacon->beacon_ies_len); + pos += beacon->beacon_ies_len; + } + if (beacon->proberesp_ies_len) { + new_beacon->proberesp_ies_len = beacon->proberesp_ies_len; + new_beacon->proberesp_ies = pos; + memcpy(pos, beacon->proberesp_ies, beacon->proberesp_ies_len); + pos += beacon->proberesp_ies_len; + } + if (beacon->assocresp_ies_len) { + new_beacon->assocresp_ies_len = beacon->assocresp_ies_len; + new_beacon->assocresp_ies = pos; + memcpy(pos, beacon->assocresp_ies, beacon->assocresp_ies_len); + pos += beacon->assocresp_ies_len; + } + if (beacon->probe_resp_len) { + new_beacon->probe_resp_len = beacon->probe_resp_len; + new_beacon->probe_resp = pos; + memcpy(pos, beacon->probe_resp, beacon->probe_resp_len); + pos += beacon->probe_resp_len; + } + if (beacon->mbssid_ies && beacon->mbssid_ies->cnt) { + pos += ieee80211_copy_mbssid_beacon(pos, + new_beacon->mbssid_ies, + beacon->mbssid_ies); + if (beacon->rnr_ies && beacon->rnr_ies->cnt) + pos += ieee80211_copy_rnr_beacon(pos, + new_beacon->rnr_ies, + beacon->rnr_ies); + } + + /* might copy -1, meaning no changes requested */ + new_beacon->ftm_responder = beacon->ftm_responder; + if (beacon->lci) { + new_beacon->lci_len = beacon->lci_len; + new_beacon->lci = pos; + memcpy(pos, beacon->lci, beacon->lci_len); + pos += beacon->lci_len; + } + if (beacon->civicloc) { + new_beacon->civicloc_len = beacon->civicloc_len; + new_beacon->civicloc = pos; + memcpy(pos, beacon->civicloc, beacon->civicloc_len); + pos += beacon->civicloc_len; + } + + return new_beacon; +} + +void ieee80211_csa_finish(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data 
*sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + + rcu_read_lock(); + + if (vif->mbssid_tx_vif == vif) { + /* Trigger ieee80211_csa_finish() on the non-transmitting + * interfaces when channel switch is received on + * transmitting interface + */ + struct ieee80211_sub_if_data *iter; + + list_for_each_entry_rcu(iter, &local->interfaces, list) { + if (!ieee80211_sdata_running(iter)) + continue; + + if (iter == sdata || iter->vif.mbssid_tx_vif != vif) + continue; + + ieee80211_queue_work(&iter->local->hw, + &iter->deflink.csa_finalize_work); + } + } + ieee80211_queue_work(&local->hw, &sdata->deflink.csa_finalize_work); + + rcu_read_unlock(); +} +EXPORT_SYMBOL(ieee80211_csa_finish); + +void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_tx) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = sdata->local; + + sdata->deflink.csa_block_tx = block_tx; + sdata_info(sdata, "channel switch failed, disconnecting\n"); + wiphy_work_queue(local->hw.wiphy, &ifmgd->csa_connection_drop_work); +} +EXPORT_SYMBOL(ieee80211_channel_switch_disconnect); + +static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata, + u64 *changed) +{ + int err; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP: + if (!sdata->deflink.u.ap.next_beacon) + return -EINVAL; + + err = ieee80211_assign_beacon(sdata, &sdata->deflink, + sdata->deflink.u.ap.next_beacon, + NULL, NULL, changed); + ieee80211_free_next_beacon(&sdata->deflink); + + if (err < 0) + return err; + break; + case NL80211_IFTYPE_ADHOC: + err = ieee80211_ibss_finish_csa(sdata, changed); + if (err < 0) + return err; + break; +#ifdef CONFIG_MAC80211_MESH + case NL80211_IFTYPE_MESH_POINT: + err = ieee80211_mesh_finish_csa(sdata, changed); + if (err < 0) + return err; + break; +#endif + default: + WARN_ON(1); + return -EINVAL; + } + + return 0; +} + +static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + u64 changed = 0; + int err; + + sdata_assert_lock(sdata); + lockdep_assert_held(&local->mtx); + lockdep_assert_held(&local->chanctx_mtx); + + /* + * using reservation isn't immediate as it may be deferred until later + * with multi-vif. 
once reservation is complete it will re-schedule the + * work with no reserved_chanctx so verify chandef to check if it + * completed successfully + */ + + if (sdata->deflink.reserved_chanctx) { + /* + * with multi-vif csa driver may call ieee80211_csa_finish() + * many times while waiting for other interfaces to use their + * reservations + */ + if (sdata->deflink.reserved_ready) + return 0; + + return ieee80211_link_use_reserved_context(&sdata->deflink); + } + + if (!cfg80211_chandef_identical(&sdata->vif.bss_conf.chandef, + &sdata->deflink.csa_chandef)) + return -EINVAL; + + sdata->vif.bss_conf.csa_active = false; + + err = ieee80211_set_after_csa_beacon(sdata, &changed); + if (err) + return err; + + if (sdata->vif.bss_conf.eht_puncturing != sdata->vif.bss_conf.csa_punct_bitmap) { + sdata->vif.bss_conf.eht_puncturing = + sdata->vif.bss_conf.csa_punct_bitmap; + changed |= BSS_CHANGED_EHT_PUNCTURING; + } + + ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed); + + if (sdata->deflink.csa_block_tx) { + ieee80211_wake_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_CSA); + sdata->deflink.csa_block_tx = false; + } + + err = drv_post_channel_switch(sdata); + if (err) + return err; + + cfg80211_ch_switch_notify(sdata->dev, &sdata->deflink.csa_chandef, 0, + sdata->vif.bss_conf.eht_puncturing); + + return 0; +} + +static void ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata) +{ + if (__ieee80211_csa_finalize(sdata)) { + sdata_info(sdata, "failed to finalize CSA, disconnecting\n"); + cfg80211_stop_iface(sdata->local->hw.wiphy, &sdata->wdev, + GFP_KERNEL); + } +} + +void ieee80211_csa_finalize_work(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, + deflink.csa_finalize_work); + struct ieee80211_local *local = sdata->local; + + sdata_lock(sdata); + mutex_lock(&local->mtx); + mutex_lock(&local->chanctx_mtx); + + /* AP might have been stopped while waiting for the lock. */ + if (!sdata->vif.bss_conf.csa_active) + goto unlock; + + if (!ieee80211_sdata_running(sdata)) + goto unlock; + + ieee80211_csa_finalize(sdata); + +unlock: + mutex_unlock(&local->chanctx_mtx); + mutex_unlock(&local->mtx); + sdata_unlock(sdata); +} + +static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata, + struct cfg80211_csa_settings *params, + u64 *changed) +{ + struct ieee80211_csa_settings csa = {}; + int err; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP: + sdata->deflink.u.ap.next_beacon = + cfg80211_beacon_dup(¶ms->beacon_after); + if (!sdata->deflink.u.ap.next_beacon) + return -ENOMEM; + + /* + * With a count of 0, we don't have to wait for any + * TBTT before switching, so complete the CSA + * immediately. In theory, with a count == 1 we + * should delay the switch until just before the next + * TBTT, but that would complicate things so we switch + * immediately too. If we would delay the switch + * until the next TBTT, we would have to set the probe + * response here. + * + * TODO: A channel switch with count <= 1 without + * sending a CSA action frame is kind of useless, + * because the clients won't know we're changing + * channels. The action frame must be implemented + * either here or in the userspace. 
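+	 * (With count <= 1 no countdown beacon is installed here at all;
+	 * the beacon_after template duplicated above is applied when the
+	 * switch is finalized.)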
+ */ + if (params->count <= 1) + break; + + if ((params->n_counter_offsets_beacon > + IEEE80211_MAX_CNTDWN_COUNTERS_NUM) || + (params->n_counter_offsets_presp > + IEEE80211_MAX_CNTDWN_COUNTERS_NUM)) { + ieee80211_free_next_beacon(&sdata->deflink); + return -EINVAL; + } + + csa.counter_offsets_beacon = params->counter_offsets_beacon; + csa.counter_offsets_presp = params->counter_offsets_presp; + csa.n_counter_offsets_beacon = params->n_counter_offsets_beacon; + csa.n_counter_offsets_presp = params->n_counter_offsets_presp; + csa.count = params->count; + + err = ieee80211_assign_beacon(sdata, &sdata->deflink, + ¶ms->beacon_csa, &csa, + NULL, changed); + if (err < 0) { + ieee80211_free_next_beacon(&sdata->deflink); + return err; + } + + break; + case NL80211_IFTYPE_ADHOC: + if (!sdata->vif.cfg.ibss_joined) + return -EINVAL; + + if (params->chandef.width != sdata->u.ibss.chandef.width) + return -EINVAL; + + switch (params->chandef.width) { + case NL80211_CHAN_WIDTH_40: + if (cfg80211_get_chandef_type(¶ms->chandef) != + cfg80211_get_chandef_type(&sdata->u.ibss.chandef)) + return -EINVAL; + break; + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + break; + default: + return -EINVAL; + } + + /* changes into another band are not supported */ + if (sdata->u.ibss.chandef.chan->band != + params->chandef.chan->band) + return -EINVAL; + + /* see comments in the NL80211_IFTYPE_AP block */ + if (params->count > 1) { + err = ieee80211_ibss_csa_beacon(sdata, params, changed); + if (err < 0) + return err; + } + + ieee80211_send_action_csa(sdata, params); + + break; +#ifdef CONFIG_MAC80211_MESH + case NL80211_IFTYPE_MESH_POINT: { + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + /* changes into another band are not supported */ + if (sdata->vif.bss_conf.chandef.chan->band != + params->chandef.chan->band) + return -EINVAL; + + if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_NONE) { + ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_INIT; + if (!ifmsh->pre_value) + ifmsh->pre_value = 1; + else + ifmsh->pre_value++; + } + + /* see comments in the NL80211_IFTYPE_AP block */ + if (params->count > 1) { + err = ieee80211_mesh_csa_beacon(sdata, params, changed); + if (err < 0) { + ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE; + return err; + } + } + + if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_INIT) + ieee80211_send_action_csa(sdata, params); + + break; + } +#endif + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static void ieee80211_color_change_abort(struct ieee80211_sub_if_data *sdata) +{ + sdata->vif.bss_conf.color_change_active = false; + + ieee80211_free_next_beacon(&sdata->deflink); + + cfg80211_color_change_aborted_notify(sdata->dev); +} + +static int +__ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_csa_settings *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct ieee80211_channel_switch ch_switch; + struct ieee80211_chanctx_conf *conf; + struct ieee80211_chanctx *chanctx; + u64 changed = 0; + int err; + + sdata_assert_lock(sdata); + lockdep_assert_held(&local->mtx); + + if (!list_empty(&local->roc_list) || local->scanning) + return -EBUSY; + + if (sdata->wdev.cac_started) + return -EBUSY; + + if (cfg80211_chandef_identical(¶ms->chandef, + &sdata->vif.bss_conf.chandef)) + return -EINVAL; + + /* don't allow another channel switch if one is already active. 
*/ + if (sdata->vif.bss_conf.csa_active) + return -EBUSY; + + mutex_lock(&local->chanctx_mtx); + conf = rcu_dereference_protected(sdata->vif.bss_conf.chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + if (!conf) { + err = -EBUSY; + goto out; + } + + if (params->chandef.chan->freq_offset) { + /* this may work, but is untested */ + err = -EOPNOTSUPP; + goto out; + } + + chanctx = container_of(conf, struct ieee80211_chanctx, conf); + + ch_switch.timestamp = 0; + ch_switch.device_timestamp = 0; + ch_switch.block_tx = params->block_tx; + ch_switch.chandef = params->chandef; + ch_switch.count = params->count; + + err = drv_pre_channel_switch(sdata, &ch_switch); + if (err) + goto out; + + err = ieee80211_link_reserve_chanctx(&sdata->deflink, ¶ms->chandef, + chanctx->mode, + params->radar_required); + if (err) + goto out; + + /* if reservation is invalid then this will fail */ + err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0); + if (err) { + ieee80211_link_unreserve_chanctx(&sdata->deflink); + goto out; + } + + /* if there is a color change in progress, abort it */ + if (sdata->vif.bss_conf.color_change_active) + ieee80211_color_change_abort(sdata); + + err = ieee80211_set_csa_beacon(sdata, params, &changed); + if (err) { + ieee80211_link_unreserve_chanctx(&sdata->deflink); + goto out; + } + + if (params->punct_bitmap && !sdata->vif.bss_conf.eht_support) + goto out; + + sdata->deflink.csa_chandef = params->chandef; + sdata->deflink.csa_block_tx = params->block_tx; + sdata->vif.bss_conf.csa_active = true; + sdata->vif.bss_conf.csa_punct_bitmap = params->punct_bitmap; + + if (sdata->deflink.csa_block_tx) + ieee80211_stop_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_CSA); + + cfg80211_ch_switch_started_notify(sdata->dev, + &sdata->deflink.csa_chandef, 0, + params->count, params->block_tx, + sdata->vif.bss_conf.csa_punct_bitmap); + + if (changed) { + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + changed); + drv_channel_switch_beacon(sdata, ¶ms->chandef); + } else { + /* if the beacon didn't change, we can finalize immediately */ + ieee80211_csa_finalize(sdata); + } + +out: + mutex_unlock(&local->chanctx_mtx); + return err; +} + +int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_csa_settings *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + int err; + + mutex_lock(&local->mtx); + err = __ieee80211_channel_switch(wiphy, dev, params); + mutex_unlock(&local->mtx); + + return err; +} + +u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local) +{ + lockdep_assert_held(&local->mtx); + + local->roc_cookie_counter++; + + /* wow, you wrapped 64 bits ... 
more likely a bug */ + if (WARN_ON(local->roc_cookie_counter == 0)) + local->roc_cookie_counter++; + + return local->roc_cookie_counter; +} + +int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb, + u64 *cookie, gfp_t gfp) +{ + unsigned long spin_flags; + struct sk_buff *ack_skb; + int id; + + ack_skb = skb_copy(skb, gfp); + if (!ack_skb) + return -ENOMEM; + + spin_lock_irqsave(&local->ack_status_lock, spin_flags); + id = idr_alloc(&local->ack_status_frames, ack_skb, + 1, 0x2000, GFP_ATOMIC); + spin_unlock_irqrestore(&local->ack_status_lock, spin_flags); + + if (id < 0) { + kfree_skb(ack_skb); + return -ENOMEM; + } + + IEEE80211_SKB_CB(skb)->ack_frame_id = id; + + *cookie = ieee80211_mgmt_tx_cookie(local); + IEEE80211_SKB_CB(ack_skb)->ack.cookie = *cookie; + + return 0; +} + +static void +ieee80211_update_mgmt_frame_registrations(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct mgmt_frame_regs *upd) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + u32 preq_mask = BIT(IEEE80211_STYPE_PROBE_REQ >> 4); + u32 action_mask = BIT(IEEE80211_STYPE_ACTION >> 4); + bool global_change, intf_change; + + global_change = + (local->probe_req_reg != !!(upd->global_stypes & preq_mask)) || + (local->rx_mcast_action_reg != + !!(upd->global_mcast_stypes & action_mask)); + local->probe_req_reg = upd->global_stypes & preq_mask; + local->rx_mcast_action_reg = upd->global_mcast_stypes & action_mask; + + intf_change = (sdata->vif.probe_req_reg != + !!(upd->interface_stypes & preq_mask)) || + (sdata->vif.rx_mcast_action_reg != + !!(upd->interface_mcast_stypes & action_mask)); + sdata->vif.probe_req_reg = upd->interface_stypes & preq_mask; + sdata->vif.rx_mcast_action_reg = + upd->interface_mcast_stypes & action_mask; + + if (!local->open_count) + return; + + if (intf_change && ieee80211_sdata_running(sdata)) + drv_config_iface_filter(local, sdata, + sdata->vif.probe_req_reg ? 
+ FIF_PROBE_REQ : 0, + FIF_PROBE_REQ); + + if (global_change) + ieee80211_configure_filter(local); +} + +static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + if (local->started) + return -EOPNOTSUPP; + + return drv_set_antenna(local, tx_ant, rx_ant); +} + +static int ieee80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + return drv_get_antenna(local, tx_ant, rx_ant); +} + +static int ieee80211_set_rekey_data(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_gtk_rekey_data *data) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (!local->ops->set_rekey_data) + return -EOPNOTSUPP; + + drv_set_rekey_data(local, sdata, data); + + return 0; +} + +static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u64 *cookie) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct ieee80211_qos_hdr *nullfunc; + struct sk_buff *skb; + int size = sizeof(*nullfunc); + __le16 fc; + bool qos; + struct ieee80211_tx_info *info; + struct sta_info *sta; + struct ieee80211_chanctx_conf *chanctx_conf; + enum nl80211_band band; + int ret; + + /* the lock is needed to assign the cookie later */ + mutex_lock(&local->mtx); + + rcu_read_lock(); + sta = sta_info_get_bss(sdata, peer); + if (!sta) { + ret = -ENOLINK; + goto unlock; + } + + qos = sta->sta.wme; + + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + if (WARN_ON(!chanctx_conf)) { + ret = -EINVAL; + goto unlock; + } + band = chanctx_conf->def.chan->band; + + if (qos) { + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_NULLFUNC | + IEEE80211_FCTL_FROMDS); + } else { + size -= 2; + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_NULLFUNC | + IEEE80211_FCTL_FROMDS); + } + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + size); + if (!skb) { + ret = -ENOMEM; + goto unlock; + } + + skb->dev = dev; + + skb_reserve(skb, local->hw.extra_tx_headroom); + + nullfunc = skb_put(skb, size); + nullfunc->frame_control = fc; + nullfunc->duration_id = 0; + memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); + memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); + memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); + nullfunc->seq_ctrl = 0; + + info = IEEE80211_SKB_CB(skb); + + info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS | + IEEE80211_TX_INTFL_NL80211_FRAME_TX; + info->band = band; + + skb_set_queue_mapping(skb, IEEE80211_AC_VO); + skb->priority = 7; + if (qos) + nullfunc->qos_ctrl = cpu_to_le16(7); + + ret = ieee80211_attach_ack_skb(local, skb, cookie, GFP_ATOMIC); + if (ret) { + kfree_skb(skb); + goto unlock; + } + + local_bh_disable(); + ieee80211_xmit(sdata, sta, skb); + local_bh_enable(); + + ret = 0; +unlock: + rcu_read_unlock(); + mutex_unlock(&local->mtx); + + return ret; +} + +static int ieee80211_cfg_get_channel(struct wiphy *wiphy, + struct wireless_dev *wdev, + unsigned int link_id, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_link_data *link; + int ret = -ENODATA; + + rcu_read_lock(); + link = rcu_dereference(sdata->link[link_id]); + if (!link) { + ret = -ENOLINK; + goto out; + } 
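+
+	/*
+	 * Report the link's channel context when one is active; for a
+	 * monitor-only phy (open_count == monitors) fall back to the
+	 * monitor or operating chandef below.
+	 */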
+ + chanctx_conf = rcu_dereference(link->conf->chanctx_conf); + if (chanctx_conf) { + *chandef = link->conf->chandef; + ret = 0; + } else if (local->open_count > 0 && + local->open_count == local->monitors && + sdata->vif.type == NL80211_IFTYPE_MONITOR) { + if (local->use_chanctx) + *chandef = local->monitor_chandef; + else + *chandef = local->_oper_chandef; + ret = 0; + } +out: + rcu_read_unlock(); + + return ret; +} + +#ifdef CONFIG_PM +static void ieee80211_set_wakeup(struct wiphy *wiphy, bool enabled) +{ + drv_set_wakeup(wiphy_priv(wiphy), enabled); +} +#endif + +static int ieee80211_set_qos_map(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_qos_map *qos_map) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct mac80211_qos_map *new_qos_map, *old_qos_map; + + if (qos_map) { + new_qos_map = kzalloc(sizeof(*new_qos_map), GFP_KERNEL); + if (!new_qos_map) + return -ENOMEM; + memcpy(&new_qos_map->qos_map, qos_map, sizeof(*qos_map)); + } else { + /* A NULL qos_map was passed to disable QoS mapping */ + new_qos_map = NULL; + } + + old_qos_map = sdata_dereference(sdata->qos_map, sdata); + rcu_assign_pointer(sdata->qos_map, new_qos_map); + if (old_qos_map) + kfree_rcu(old_qos_map, rcu_head); + + return 0; +} + +static int ieee80211_set_ap_chanwidth(struct wiphy *wiphy, + struct net_device *dev, + unsigned int link_id, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_link_data *link; + int ret; + u64 changed = 0; + + link = sdata_dereference(sdata->link[link_id], sdata); + + ret = ieee80211_link_change_bandwidth(link, chandef, &changed); + if (ret == 0) + ieee80211_link_info_change_notify(sdata, link, changed); + + return ret; +} + +static int ieee80211_add_tx_ts(struct wiphy *wiphy, struct net_device *dev, + u8 tsid, const u8 *peer, u8 up, + u16 admitted_time) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + int ac = ieee802_1d_to_ac[up]; + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + if (!(sdata->wmm_acm & BIT(up))) + return -EINVAL; + + if (ifmgd->tx_tspec[ac].admitted_time) + return -EBUSY; + + if (admitted_time) { + ifmgd->tx_tspec[ac].admitted_time = 32 * admitted_time; + ifmgd->tx_tspec[ac].tsid = tsid; + ifmgd->tx_tspec[ac].up = up; + } + + return 0; +} + +static int ieee80211_del_tx_ts(struct wiphy *wiphy, struct net_device *dev, + u8 tsid, const u8 *peer) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = wiphy_priv(wiphy); + int ac; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac]; + + /* skip unused entries */ + if (!tx_tspec->admitted_time) + continue; + + if (tx_tspec->tsid != tsid) + continue; + + /* due to this new packets will be reassigned to non-ACM ACs */ + tx_tspec->up = -1; + + /* Make sure that all packets have been sent to avoid to + * restore the QoS params on packets that are still on the + * queues. 
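+	 * (synchronize_net() and the queue flush below make sure frames
+	 * classified with the old parameters have left the queues before
+	 * the defaults are restored.)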
+ */ + synchronize_net(); + ieee80211_flush_queues(local, sdata, false); + + /* restore the normal QoS parameters + * (unconditionally to avoid races) + */ + tx_tspec->action = TX_TSPEC_ACTION_STOP_DOWNGRADE; + tx_tspec->downgraded = false; + ieee80211_sta_handle_tspec_ac_params(sdata); + + /* finally clear all the data */ + memset(tx_tspec, 0, sizeof(*tx_tspec)); + + return 0; + } + + return -ENOENT; +} + +void ieee80211_nan_func_terminated(struct ieee80211_vif *vif, + u8 inst_id, + enum nl80211_nan_func_term_reason reason, + gfp_t gfp) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct cfg80211_nan_func *func; + u64 cookie; + + if (WARN_ON(vif->type != NL80211_IFTYPE_NAN)) + return; + + spin_lock_bh(&sdata->u.nan.func_lock); + + func = idr_find(&sdata->u.nan.function_inst_ids, inst_id); + if (WARN_ON(!func)) { + spin_unlock_bh(&sdata->u.nan.func_lock); + return; + } + + cookie = func->cookie; + idr_remove(&sdata->u.nan.function_inst_ids, inst_id); + + spin_unlock_bh(&sdata->u.nan.func_lock); + + cfg80211_free_nan_func(func); + + cfg80211_nan_func_terminated(ieee80211_vif_to_wdev(vif), inst_id, + reason, cookie, gfp); +} +EXPORT_SYMBOL(ieee80211_nan_func_terminated); + +void ieee80211_nan_func_match(struct ieee80211_vif *vif, + struct cfg80211_nan_match_params *match, + gfp_t gfp) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct cfg80211_nan_func *func; + + if (WARN_ON(vif->type != NL80211_IFTYPE_NAN)) + return; + + spin_lock_bh(&sdata->u.nan.func_lock); + + func = idr_find(&sdata->u.nan.function_inst_ids, match->inst_id); + if (WARN_ON(!func)) { + spin_unlock_bh(&sdata->u.nan.func_lock); + return; + } + match->cookie = func->cookie; + + spin_unlock_bh(&sdata->u.nan.func_lock); + + cfg80211_nan_match(ieee80211_vif_to_wdev(vif), match, gfp); +} +EXPORT_SYMBOL(ieee80211_nan_func_match); + +static int ieee80211_set_multicast_to_unicast(struct wiphy *wiphy, + struct net_device *dev, + const bool enabled) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + sdata->u.ap.multicast_to_unicast = enabled; + + return 0; +} + +void ieee80211_fill_txq_stats(struct cfg80211_txq_stats *txqstats, + struct txq_info *txqi) +{ + if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_BACKLOG_BYTES))) { + txqstats->filled |= BIT(NL80211_TXQ_STATS_BACKLOG_BYTES); + txqstats->backlog_bytes = txqi->tin.backlog_bytes; + } + + if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_BACKLOG_PACKETS))) { + txqstats->filled |= BIT(NL80211_TXQ_STATS_BACKLOG_PACKETS); + txqstats->backlog_packets = txqi->tin.backlog_packets; + } + + if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_FLOWS))) { + txqstats->filled |= BIT(NL80211_TXQ_STATS_FLOWS); + txqstats->flows = txqi->tin.flows; + } + + if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_DROPS))) { + txqstats->filled |= BIT(NL80211_TXQ_STATS_DROPS); + txqstats->drops = txqi->cstats.drop_count; + } + + if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_ECN_MARKS))) { + txqstats->filled |= BIT(NL80211_TXQ_STATS_ECN_MARKS); + txqstats->ecn_marks = txqi->cstats.ecn_mark; + } + + if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_OVERLIMIT))) { + txqstats->filled |= BIT(NL80211_TXQ_STATS_OVERLIMIT); + txqstats->overlimit = txqi->tin.overlimit; + } + + if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_COLLISIONS))) { + txqstats->filled |= BIT(NL80211_TXQ_STATS_COLLISIONS); + txqstats->collisions = txqi->tin.collisions; + } + + if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_TX_BYTES))) { + txqstats->filled |= BIT(NL80211_TXQ_STATS_TX_BYTES); + 
txqstats->tx_bytes = txqi->tin.tx_bytes; + } + + if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_TX_PACKETS))) { + txqstats->filled |= BIT(NL80211_TXQ_STATS_TX_PACKETS); + txqstats->tx_packets = txqi->tin.tx_packets; + } +} + +static int ieee80211_get_txq_stats(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct cfg80211_txq_stats *txqstats) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_sub_if_data *sdata; + int ret = 0; + + spin_lock_bh(&local->fq.lock); + rcu_read_lock(); + + if (wdev) { + sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + if (!sdata->vif.txq) { + ret = 1; + goto out; + } + ieee80211_fill_txq_stats(txqstats, to_txq_info(sdata->vif.txq)); + } else { + /* phy stats */ + txqstats->filled |= BIT(NL80211_TXQ_STATS_BACKLOG_PACKETS) | + BIT(NL80211_TXQ_STATS_BACKLOG_BYTES) | + BIT(NL80211_TXQ_STATS_OVERLIMIT) | + BIT(NL80211_TXQ_STATS_OVERMEMORY) | + BIT(NL80211_TXQ_STATS_COLLISIONS) | + BIT(NL80211_TXQ_STATS_MAX_FLOWS); + txqstats->backlog_packets = local->fq.backlog; + txqstats->backlog_bytes = local->fq.memory_usage; + txqstats->overlimit = local->fq.overlimit; + txqstats->overmemory = local->fq.overmemory; + txqstats->collisions = local->fq.collisions; + txqstats->max_flows = local->fq.flows_cnt; + } + +out: + rcu_read_unlock(); + spin_unlock_bh(&local->fq.lock); + + return ret; +} + +static int +ieee80211_get_ftm_responder_stats(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_ftm_responder_stats *ftm_stats) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + return drv_get_ftm_responder_stats(local, sdata, ftm_stats); +} + +static int +ieee80211_start_pmsr(struct wiphy *wiphy, struct wireless_dev *dev, + struct cfg80211_pmsr_request *request) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(dev); + + return drv_start_pmsr(local, sdata, request); +} + +static void +ieee80211_abort_pmsr(struct wiphy *wiphy, struct wireless_dev *dev, + struct cfg80211_pmsr_request *request) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(dev); + + return drv_abort_pmsr(local, sdata, request); +} + +static int ieee80211_set_tid_config(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_tid_config *tid_conf) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct sta_info *sta; + int ret; + + if (!sdata->local->ops->set_tid_config) + return -EOPNOTSUPP; + + if (!tid_conf->peer) + return drv_set_tid_config(sdata->local, sdata, NULL, tid_conf); + + mutex_lock(&sdata->local->sta_mtx); + sta = sta_info_get_bss(sdata, tid_conf->peer); + if (!sta) { + mutex_unlock(&sdata->local->sta_mtx); + return -ENOENT; + } + + ret = drv_set_tid_config(sdata->local, sdata, &sta->sta, tid_conf); + mutex_unlock(&sdata->local->sta_mtx); + + return ret; +} + +static int ieee80211_reset_tid_config(struct wiphy *wiphy, + struct net_device *dev, + const u8 *peer, u8 tids) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct sta_info *sta; + int ret; + + if (!sdata->local->ops->reset_tid_config) + return -EOPNOTSUPP; + + if (!peer) + return drv_reset_tid_config(sdata->local, sdata, NULL, tids); + + mutex_lock(&sdata->local->sta_mtx); + sta = sta_info_get_bss(sdata, peer); + if (!sta) { + mutex_unlock(&sdata->local->sta_mtx); + return -ENOENT; + } + + ret = drv_reset_tid_config(sdata->local, 
sdata, &sta->sta, tids); + mutex_unlock(&sdata->local->sta_mtx); + + return ret; +} + +static int ieee80211_set_sar_specs(struct wiphy *wiphy, + struct cfg80211_sar_specs *sar) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + if (!local->ops->set_sar_specs) + return -EOPNOTSUPP; + + return local->ops->set_sar_specs(&local->hw, sar); +} + +static int +ieee80211_set_after_color_change_beacon(struct ieee80211_sub_if_data *sdata, + u64 *changed) +{ + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP: { + int ret; + + if (!sdata->deflink.u.ap.next_beacon) + return -EINVAL; + + ret = ieee80211_assign_beacon(sdata, &sdata->deflink, + sdata->deflink.u.ap.next_beacon, + NULL, NULL, changed); + ieee80211_free_next_beacon(&sdata->deflink); + + if (ret < 0) + return ret; + + break; + } + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + return 0; +} + +static int +ieee80211_set_color_change_beacon(struct ieee80211_sub_if_data *sdata, + struct cfg80211_color_change_settings *params, + u64 *changed) +{ + struct ieee80211_color_change_settings color_change = {}; + int err; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP: + sdata->deflink.u.ap.next_beacon = + cfg80211_beacon_dup(¶ms->beacon_next); + if (!sdata->deflink.u.ap.next_beacon) + return -ENOMEM; + + if (params->count <= 1) + break; + + color_change.counter_offset_beacon = + params->counter_offset_beacon; + color_change.counter_offset_presp = + params->counter_offset_presp; + color_change.count = params->count; + + err = ieee80211_assign_beacon(sdata, &sdata->deflink, + ¶ms->beacon_color_change, + NULL, &color_change, changed); + if (err < 0) { + ieee80211_free_next_beacon(&sdata->deflink); + return err; + } + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static void +ieee80211_color_change_bss_config_notify(struct ieee80211_sub_if_data *sdata, + u8 color, int enable, u64 changed) +{ + sdata->vif.bss_conf.he_bss_color.color = color; + sdata->vif.bss_conf.he_bss_color.enabled = enable; + changed |= BSS_CHANGED_HE_BSS_COLOR; + + ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed); + + if (!sdata->vif.bss_conf.nontransmitted && sdata->vif.mbssid_tx_vif) { + struct ieee80211_sub_if_data *child; + + mutex_lock(&sdata->local->iflist_mtx); + list_for_each_entry(child, &sdata->local->interfaces, list) { + if (child != sdata && child->vif.mbssid_tx_vif == &sdata->vif) { + child->vif.bss_conf.he_bss_color.color = color; + child->vif.bss_conf.he_bss_color.enabled = enable; + ieee80211_link_info_change_notify(child, + &child->deflink, + BSS_CHANGED_HE_BSS_COLOR); + } + } + mutex_unlock(&sdata->local->iflist_mtx); + } +} + +static int ieee80211_color_change_finalize(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + u64 changed = 0; + int err; + + sdata_assert_lock(sdata); + lockdep_assert_held(&local->mtx); + + sdata->vif.bss_conf.color_change_active = false; + + err = ieee80211_set_after_color_change_beacon(sdata, &changed); + if (err) { + cfg80211_color_change_aborted_notify(sdata->dev); + return err; + } + + ieee80211_color_change_bss_config_notify(sdata, + sdata->vif.bss_conf.color_change_color, + 1, changed); + cfg80211_color_change_notify(sdata->dev); + + return 0; +} + +void ieee80211_color_change_finalize_work(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, + deflink.color_change_finalize_work); + struct ieee80211_local *local = sdata->local; + + sdata_lock(sdata); + 
mutex_lock(&local->mtx); + + /* AP might have been stopped while waiting for the lock. */ + if (!sdata->vif.bss_conf.color_change_active) + goto unlock; + + if (!ieee80211_sdata_running(sdata)) + goto unlock; + + ieee80211_color_change_finalize(sdata); + +unlock: + mutex_unlock(&local->mtx); + sdata_unlock(sdata); +} + +void ieee80211_color_collision_detection_work(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct ieee80211_link_data *link = + container_of(delayed_work, struct ieee80211_link_data, + color_collision_detect_work); + struct ieee80211_sub_if_data *sdata = link->sdata; + + sdata_lock(sdata); + cfg80211_obss_color_collision_notify(sdata->dev, link->color_bitmap); + sdata_unlock(sdata); +} + +void ieee80211_color_change_finish(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + ieee80211_queue_work(&sdata->local->hw, + &sdata->deflink.color_change_finalize_work); +} +EXPORT_SYMBOL_GPL(ieee80211_color_change_finish); + +void +ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif, + u64 color_bitmap, gfp_t gfp) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_link_data *link = &sdata->deflink; + + if (sdata->vif.bss_conf.color_change_active || sdata->vif.bss_conf.csa_active) + return; + + if (delayed_work_pending(&link->color_collision_detect_work)) + return; + + link->color_bitmap = color_bitmap; + /* queue the color collision detection event every 500 ms in order to + * avoid sending too much netlink messages to userspace. + */ + ieee80211_queue_delayed_work(&sdata->local->hw, + &link->color_collision_detect_work, + msecs_to_jiffies(500)); +} +EXPORT_SYMBOL_GPL(ieee80211_obss_color_collision_notify); + +static int +ieee80211_color_change(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_color_change_settings *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + u64 changed = 0; + int err; + + sdata_assert_lock(sdata); + + if (sdata->vif.bss_conf.nontransmitted) + return -EINVAL; + + mutex_lock(&local->mtx); + + /* don't allow another color change if one is already active or if csa + * is active + */ + if (sdata->vif.bss_conf.color_change_active || sdata->vif.bss_conf.csa_active) { + err = -EBUSY; + goto out; + } + + err = ieee80211_set_color_change_beacon(sdata, params, &changed); + if (err) + goto out; + + sdata->vif.bss_conf.color_change_active = true; + sdata->vif.bss_conf.color_change_color = params->color; + + cfg80211_color_change_started_notify(sdata->dev, params->count); + + if (changed) + ieee80211_color_change_bss_config_notify(sdata, 0, 0, changed); + else + /* if the beacon didn't change, we can finalize immediately */ + ieee80211_color_change_finalize(sdata); + +out: + mutex_unlock(&local->mtx); + + return err; +} + +static int +ieee80211_set_radar_background(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + if (!local->ops->set_radar_background) + return -EOPNOTSUPP; + + return local->ops->set_radar_background(&local->hw, chandef); +} + +static int ieee80211_add_intf_link(struct wiphy *wiphy, + struct wireless_dev *wdev, + unsigned int link_id) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + int res; + + if (wdev->use_4addr) + return -EOPNOTSUPP; + + mutex_lock(&sdata->local->mtx); + res = ieee80211_vif_set_links(sdata, wdev->valid_links, 0); + 
mutex_unlock(&sdata->local->mtx); + + return res; +} + +static void ieee80211_del_intf_link(struct wiphy *wiphy, + struct wireless_dev *wdev, + unsigned int link_id) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + + mutex_lock(&sdata->local->mtx); + ieee80211_vif_set_links(sdata, wdev->valid_links, 0); + mutex_unlock(&sdata->local->mtx); +} + +static int sta_add_link_station(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct link_station_parameters *params) +{ + struct sta_info *sta; + int ret; + + sta = sta_info_get_bss(sdata, params->mld_mac); + if (!sta) + return -ENOENT; + + if (!sta->sta.valid_links) + return -EINVAL; + + if (sta->sta.valid_links & BIT(params->link_id)) + return -EALREADY; + + ret = ieee80211_sta_allocate_link(sta, params->link_id); + if (ret) + return ret; + + ret = sta_link_apply_parameters(local, sta, true, params); + if (ret) { + ieee80211_sta_free_link(sta, params->link_id); + return ret; + } + + /* ieee80211_sta_activate_link frees the link upon failure */ + return ieee80211_sta_activate_link(sta, params->link_id); +} + +static int +ieee80211_add_link_station(struct wiphy *wiphy, struct net_device *dev, + struct link_station_parameters *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = wiphy_priv(wiphy); + int ret; + + mutex_lock(&sdata->local->sta_mtx); + ret = sta_add_link_station(local, sdata, params); + mutex_unlock(&sdata->local->sta_mtx); + + return ret; +} + +static int sta_mod_link_station(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct link_station_parameters *params) +{ + struct sta_info *sta; + + sta = sta_info_get_bss(sdata, params->mld_mac); + if (!sta) + return -ENOENT; + + if (!(sta->sta.valid_links & BIT(params->link_id))) + return -EINVAL; + + return sta_link_apply_parameters(local, sta, false, params); +} + +static int +ieee80211_mod_link_station(struct wiphy *wiphy, struct net_device *dev, + struct link_station_parameters *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = wiphy_priv(wiphy); + int ret; + + mutex_lock(&sdata->local->sta_mtx); + ret = sta_mod_link_station(local, sdata, params); + mutex_unlock(&sdata->local->sta_mtx); + + return ret; +} + +static int sta_del_link_station(struct ieee80211_sub_if_data *sdata, + struct link_station_del_parameters *params) +{ + struct sta_info *sta; + + sta = sta_info_get_bss(sdata, params->mld_mac); + if (!sta) + return -ENOENT; + + if (!(sta->sta.valid_links & BIT(params->link_id))) + return -EINVAL; + + /* must not create a STA without links */ + if (sta->sta.valid_links == BIT(params->link_id)) + return -EINVAL; + + ieee80211_sta_remove_link(sta, params->link_id); + + return 0; +} + +static int +ieee80211_del_link_station(struct wiphy *wiphy, struct net_device *dev, + struct link_station_del_parameters *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + int ret; + + mutex_lock(&sdata->local->sta_mtx); + ret = sta_del_link_station(sdata, params); + mutex_unlock(&sdata->local->sta_mtx); + + return ret; +} + +static int ieee80211_set_hw_timestamp(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_set_hw_timestamp *hwts) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + + if (!local->ops->set_hw_timestamp) + return -EOPNOTSUPP; + + if (!check_sdata_in_driver(sdata)) + 
return -EIO; + + return local->ops->set_hw_timestamp(&local->hw, &sdata->vif, hwts); +} + +const struct cfg80211_ops mac80211_config_ops = { + .add_virtual_intf = ieee80211_add_iface, + .del_virtual_intf = ieee80211_del_iface, + .change_virtual_intf = ieee80211_change_iface, + .start_p2p_device = ieee80211_start_p2p_device, + .stop_p2p_device = ieee80211_stop_p2p_device, + .add_key = ieee80211_add_key, + .del_key = ieee80211_del_key, + .get_key = ieee80211_get_key, + .set_default_key = ieee80211_config_default_key, + .set_default_mgmt_key = ieee80211_config_default_mgmt_key, + .set_default_beacon_key = ieee80211_config_default_beacon_key, + .start_ap = ieee80211_start_ap, + .change_beacon = ieee80211_change_beacon, + .stop_ap = ieee80211_stop_ap, + .add_station = ieee80211_add_station, + .del_station = ieee80211_del_station, + .change_station = ieee80211_change_station, + .get_station = ieee80211_get_station, + .dump_station = ieee80211_dump_station, + .dump_survey = ieee80211_dump_survey, +#ifdef CONFIG_MAC80211_MESH + .add_mpath = ieee80211_add_mpath, + .del_mpath = ieee80211_del_mpath, + .change_mpath = ieee80211_change_mpath, + .get_mpath = ieee80211_get_mpath, + .dump_mpath = ieee80211_dump_mpath, + .get_mpp = ieee80211_get_mpp, + .dump_mpp = ieee80211_dump_mpp, + .update_mesh_config = ieee80211_update_mesh_config, + .get_mesh_config = ieee80211_get_mesh_config, + .join_mesh = ieee80211_join_mesh, + .leave_mesh = ieee80211_leave_mesh, +#endif + .join_ocb = ieee80211_join_ocb, + .leave_ocb = ieee80211_leave_ocb, + .change_bss = ieee80211_change_bss, + .inform_bss = ieee80211_inform_bss, + .set_txq_params = ieee80211_set_txq_params, + .set_monitor_channel = ieee80211_set_monitor_channel, + .suspend = ieee80211_suspend, + .resume = ieee80211_resume, + .scan = ieee80211_scan, + .abort_scan = ieee80211_abort_scan, + .sched_scan_start = ieee80211_sched_scan_start, + .sched_scan_stop = ieee80211_sched_scan_stop, + .auth = ieee80211_auth, + .assoc = ieee80211_assoc, + .deauth = ieee80211_deauth, + .disassoc = ieee80211_disassoc, + .join_ibss = ieee80211_join_ibss, + .leave_ibss = ieee80211_leave_ibss, + .set_mcast_rate = ieee80211_set_mcast_rate, + .set_wiphy_params = ieee80211_set_wiphy_params, + .set_tx_power = ieee80211_set_tx_power, + .get_tx_power = ieee80211_get_tx_power, + .rfkill_poll = ieee80211_rfkill_poll, + CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd) + CFG80211_TESTMODE_DUMP(ieee80211_testmode_dump) + .set_power_mgmt = ieee80211_set_power_mgmt, + .set_bitrate_mask = ieee80211_set_bitrate_mask, + .remain_on_channel = ieee80211_remain_on_channel, + .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel, + .mgmt_tx = ieee80211_mgmt_tx, + .mgmt_tx_cancel_wait = ieee80211_mgmt_tx_cancel_wait, + .set_cqm_rssi_config = ieee80211_set_cqm_rssi_config, + .set_cqm_rssi_range_config = ieee80211_set_cqm_rssi_range_config, + .update_mgmt_frame_registrations = + ieee80211_update_mgmt_frame_registrations, + .set_antenna = ieee80211_set_antenna, + .get_antenna = ieee80211_get_antenna, + .set_rekey_data = ieee80211_set_rekey_data, + .tdls_oper = ieee80211_tdls_oper, + .tdls_mgmt = ieee80211_tdls_mgmt, + .tdls_channel_switch = ieee80211_tdls_channel_switch, + .tdls_cancel_channel_switch = ieee80211_tdls_cancel_channel_switch, + .probe_client = ieee80211_probe_client, + .set_noack_map = ieee80211_set_noack_map, +#ifdef CONFIG_PM + .set_wakeup = ieee80211_set_wakeup, +#endif + .get_channel = ieee80211_cfg_get_channel, + .start_radar_detection = ieee80211_start_radar_detection, + .end_cac = 
ieee80211_end_cac, + .channel_switch = ieee80211_channel_switch, + .set_qos_map = ieee80211_set_qos_map, + .set_ap_chanwidth = ieee80211_set_ap_chanwidth, + .add_tx_ts = ieee80211_add_tx_ts, + .del_tx_ts = ieee80211_del_tx_ts, + .start_nan = ieee80211_start_nan, + .stop_nan = ieee80211_stop_nan, + .nan_change_conf = ieee80211_nan_change_conf, + .add_nan_func = ieee80211_add_nan_func, + .del_nan_func = ieee80211_del_nan_func, + .set_multicast_to_unicast = ieee80211_set_multicast_to_unicast, + .tx_control_port = ieee80211_tx_control_port, + .get_txq_stats = ieee80211_get_txq_stats, + .get_ftm_responder_stats = ieee80211_get_ftm_responder_stats, + .start_pmsr = ieee80211_start_pmsr, + .abort_pmsr = ieee80211_abort_pmsr, + .probe_mesh_link = ieee80211_probe_mesh_link, + .set_tid_config = ieee80211_set_tid_config, + .reset_tid_config = ieee80211_reset_tid_config, + .set_sar_specs = ieee80211_set_sar_specs, + .color_change = ieee80211_color_change, + .set_radar_background = ieee80211_set_radar_background, + .add_intf_link = ieee80211_add_intf_link, + .del_intf_link = ieee80211_del_intf_link, + .add_link_station = ieee80211_add_link_station, + .mod_link_station = ieee80211_mod_link_station, + .del_link_station = ieee80211_del_link_station, + .set_hw_timestamp = ieee80211_set_hw_timestamp, +}; diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c new file mode 100644 index 0000000000..68952752b5 --- /dev/null +++ b/net/mac80211/chan.c @@ -0,0 +1,2069 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * mac80211 - channel management + * Copyright 2020 - 2022 Intel Corporation + */ + +#include <linux/nl80211.h> +#include <linux/export.h> +#include <linux/rtnetlink.h> +#include <net/cfg80211.h> +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" + +static int ieee80211_chanctx_num_assigned(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx) +{ + struct ieee80211_link_data *link; + int num = 0; + + lockdep_assert_held(&local->chanctx_mtx); + + list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list) + num++; + + return num; +} + +static int ieee80211_chanctx_num_reserved(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx) +{ + struct ieee80211_link_data *link; + int num = 0; + + lockdep_assert_held(&local->chanctx_mtx); + + list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) + num++; + + return num; +} + +int ieee80211_chanctx_refcount(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx) +{ + return ieee80211_chanctx_num_assigned(local, ctx) + + ieee80211_chanctx_num_reserved(local, ctx); +} + +static int ieee80211_num_chanctx(struct ieee80211_local *local) +{ + struct ieee80211_chanctx *ctx; + int num = 0; + + lockdep_assert_held(&local->chanctx_mtx); + + list_for_each_entry(ctx, &local->chanctx_list, list) + num++; + + return num; +} + +static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local) +{ + lockdep_assert_held(&local->chanctx_mtx); + return ieee80211_num_chanctx(local) < ieee80211_max_num_channels(local); +} + +static struct ieee80211_chanctx * +ieee80211_link_get_chanctx(struct ieee80211_link_data *link) +{ + struct ieee80211_local *local __maybe_unused = link->sdata->local; + struct ieee80211_chanctx_conf *conf; + + conf = rcu_dereference_protected(link->conf->chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + if (!conf) + return NULL; + + return container_of(conf, struct ieee80211_chanctx, conf); +} + +static const struct cfg80211_chan_def * 
+ieee80211_chanctx_reserved_chandef(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + const struct cfg80211_chan_def *compat) +{ + struct ieee80211_link_data *link; + + lockdep_assert_held(&local->chanctx_mtx); + + list_for_each_entry(link, &ctx->reserved_links, + reserved_chanctx_list) { + if (!compat) + compat = &link->reserved_chandef; + + compat = cfg80211_chandef_compatible(&link->reserved_chandef, + compat); + if (!compat) + break; + } + + return compat; +} + +static const struct cfg80211_chan_def * +ieee80211_chanctx_non_reserved_chandef(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + const struct cfg80211_chan_def *compat) +{ + struct ieee80211_link_data *link; + + lockdep_assert_held(&local->chanctx_mtx); + + list_for_each_entry(link, &ctx->assigned_links, + assigned_chanctx_list) { + struct ieee80211_bss_conf *link_conf = link->conf; + + if (link->reserved_chanctx) + continue; + + if (!compat) + compat = &link_conf->chandef; + + compat = cfg80211_chandef_compatible( + &link_conf->chandef, compat); + if (!compat) + break; + } + + return compat; +} + +static const struct cfg80211_chan_def * +ieee80211_chanctx_combined_chandef(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + const struct cfg80211_chan_def *compat) +{ + lockdep_assert_held(&local->chanctx_mtx); + + compat = ieee80211_chanctx_reserved_chandef(local, ctx, compat); + if (!compat) + return NULL; + + compat = ieee80211_chanctx_non_reserved_chandef(local, ctx, compat); + if (!compat) + return NULL; + + return compat; +} + +static bool +ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + const struct cfg80211_chan_def *def) +{ + lockdep_assert_held(&local->chanctx_mtx); + + if (ieee80211_chanctx_combined_chandef(local, ctx, def)) + return true; + + if (!list_empty(&ctx->reserved_links) && + ieee80211_chanctx_reserved_chandef(local, ctx, def)) + return true; + + return false; +} + +static struct ieee80211_chanctx * +ieee80211_find_reservation_chanctx(struct ieee80211_local *local, + const struct cfg80211_chan_def *chandef, + enum ieee80211_chanctx_mode mode) +{ + struct ieee80211_chanctx *ctx; + + lockdep_assert_held(&local->chanctx_mtx); + + if (mode == IEEE80211_CHANCTX_EXCLUSIVE) + return NULL; + + list_for_each_entry(ctx, &local->chanctx_list, list) { + if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) + continue; + + if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) + continue; + + if (!ieee80211_chanctx_can_reserve_chandef(local, ctx, + chandef)) + continue; + + return ctx; + } + + return NULL; +} + +static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta, + unsigned int link_id) +{ + enum ieee80211_sta_rx_bandwidth width; + struct link_sta_info *link_sta; + + link_sta = rcu_dereference(sta->link[link_id]); + + /* no effect if this STA has no presence on this link */ + if (!link_sta) + return NL80211_CHAN_WIDTH_20_NOHT; + + width = ieee80211_sta_cap_rx_bw(link_sta); + + switch (width) { + case IEEE80211_STA_RX_BW_20: + if (link_sta->pub->ht_cap.ht_supported) + return NL80211_CHAN_WIDTH_20; + else + return NL80211_CHAN_WIDTH_20_NOHT; + case IEEE80211_STA_RX_BW_40: + return NL80211_CHAN_WIDTH_40; + case IEEE80211_STA_RX_BW_80: + return NL80211_CHAN_WIDTH_80; + case IEEE80211_STA_RX_BW_160: + /* + * This applied for both 160 and 80+80. since we use + * the returned value to consider degradation of + * ctx->conf.min_def, we have to make sure to take + * the bigger one (NL80211_CHAN_WIDTH_160). 
+ * Otherwise we might try degrading even when not + * needed, as the max required sta_bw returned (80+80) + * might be smaller than the configured bw (160). + */ + return NL80211_CHAN_WIDTH_160; + case IEEE80211_STA_RX_BW_320: + return NL80211_CHAN_WIDTH_320; + default: + WARN_ON(1); + return NL80211_CHAN_WIDTH_20; + } +} + +static enum nl80211_chan_width +ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata, + unsigned int link_id) +{ + enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT; + struct sta_info *sta; + + list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { + if (sdata != sta->sdata && + !(sta->sdata->bss && sta->sdata->bss == sdata->bss)) + continue; + + max_bw = max(max_bw, ieee80211_get_sta_bw(sta, link_id)); + } + + return max_bw; +} + +static enum nl80211_chan_width +ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata, + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for) +{ + enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT; + struct ieee80211_vif *vif = &sdata->vif; + int link_id; + + rcu_read_lock(); + for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { + enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT; + struct ieee80211_link_data *link = + rcu_dereference(sdata->link[link_id]); + + if (!link) + continue; + + if (link != rsvd_for && + rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf) + continue; + + switch (vif->type) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + width = ieee80211_get_max_required_bw(sdata, link_id); + break; + case NL80211_IFTYPE_STATION: + /* + * The ap's sta->bandwidth is not set yet at this + * point, so take the width from the chandef, but + * account also for TDLS peers + */ + width = max(link->conf->chandef.width, + ieee80211_get_max_required_bw(sdata, link_id)); + break; + case NL80211_IFTYPE_P2P_DEVICE: + case NL80211_IFTYPE_NAN: + continue; + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_OCB: + width = link->conf->chandef.width; + break; + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_UNSPECIFIED: + case NUM_NL80211_IFTYPES: + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_GO: + WARN_ON_ONCE(1); + } + + max_bw = max(max_bw, width); + } + rcu_read_unlock(); + + return max_bw; +} + +static enum nl80211_chan_width +ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for) +{ + struct ieee80211_sub_if_data *sdata; + enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT; + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + enum nl80211_chan_width width; + + if (!ieee80211_sdata_running(sdata)) + continue; + + width = ieee80211_get_chanctx_vif_max_required_bw(sdata, ctx, + rsvd_for); + + max_bw = max(max_bw, width); + } + + /* use the configured bandwidth in case of monitor interface */ + sdata = rcu_dereference(local->monitor_sdata); + if (sdata && + rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &ctx->conf) + max_bw = max(max_bw, ctx->conf.def.width); + + rcu_read_unlock(); + + return max_bw; +} + +/* + * recalc the min required chan width of the channel context, which is + * the max of min required widths of all the interfaces bound to this + * channel context. 
+ */ +static u32 +_ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for) +{ + enum nl80211_chan_width max_bw; + struct cfg80211_chan_def min_def; + + lockdep_assert_held(&local->chanctx_mtx); + + /* don't optimize non-20MHz based and radar_enabled confs */ + if (ctx->conf.def.width == NL80211_CHAN_WIDTH_5 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_10 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_1 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_2 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_4 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_8 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_16 || + ctx->conf.radar_enabled) { + ctx->conf.min_def = ctx->conf.def; + return 0; + } + + max_bw = ieee80211_get_chanctx_max_required_bw(local, ctx, rsvd_for); + + /* downgrade chandef up to max_bw */ + min_def = ctx->conf.def; + while (min_def.width > max_bw) + ieee80211_chandef_downgrade(&min_def); + + if (cfg80211_chandef_identical(&ctx->conf.min_def, &min_def)) + return 0; + + ctx->conf.min_def = min_def; + if (!ctx->driver_present) + return 0; + + return IEEE80211_CHANCTX_CHANGE_MIN_WIDTH; +} + +/* calling this function is assuming that station vif is updated to + * lates changes by calling ieee80211_link_update_chandef + */ +static void ieee80211_chan_bw_change(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + bool narrowed) +{ + struct sta_info *sta; + struct ieee80211_supported_band *sband = + local->hw.wiphy->bands[ctx->conf.def.chan->band]; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &local->sta_list, + list) { + struct ieee80211_sub_if_data *sdata = sta->sdata; + enum ieee80211_sta_rx_bandwidth new_sta_bw; + unsigned int link_id; + + if (!ieee80211_sdata_running(sta->sdata)) + continue; + + for (link_id = 0; link_id < ARRAY_SIZE(sta->sdata->link); link_id++) { + struct ieee80211_bss_conf *link_conf = + rcu_dereference(sdata->vif.link_conf[link_id]); + struct link_sta_info *link_sta; + + if (!link_conf) + continue; + + if (rcu_access_pointer(link_conf->chanctx_conf) != &ctx->conf) + continue; + + link_sta = rcu_dereference(sta->link[link_id]); + if (!link_sta) + continue; + + new_sta_bw = ieee80211_sta_cur_vht_bw(link_sta); + + /* nothing change */ + if (new_sta_bw == link_sta->pub->bandwidth) + continue; + + /* vif changed to narrow BW and narrow BW for station wasn't + * requested or vise versa */ + if ((new_sta_bw < link_sta->pub->bandwidth) == !narrowed) + continue; + + link_sta->pub->bandwidth = new_sta_bw; + rate_control_rate_update(local, sband, sta, link_id, + IEEE80211_RC_BW_CHANGED); + } + } + rcu_read_unlock(); +} + +/* + * recalc the min required chan width of the channel context, which is + * the max of min required widths of all the interfaces bound to this + * channel context. 
+ */ +void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for) +{ + u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for); + + if (!changed) + return; + + /* check is BW narrowed */ + ieee80211_chan_bw_change(local, ctx, true); + + drv_change_chanctx(local, ctx, changed); + + /* check is BW wider */ + ieee80211_chan_bw_change(local, ctx, false); +} + +static void _ieee80211_change_chanctx(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + struct ieee80211_chanctx *old_ctx, + const struct cfg80211_chan_def *chandef, + struct ieee80211_link_data *rsvd_for) +{ + u32 changed; + + /* expected to handle only 20/40/80/160/320 channel widths */ + switch (chandef->width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_40: + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + case NL80211_CHAN_WIDTH_320: + break; + default: + WARN_ON(1); + } + + /* Check maybe BW narrowed - we do this _before_ calling recalc_chanctx_min_def + * due to maybe not returning from it, e.g in case new context was added + * first time with all parameters up to date. + */ + ieee80211_chan_bw_change(local, old_ctx, true); + + if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) { + ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for); + return; + } + + WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef)); + + ctx->conf.def = *chandef; + + /* check if min chanctx also changed */ + changed = IEEE80211_CHANCTX_CHANGE_WIDTH | + _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for); + drv_change_chanctx(local, ctx, changed); + + if (!local->use_chanctx) { + local->_oper_chandef = *chandef; + ieee80211_hw_config(local, 0); + } + + /* check is BW wider */ + ieee80211_chan_bw_change(local, old_ctx, false); +} + +static void ieee80211_change_chanctx(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + struct ieee80211_chanctx *old_ctx, + const struct cfg80211_chan_def *chandef) +{ + _ieee80211_change_chanctx(local, ctx, old_ctx, chandef, NULL); +} + +static struct ieee80211_chanctx * +ieee80211_find_chanctx(struct ieee80211_local *local, + const struct cfg80211_chan_def *chandef, + enum ieee80211_chanctx_mode mode) +{ + struct ieee80211_chanctx *ctx; + + lockdep_assert_held(&local->chanctx_mtx); + + if (mode == IEEE80211_CHANCTX_EXCLUSIVE) + return NULL; + + list_for_each_entry(ctx, &local->chanctx_list, list) { + const struct cfg80211_chan_def *compat; + + if (ctx->replace_state != IEEE80211_CHANCTX_REPLACE_NONE) + continue; + + if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) + continue; + + compat = cfg80211_chandef_compatible(&ctx->conf.def, chandef); + if (!compat) + continue; + + compat = ieee80211_chanctx_reserved_chandef(local, ctx, + compat); + if (!compat) + continue; + + ieee80211_change_chanctx(local, ctx, ctx, compat); + + return ctx; + } + + return NULL; +} + +bool ieee80211_is_radar_required(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + + lockdep_assert_held(&local->mtx); + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + unsigned int link_id; + + for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { + struct ieee80211_link_data *link; + + link = rcu_dereference(sdata->link[link_id]); + + if (link && link->radar_required) { + rcu_read_unlock(); + return true; + } + } + } + rcu_read_unlock(); + + return false; +} + 
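+/*
+ * Note: ieee80211_is_radar_required() above reports whether any link on any
+ * interface has radar_required set, while ieee80211_chanctx_radar_required()
+ * below only considers running interfaces whose links are bound to the given
+ * channel context; the latter is what ieee80211_recalc_radar_chanctx() uses
+ * to recompute ctx->conf.radar_enabled.
+ */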
+static bool +ieee80211_chanctx_radar_required(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx) +{ + struct ieee80211_chanctx_conf *conf = &ctx->conf; + struct ieee80211_sub_if_data *sdata; + bool required = false; + + lockdep_assert_held(&local->chanctx_mtx); + lockdep_assert_held(&local->mtx); + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + unsigned int link_id; + + if (!ieee80211_sdata_running(sdata)) + continue; + for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { + struct ieee80211_link_data *link; + + link = rcu_dereference(sdata->link[link_id]); + if (!link) + continue; + + if (rcu_access_pointer(link->conf->chanctx_conf) != conf) + continue; + if (!link->radar_required) + continue; + required = true; + break; + } + + if (required) + break; + } + rcu_read_unlock(); + + return required; +} + +static struct ieee80211_chanctx * +ieee80211_alloc_chanctx(struct ieee80211_local *local, + const struct cfg80211_chan_def *chandef, + enum ieee80211_chanctx_mode mode) +{ + struct ieee80211_chanctx *ctx; + + lockdep_assert_held(&local->chanctx_mtx); + + ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL); + if (!ctx) + return NULL; + + INIT_LIST_HEAD(&ctx->assigned_links); + INIT_LIST_HEAD(&ctx->reserved_links); + ctx->conf.def = *chandef; + ctx->conf.rx_chains_static = 1; + ctx->conf.rx_chains_dynamic = 1; + ctx->mode = mode; + ctx->conf.radar_enabled = false; + _ieee80211_recalc_chanctx_min_def(local, ctx, NULL); + + return ctx; +} + +static int ieee80211_add_chanctx(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx) +{ + u32 changed; + int err; + + lockdep_assert_held(&local->mtx); + lockdep_assert_held(&local->chanctx_mtx); + + if (!local->use_chanctx) + local->hw.conf.radar_enabled = ctx->conf.radar_enabled; + + /* turn idle off *before* setting channel -- some drivers need that */ + changed = ieee80211_idle_off(local); + if (changed) + ieee80211_hw_config(local, changed); + + if (!local->use_chanctx) { + local->_oper_chandef = ctx->conf.def; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + } else { + err = drv_add_chanctx(local, ctx); + if (err) { + ieee80211_recalc_idle(local); + return err; + } + } + + return 0; +} + +static struct ieee80211_chanctx * +ieee80211_new_chanctx(struct ieee80211_local *local, + const struct cfg80211_chan_def *chandef, + enum ieee80211_chanctx_mode mode) +{ + struct ieee80211_chanctx *ctx; + int err; + + lockdep_assert_held(&local->mtx); + lockdep_assert_held(&local->chanctx_mtx); + + ctx = ieee80211_alloc_chanctx(local, chandef, mode); + if (!ctx) + return ERR_PTR(-ENOMEM); + + err = ieee80211_add_chanctx(local, ctx); + if (err) { + kfree(ctx); + return ERR_PTR(err); + } + + list_add_rcu(&ctx->list, &local->chanctx_list); + return ctx; +} + +static void ieee80211_del_chanctx(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx) +{ + lockdep_assert_held(&local->chanctx_mtx); + + if (!local->use_chanctx) { + struct cfg80211_chan_def *chandef = &local->_oper_chandef; + /* S1G doesn't have 20MHz, so get the correct width for the + * current channel. + */ + if (chandef->chan->band == NL80211_BAND_S1GHZ) + chandef->width = + ieee80211_s1g_channel_width(chandef->chan); + else + chandef->width = NL80211_CHAN_WIDTH_20_NOHT; + chandef->center_freq1 = chandef->chan->center_freq; + chandef->freq1_offset = chandef->chan->freq_offset; + chandef->center_freq2 = 0; + + /* NOTE: Disabling radar is only valid here for + * single channel context. 
To be sure, check it ... + */ + WARN_ON(local->hw.conf.radar_enabled && + !list_empty(&local->chanctx_list)); + + local->hw.conf.radar_enabled = false; + + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + } else { + drv_remove_chanctx(local, ctx); + } + + ieee80211_recalc_idle(local); +} + +static void ieee80211_free_chanctx(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx) +{ + lockdep_assert_held(&local->chanctx_mtx); + + WARN_ON_ONCE(ieee80211_chanctx_refcount(local, ctx) != 0); + + list_del_rcu(&ctx->list); + ieee80211_del_chanctx(local, ctx); + kfree_rcu(ctx, rcu_head); +} + +void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx) +{ + struct ieee80211_chanctx_conf *conf = &ctx->conf; + struct ieee80211_sub_if_data *sdata; + const struct cfg80211_chan_def *compat = NULL; + struct sta_info *sta; + + lockdep_assert_held(&local->chanctx_mtx); + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + int link_id; + + if (!ieee80211_sdata_running(sdata)) + continue; + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + continue; + + for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { + struct ieee80211_bss_conf *link_conf = + rcu_dereference(sdata->vif.link_conf[link_id]); + + if (!link_conf) + continue; + + if (rcu_access_pointer(link_conf->chanctx_conf) != conf) + continue; + + if (!compat) + compat = &link_conf->chandef; + + compat = cfg80211_chandef_compatible(&link_conf->chandef, + compat); + if (WARN_ON_ONCE(!compat)) + break; + } + } + + if (WARN_ON_ONCE(!compat)) { + rcu_read_unlock(); + return; + } + + /* TDLS peers can sometimes affect the chandef width */ + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (!sta->uploaded || + !test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) || + !test_sta_flag(sta, WLAN_STA_AUTHORIZED) || + !sta->tdls_chandef.chan) + continue; + + compat = cfg80211_chandef_compatible(&sta->tdls_chandef, + compat); + if (WARN_ON_ONCE(!compat)) + break; + } + rcu_read_unlock(); + + if (!compat) + return; + + ieee80211_change_chanctx(local, ctx, ctx, compat); +} + +static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local, + struct ieee80211_chanctx *chanctx) +{ + bool radar_enabled; + + lockdep_assert_held(&local->chanctx_mtx); + /* for ieee80211_is_radar_required */ + lockdep_assert_held(&local->mtx); + + radar_enabled = ieee80211_chanctx_radar_required(local, chanctx); + + if (radar_enabled == chanctx->conf.radar_enabled) + return; + + chanctx->conf.radar_enabled = radar_enabled; + + if (!local->use_chanctx) { + local->hw.conf.radar_enabled = chanctx->conf.radar_enabled; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + } + + drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR); +} + +static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link, + struct ieee80211_chanctx *new_ctx) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *conf; + struct ieee80211_chanctx *curr_ctx = NULL; + int ret = 0; + + if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_NAN)) + return -ENOTSUPP; + + conf = rcu_dereference_protected(link->conf->chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + + if (conf) { + curr_ctx = container_of(conf, struct ieee80211_chanctx, conf); + + drv_unassign_vif_chanctx(local, sdata, link->conf, curr_ctx); + conf = NULL; + list_del(&link->assigned_chanctx_list); + } + + if (new_ctx) { + /* recalc 
considering the link we'll use it for now */ + ieee80211_recalc_chanctx_min_def(local, new_ctx, link); + + ret = drv_assign_vif_chanctx(local, sdata, link->conf, new_ctx); + if (ret) + goto out; + + conf = &new_ctx->conf; + list_add(&link->assigned_chanctx_list, + &new_ctx->assigned_links); + } + +out: + rcu_assign_pointer(link->conf->chanctx_conf, conf); + + sdata->vif.cfg.idle = !conf; + + if (curr_ctx && ieee80211_chanctx_num_assigned(local, curr_ctx) > 0) { + ieee80211_recalc_chanctx_chantype(local, curr_ctx); + ieee80211_recalc_smps_chanctx(local, curr_ctx); + ieee80211_recalc_radar_chanctx(local, curr_ctx); + ieee80211_recalc_chanctx_min_def(local, curr_ctx, NULL); + } + + if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) { + ieee80211_recalc_txpower(sdata, false); + ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL); + } + + if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE && + sdata->vif.type != NL80211_IFTYPE_MONITOR) + ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_IDLE); + + ieee80211_check_fast_xmit_iface(sdata); + + return ret; +} + +void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, + struct ieee80211_chanctx *chanctx) +{ + struct ieee80211_sub_if_data *sdata; + u8 rx_chains_static, rx_chains_dynamic; + + lockdep_assert_held(&local->chanctx_mtx); + + rx_chains_static = 1; + rx_chains_dynamic = 1; + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + u8 needed_static, needed_dynamic; + unsigned int link_id; + + if (!ieee80211_sdata_running(sdata)) + continue; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + if (!sdata->u.mgd.associated) + continue; + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_OCB: + break; + default: + continue; + } + + for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { + struct ieee80211_link_data *link; + + link = rcu_dereference(sdata->link[link_id]); + + if (!link) + continue; + + if (rcu_access_pointer(link->conf->chanctx_conf) != &chanctx->conf) + continue; + + switch (link->smps_mode) { + default: + WARN_ONCE(1, "Invalid SMPS mode %d\n", + link->smps_mode); + fallthrough; + case IEEE80211_SMPS_OFF: + needed_static = link->needed_rx_chains; + needed_dynamic = link->needed_rx_chains; + break; + case IEEE80211_SMPS_DYNAMIC: + needed_static = 1; + needed_dynamic = link->needed_rx_chains; + break; + case IEEE80211_SMPS_STATIC: + needed_static = 1; + needed_dynamic = 1; + break; + } + + rx_chains_static = max(rx_chains_static, needed_static); + rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic); + } + } + + /* Disable SMPS for the monitor interface */ + sdata = rcu_dereference(local->monitor_sdata); + if (sdata && + rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &chanctx->conf) + rx_chains_dynamic = rx_chains_static = local->rx_chains; + + rcu_read_unlock(); + + if (!local->use_chanctx) { + if (rx_chains_static > 1) + local->smps_mode = IEEE80211_SMPS_OFF; + else if (rx_chains_dynamic > 1) + local->smps_mode = IEEE80211_SMPS_DYNAMIC; + else + local->smps_mode = IEEE80211_SMPS_STATIC; + ieee80211_hw_config(local, 0); + } + + if (rx_chains_static == chanctx->conf.rx_chains_static && + rx_chains_dynamic == chanctx->conf.rx_chains_dynamic) + return; + + chanctx->conf.rx_chains_static = rx_chains_static; + chanctx->conf.rx_chains_dynamic = rx_chains_dynamic; + drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RX_CHAINS); +} + +static void 
+__ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link, + bool clear) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + unsigned int link_id = link->link_id; + struct ieee80211_bss_conf *link_conf = link->conf; + struct ieee80211_local *local __maybe_unused = sdata->local; + struct ieee80211_sub_if_data *vlan; + struct ieee80211_chanctx_conf *conf; + + if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP)) + return; + + lockdep_assert_held(&local->mtx); + + /* Check that conf exists, even when clearing this function + * must be called with the AP's channel context still there + * as it would otherwise cause VLANs to have an invalid + * channel context pointer for a while, possibly pointing + * to a channel context that has already been freed. + */ + conf = rcu_dereference_protected(link_conf->chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + WARN_ON(!conf); + + if (clear) + conf = NULL; + + rcu_read_lock(); + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { + struct ieee80211_bss_conf *vlan_conf; + + vlan_conf = rcu_dereference(vlan->vif.link_conf[link_id]); + if (WARN_ON(!vlan_conf)) + continue; + + rcu_assign_pointer(vlan_conf->chanctx_conf, conf); + } + rcu_read_unlock(); +} + +void ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link, + bool clear) +{ + struct ieee80211_local *local = link->sdata->local; + + mutex_lock(&local->chanctx_mtx); + + __ieee80211_link_copy_chanctx_to_vlans(link, clear); + + mutex_unlock(&local->chanctx_mtx); +} + +int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_chanctx *ctx = link->reserved_chanctx; + + lockdep_assert_held(&sdata->local->chanctx_mtx); + + if (WARN_ON(!ctx)) + return -EINVAL; + + list_del(&link->reserved_chanctx_list); + link->reserved_chanctx = NULL; + + if (ieee80211_chanctx_refcount(sdata->local, ctx) == 0) { + if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) { + if (WARN_ON(!ctx->replace_ctx)) + return -EINVAL; + + WARN_ON(ctx->replace_ctx->replace_state != + IEEE80211_CHANCTX_WILL_BE_REPLACED); + WARN_ON(ctx->replace_ctx->replace_ctx != ctx); + + ctx->replace_ctx->replace_ctx = NULL; + ctx->replace_ctx->replace_state = + IEEE80211_CHANCTX_REPLACE_NONE; + + list_del_rcu(&ctx->list); + kfree_rcu(ctx, rcu_head); + } else { + ieee80211_free_chanctx(sdata->local, ctx); + } + } + + return 0; +} + +int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link, + const struct cfg80211_chan_def *chandef, + enum ieee80211_chanctx_mode mode, + bool radar_required) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx *new_ctx, *curr_ctx, *ctx; + + lockdep_assert_held(&local->chanctx_mtx); + + curr_ctx = ieee80211_link_get_chanctx(link); + if (curr_ctx && local->use_chanctx && !local->ops->switch_vif_chanctx) + return -ENOTSUPP; + + new_ctx = ieee80211_find_reservation_chanctx(local, chandef, mode); + if (!new_ctx) { + if (ieee80211_can_create_new_chanctx(local)) { + new_ctx = ieee80211_new_chanctx(local, chandef, mode); + if (IS_ERR(new_ctx)) + return PTR_ERR(new_ctx); + } else { + if (!curr_ctx || + (curr_ctx->replace_state == + IEEE80211_CHANCTX_WILL_BE_REPLACED) || + !list_empty(&curr_ctx->reserved_links)) { + /* + * Another link already requested this context + * for a reservation. Find another one hoping + * all links assigned to it will also switch + * soon enough. 
+ * + * TODO: This needs a little more work as some + * cases (more than 2 chanctx capable devices) + * may fail which could otherwise succeed + * provided some channel context juggling was + * performed. + * + * Consider ctx1..3, link1..6, each ctx has 2 + * links. link1 and link2 from ctx1 request new + * different chandefs starting 2 in-place + * reserations with ctx4 and ctx5 replacing + * ctx1 and ctx2 respectively. Next link5 and + * link6 from ctx3 reserve ctx4. If link3 and + * link4 remain on ctx2 as they are then this + * fails unless `replace_ctx` from ctx5 is + * replaced with ctx3. + */ + list_for_each_entry(ctx, &local->chanctx_list, + list) { + if (ctx->replace_state != + IEEE80211_CHANCTX_REPLACE_NONE) + continue; + + if (!list_empty(&ctx->reserved_links)) + continue; + + curr_ctx = ctx; + break; + } + } + + /* + * If that's true then all available contexts already + * have reservations and cannot be used. + */ + if (!curr_ctx || + (curr_ctx->replace_state == + IEEE80211_CHANCTX_WILL_BE_REPLACED) || + !list_empty(&curr_ctx->reserved_links)) + return -EBUSY; + + new_ctx = ieee80211_alloc_chanctx(local, chandef, mode); + if (!new_ctx) + return -ENOMEM; + + new_ctx->replace_ctx = curr_ctx; + new_ctx->replace_state = + IEEE80211_CHANCTX_REPLACES_OTHER; + + curr_ctx->replace_ctx = new_ctx; + curr_ctx->replace_state = + IEEE80211_CHANCTX_WILL_BE_REPLACED; + + list_add_rcu(&new_ctx->list, &local->chanctx_list); + } + } + + list_add(&link->reserved_chanctx_list, &new_ctx->reserved_links); + link->reserved_chanctx = new_ctx; + link->reserved_chandef = *chandef; + link->reserved_radar_required = radar_required; + link->reserved_ready = false; + + return 0; +} + +static void +ieee80211_link_chanctx_reservation_complete(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_OCB: + ieee80211_queue_work(&sdata->local->hw, + &link->csa_finalize_work); + break; + case NL80211_IFTYPE_STATION: + wiphy_delayed_work_queue(sdata->local->hw.wiphy, + &link->u.mgd.chswitch_work, 0); + break; + case NL80211_IFTYPE_UNSPECIFIED: + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_P2P_DEVICE: + case NL80211_IFTYPE_NAN: + case NUM_NL80211_IFTYPES: + WARN_ON(1); + break; + } +} + +static void +ieee80211_link_update_chandef(struct ieee80211_link_data *link, + const struct cfg80211_chan_def *chandef) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + unsigned int link_id = link->link_id; + struct ieee80211_sub_if_data *vlan; + + link->conf->chandef = *chandef; + + if (sdata->vif.type != NL80211_IFTYPE_AP) + return; + + rcu_read_lock(); + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { + struct ieee80211_bss_conf *vlan_conf; + + vlan_conf = rcu_dereference(vlan->vif.link_conf[link_id]); + if (WARN_ON(!vlan_conf)) + continue; + + vlan_conf->chandef = *chandef; + } + rcu_read_unlock(); +} + +static int +ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_bss_conf *link_conf = link->conf; + struct ieee80211_local *local = sdata->local; + struct ieee80211_vif_chanctx_switch vif_chsw[1] = {}; + struct ieee80211_chanctx *old_ctx, *new_ctx; + const struct cfg80211_chan_def *chandef; + u64 changed = 0; + int 
err; + + lockdep_assert_held(&local->mtx); + lockdep_assert_held(&local->chanctx_mtx); + + new_ctx = link->reserved_chanctx; + old_ctx = ieee80211_link_get_chanctx(link); + + if (WARN_ON(!link->reserved_ready)) + return -EBUSY; + + if (WARN_ON(!new_ctx)) + return -EINVAL; + + if (WARN_ON(!old_ctx)) + return -EINVAL; + + if (WARN_ON(new_ctx->replace_state == + IEEE80211_CHANCTX_REPLACES_OTHER)) + return -EINVAL; + + chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx, + &link->reserved_chandef); + if (WARN_ON(!chandef)) + return -EINVAL; + + if (link_conf->chandef.width != link->reserved_chandef.width) + changed = BSS_CHANGED_BANDWIDTH; + + ieee80211_link_update_chandef(link, &link->reserved_chandef); + + _ieee80211_change_chanctx(local, new_ctx, old_ctx, chandef, link); + + vif_chsw[0].vif = &sdata->vif; + vif_chsw[0].old_ctx = &old_ctx->conf; + vif_chsw[0].new_ctx = &new_ctx->conf; + vif_chsw[0].link_conf = link->conf; + + list_del(&link->reserved_chanctx_list); + link->reserved_chanctx = NULL; + + err = drv_switch_vif_chanctx(local, vif_chsw, 1, + CHANCTX_SWMODE_REASSIGN_VIF); + if (err) { + if (ieee80211_chanctx_refcount(local, new_ctx) == 0) + ieee80211_free_chanctx(local, new_ctx); + + goto out; + } + + list_move(&link->assigned_chanctx_list, &new_ctx->assigned_links); + rcu_assign_pointer(link_conf->chanctx_conf, &new_ctx->conf); + + if (sdata->vif.type == NL80211_IFTYPE_AP) + __ieee80211_link_copy_chanctx_to_vlans(link, false); + + ieee80211_check_fast_xmit_iface(sdata); + + if (ieee80211_chanctx_refcount(local, old_ctx) == 0) + ieee80211_free_chanctx(local, old_ctx); + + ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL); + ieee80211_recalc_smps_chanctx(local, new_ctx); + ieee80211_recalc_radar_chanctx(local, new_ctx); + + if (changed) + ieee80211_link_info_change_notify(sdata, link, changed); + +out: + ieee80211_link_chanctx_reservation_complete(link); + return err; +} + +static int +ieee80211_link_use_reserved_assign(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx *old_ctx, *new_ctx; + const struct cfg80211_chan_def *chandef; + int err; + + old_ctx = ieee80211_link_get_chanctx(link); + new_ctx = link->reserved_chanctx; + + if (WARN_ON(!link->reserved_ready)) + return -EINVAL; + + if (WARN_ON(old_ctx)) + return -EINVAL; + + if (WARN_ON(!new_ctx)) + return -EINVAL; + + if (WARN_ON(new_ctx->replace_state == + IEEE80211_CHANCTX_REPLACES_OTHER)) + return -EINVAL; + + chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx, + &link->reserved_chandef); + if (WARN_ON(!chandef)) + return -EINVAL; + + ieee80211_change_chanctx(local, new_ctx, new_ctx, chandef); + + list_del(&link->reserved_chanctx_list); + link->reserved_chanctx = NULL; + + err = ieee80211_assign_link_chanctx(link, new_ctx); + if (err) { + if (ieee80211_chanctx_refcount(local, new_ctx) == 0) + ieee80211_free_chanctx(local, new_ctx); + + goto out; + } + +out: + ieee80211_link_chanctx_reservation_complete(link); + return err; +} + +static bool +ieee80211_link_has_in_place_reservation(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_chanctx *old_ctx, *new_ctx; + + lockdep_assert_held(&sdata->local->chanctx_mtx); + + new_ctx = link->reserved_chanctx; + old_ctx = ieee80211_link_get_chanctx(link); + + if (!old_ctx) + return false; + + if (WARN_ON(!new_ctx)) + return false; + + if (old_ctx->replace_state != 
IEEE80211_CHANCTX_WILL_BE_REPLACED) + return false; + + if (new_ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) + return false; + + return true; +} + +static int ieee80211_chsw_switch_hwconf(struct ieee80211_local *local, + struct ieee80211_chanctx *new_ctx) +{ + const struct cfg80211_chan_def *chandef; + + lockdep_assert_held(&local->mtx); + lockdep_assert_held(&local->chanctx_mtx); + + chandef = ieee80211_chanctx_reserved_chandef(local, new_ctx, NULL); + if (WARN_ON(!chandef)) + return -EINVAL; + + local->hw.conf.radar_enabled = new_ctx->conf.radar_enabled; + local->_oper_chandef = *chandef; + ieee80211_hw_config(local, 0); + + return 0; +} + +static int ieee80211_chsw_switch_vifs(struct ieee80211_local *local, + int n_vifs) +{ + struct ieee80211_vif_chanctx_switch *vif_chsw; + struct ieee80211_link_data *link; + struct ieee80211_chanctx *ctx, *old_ctx; + int i, err; + + lockdep_assert_held(&local->mtx); + lockdep_assert_held(&local->chanctx_mtx); + + vif_chsw = kcalloc(n_vifs, sizeof(vif_chsw[0]), GFP_KERNEL); + if (!vif_chsw) + return -ENOMEM; + + i = 0; + list_for_each_entry(ctx, &local->chanctx_list, list) { + if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) + continue; + + if (WARN_ON(!ctx->replace_ctx)) { + err = -EINVAL; + goto out; + } + + list_for_each_entry(link, &ctx->reserved_links, + reserved_chanctx_list) { + if (!ieee80211_link_has_in_place_reservation(link)) + continue; + + old_ctx = ieee80211_link_get_chanctx(link); + vif_chsw[i].vif = &link->sdata->vif; + vif_chsw[i].old_ctx = &old_ctx->conf; + vif_chsw[i].new_ctx = &ctx->conf; + vif_chsw[i].link_conf = link->conf; + + i++; + } + } + + err = drv_switch_vif_chanctx(local, vif_chsw, n_vifs, + CHANCTX_SWMODE_SWAP_CONTEXTS); + +out: + kfree(vif_chsw); + return err; +} + +static int ieee80211_chsw_switch_ctxs(struct ieee80211_local *local) +{ + struct ieee80211_chanctx *ctx; + int err; + + lockdep_assert_held(&local->mtx); + lockdep_assert_held(&local->chanctx_mtx); + + list_for_each_entry(ctx, &local->chanctx_list, list) { + if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) + continue; + + if (!list_empty(&ctx->replace_ctx->assigned_links)) + continue; + + ieee80211_del_chanctx(local, ctx->replace_ctx); + err = ieee80211_add_chanctx(local, ctx); + if (err) + goto err; + } + + return 0; + +err: + WARN_ON(ieee80211_add_chanctx(local, ctx)); + list_for_each_entry_continue_reverse(ctx, &local->chanctx_list, list) { + if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) + continue; + + if (!list_empty(&ctx->replace_ctx->assigned_links)) + continue; + + ieee80211_del_chanctx(local, ctx); + WARN_ON(ieee80211_add_chanctx(local, ctx->replace_ctx)); + } + + return err; +} + +static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) +{ + struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx; + struct ieee80211_chanctx *new_ctx = NULL; + int err, n_assigned, n_reserved, n_ready; + int n_ctx = 0, n_vifs_switch = 0, n_vifs_assign = 0, n_vifs_ctxless = 0; + + lockdep_assert_held(&local->mtx); + lockdep_assert_held(&local->chanctx_mtx); + + /* + * If there are 2 independent pairs of channel contexts performing + * cross-switch of their vifs this code will still wait until both are + * ready even though it could be possible to switch one before the + * other is ready. + * + * For practical reasons and code simplicity just do a single huge + * switch. + */ + + /* + * Verify if the reservation is still feasible. 
+ * - if it's not then disconnect + * - if it is but not all vifs necessary are ready then defer + */ + + list_for_each_entry(ctx, &local->chanctx_list, list) { + struct ieee80211_link_data *link; + + if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) + continue; + + if (WARN_ON(!ctx->replace_ctx)) { + err = -EINVAL; + goto err; + } + + if (!local->use_chanctx) + new_ctx = ctx; + + n_ctx++; + + n_assigned = 0; + n_reserved = 0; + n_ready = 0; + + list_for_each_entry(link, &ctx->replace_ctx->assigned_links, + assigned_chanctx_list) { + n_assigned++; + if (link->reserved_chanctx) { + n_reserved++; + if (link->reserved_ready) + n_ready++; + } + } + + if (n_assigned != n_reserved) { + if (n_ready == n_reserved) { + wiphy_info(local->hw.wiphy, + "channel context reservation cannot be finalized because some interfaces aren't switching\n"); + err = -EBUSY; + goto err; + } + + return -EAGAIN; + } + + ctx->conf.radar_enabled = false; + list_for_each_entry(link, &ctx->reserved_links, + reserved_chanctx_list) { + if (ieee80211_link_has_in_place_reservation(link) && + !link->reserved_ready) + return -EAGAIN; + + old_ctx = ieee80211_link_get_chanctx(link); + if (old_ctx) { + if (old_ctx->replace_state == + IEEE80211_CHANCTX_WILL_BE_REPLACED) + n_vifs_switch++; + else + n_vifs_assign++; + } else { + n_vifs_ctxless++; + } + + if (link->reserved_radar_required) + ctx->conf.radar_enabled = true; + } + } + + if (WARN_ON(n_ctx == 0) || + WARN_ON(n_vifs_switch == 0 && + n_vifs_assign == 0 && + n_vifs_ctxless == 0) || + WARN_ON(n_ctx > 1 && !local->use_chanctx) || + WARN_ON(!new_ctx && !local->use_chanctx)) { + err = -EINVAL; + goto err; + } + + /* + * All necessary vifs are ready. Perform the switch now depending on + * reservations and driver capabilities. + */ + + if (local->use_chanctx) { + if (n_vifs_switch > 0) { + err = ieee80211_chsw_switch_vifs(local, n_vifs_switch); + if (err) + goto err; + } + + if (n_vifs_assign > 0 || n_vifs_ctxless > 0) { + err = ieee80211_chsw_switch_ctxs(local); + if (err) + goto err; + } + } else { + err = ieee80211_chsw_switch_hwconf(local, new_ctx); + if (err) + goto err; + } + + /* + * Update all structures, values and pointers to point to new channel + * context(s). 
+ */ + list_for_each_entry(ctx, &local->chanctx_list, list) { + struct ieee80211_link_data *link, *link_tmp; + + if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) + continue; + + if (WARN_ON(!ctx->replace_ctx)) { + err = -EINVAL; + goto err; + } + + list_for_each_entry(link, &ctx->reserved_links, + reserved_chanctx_list) { + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_bss_conf *link_conf = link->conf; + u64 changed = 0; + + if (!ieee80211_link_has_in_place_reservation(link)) + continue; + + rcu_assign_pointer(link_conf->chanctx_conf, + &ctx->conf); + + if (sdata->vif.type == NL80211_IFTYPE_AP) + __ieee80211_link_copy_chanctx_to_vlans(link, + false); + + ieee80211_check_fast_xmit_iface(sdata); + + link->radar_required = link->reserved_radar_required; + + if (link_conf->chandef.width != link->reserved_chandef.width) + changed = BSS_CHANGED_BANDWIDTH; + + ieee80211_link_update_chandef(link, &link->reserved_chandef); + if (changed) + ieee80211_link_info_change_notify(sdata, + link, + changed); + + ieee80211_recalc_txpower(sdata, false); + } + + ieee80211_recalc_chanctx_chantype(local, ctx); + ieee80211_recalc_smps_chanctx(local, ctx); + ieee80211_recalc_radar_chanctx(local, ctx); + ieee80211_recalc_chanctx_min_def(local, ctx, NULL); + + list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links, + reserved_chanctx_list) { + if (ieee80211_link_get_chanctx(link) != ctx) + continue; + + list_del(&link->reserved_chanctx_list); + list_move(&link->assigned_chanctx_list, + &ctx->assigned_links); + link->reserved_chanctx = NULL; + + ieee80211_link_chanctx_reservation_complete(link); + } + + /* + * This context might have been a dependency for an already + * ready re-assign reservation interface that was deferred. Do + * not propagate error to the caller though. The in-place + * reservation for originally requested interface has already + * succeeded at this point. 
+ */ + list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links, + reserved_chanctx_list) { + if (WARN_ON(ieee80211_link_has_in_place_reservation(link))) + continue; + + if (WARN_ON(link->reserved_chanctx != ctx)) + continue; + + if (!link->reserved_ready) + continue; + + if (ieee80211_link_get_chanctx(link)) + err = ieee80211_link_use_reserved_reassign(link); + else + err = ieee80211_link_use_reserved_assign(link); + + if (err) { + link_info(link, + "failed to finalize (re-)assign reservation (err=%d)\n", + err); + ieee80211_link_unreserve_chanctx(link); + cfg80211_stop_iface(local->hw.wiphy, + &link->sdata->wdev, + GFP_KERNEL); + } + } + } + + /* + * Finally free old contexts + */ + + list_for_each_entry_safe(ctx, ctx_tmp, &local->chanctx_list, list) { + if (ctx->replace_state != IEEE80211_CHANCTX_WILL_BE_REPLACED) + continue; + + ctx->replace_ctx->replace_ctx = NULL; + ctx->replace_ctx->replace_state = + IEEE80211_CHANCTX_REPLACE_NONE; + + list_del_rcu(&ctx->list); + kfree_rcu(ctx, rcu_head); + } + + return 0; + +err: + list_for_each_entry(ctx, &local->chanctx_list, list) { + struct ieee80211_link_data *link, *link_tmp; + + if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) + continue; + + list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links, + reserved_chanctx_list) { + ieee80211_link_unreserve_chanctx(link); + ieee80211_link_chanctx_reservation_complete(link); + } + } + + return err; +} + +static void __ieee80211_link_release_channel(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_bss_conf *link_conf = link->conf; + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *conf; + struct ieee80211_chanctx *ctx; + bool use_reserved_switch = false; + + lockdep_assert_held(&local->chanctx_mtx); + + conf = rcu_dereference_protected(link_conf->chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + if (!conf) + return; + + ctx = container_of(conf, struct ieee80211_chanctx, conf); + + if (link->reserved_chanctx) { + if (link->reserved_chanctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER && + ieee80211_chanctx_num_reserved(local, link->reserved_chanctx) > 1) + use_reserved_switch = true; + + ieee80211_link_unreserve_chanctx(link); + } + + ieee80211_assign_link_chanctx(link, NULL); + if (ieee80211_chanctx_refcount(local, ctx) == 0) + ieee80211_free_chanctx(local, ctx); + + link->radar_required = false; + + /* Unreserving may ready an in-place reservation. 
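+ * If the context this link had reserved was set up to replace another
+ * context and other links still hold reservations on it, run
+ * ieee80211_vif_use_reserved_switch() below so their pending in-place
+ * switch can still complete without this link.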
*/ + if (use_reserved_switch) + ieee80211_vif_use_reserved_switch(local); +} + +int ieee80211_link_use_channel(struct ieee80211_link_data *link, + const struct cfg80211_chan_def *chandef, + enum ieee80211_chanctx_mode mode) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx *ctx; + u8 radar_detect_width = 0; + int ret; + + lockdep_assert_held(&local->mtx); + + if (sdata->vif.active_links && + !(sdata->vif.active_links & BIT(link->link_id))) { + ieee80211_link_update_chandef(link, chandef); + return 0; + } + + mutex_lock(&local->chanctx_mtx); + + ret = cfg80211_chandef_dfs_required(local->hw.wiphy, + chandef, + sdata->wdev.iftype); + if (ret < 0) + goto out; + if (ret > 0) + radar_detect_width = BIT(chandef->width); + + link->radar_required = ret; + + ret = ieee80211_check_combinations(sdata, chandef, mode, + radar_detect_width); + if (ret < 0) + goto out; + + __ieee80211_link_release_channel(link); + + ctx = ieee80211_find_chanctx(local, chandef, mode); + if (!ctx) + ctx = ieee80211_new_chanctx(local, chandef, mode); + if (IS_ERR(ctx)) { + ret = PTR_ERR(ctx); + goto out; + } + + ieee80211_link_update_chandef(link, chandef); + + ret = ieee80211_assign_link_chanctx(link, ctx); + if (ret) { + /* if assign fails refcount stays the same */ + if (ieee80211_chanctx_refcount(local, ctx) == 0) + ieee80211_free_chanctx(local, ctx); + goto out; + } + + ieee80211_recalc_smps_chanctx(local, ctx); + ieee80211_recalc_radar_chanctx(local, ctx); + out: + if (ret) + link->radar_required = false; + + mutex_unlock(&local->chanctx_mtx); + return ret; +} + +int ieee80211_link_use_reserved_context(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx *new_ctx; + struct ieee80211_chanctx *old_ctx; + int err; + + lockdep_assert_held(&local->mtx); + lockdep_assert_held(&local->chanctx_mtx); + + new_ctx = link->reserved_chanctx; + old_ctx = ieee80211_link_get_chanctx(link); + + if (WARN_ON(!new_ctx)) + return -EINVAL; + + if (WARN_ON(new_ctx->replace_state == + IEEE80211_CHANCTX_WILL_BE_REPLACED)) + return -EINVAL; + + if (WARN_ON(link->reserved_ready)) + return -EINVAL; + + link->reserved_ready = true; + + if (new_ctx->replace_state == IEEE80211_CHANCTX_REPLACE_NONE) { + if (old_ctx) + return ieee80211_link_use_reserved_reassign(link); + + return ieee80211_link_use_reserved_assign(link); + } + + /* + * In-place reservation may need to be finalized now either if: + * a) sdata is taking part in the swapping itself and is the last one + * b) sdata has switched with a re-assign reservation to an existing + * context readying in-place switching of old_ctx + * + * In case of (b) do not propagate the error up because the requested + * sdata already switched successfully. Just spill an extra warning. + * The ieee80211_vif_use_reserved_switch() already stops all necessary + * interfaces upon failure. 
+ */ + if ((old_ctx && + old_ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) || + new_ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) { + err = ieee80211_vif_use_reserved_switch(local); + if (err && err != -EAGAIN) { + if (new_ctx->replace_state == + IEEE80211_CHANCTX_REPLACES_OTHER) + return err; + + wiphy_info(local->hw.wiphy, + "depending in-place reservation failed (err=%d)\n", + err); + } + } + + return 0; +} + +int ieee80211_link_change_bandwidth(struct ieee80211_link_data *link, + const struct cfg80211_chan_def *chandef, + u64 *changed) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_bss_conf *link_conf = link->conf; + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *conf; + struct ieee80211_chanctx *ctx; + const struct cfg80211_chan_def *compat; + int ret; + + if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, + IEEE80211_CHAN_DISABLED)) + return -EINVAL; + + mutex_lock(&local->chanctx_mtx); + if (cfg80211_chandef_identical(chandef, &link_conf->chandef)) { + ret = 0; + goto out; + } + + if (chandef->width == NL80211_CHAN_WIDTH_20_NOHT || + link_conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT) { + ret = -EINVAL; + goto out; + } + + conf = rcu_dereference_protected(link_conf->chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + if (!conf) { + ret = -EINVAL; + goto out; + } + + ctx = container_of(conf, struct ieee80211_chanctx, conf); + + compat = cfg80211_chandef_compatible(&conf->def, chandef); + if (!compat) { + ret = -EINVAL; + goto out; + } + + switch (ctx->replace_state) { + case IEEE80211_CHANCTX_REPLACE_NONE: + if (!ieee80211_chanctx_reserved_chandef(local, ctx, compat)) { + ret = -EBUSY; + goto out; + } + break; + case IEEE80211_CHANCTX_WILL_BE_REPLACED: + /* TODO: Perhaps the bandwidth change could be treated as a + * reservation itself? 
*/ + ret = -EBUSY; + goto out; + case IEEE80211_CHANCTX_REPLACES_OTHER: + /* channel context that is going to replace another channel + * context doesn't really exist and shouldn't be assigned + * anywhere yet */ + WARN_ON(1); + break; + } + + ieee80211_link_update_chandef(link, chandef); + + ieee80211_recalc_chanctx_chantype(local, ctx); + + *changed |= BSS_CHANGED_BANDWIDTH; + ret = 0; + out: + mutex_unlock(&local->chanctx_mtx); + return ret; +} + +void ieee80211_link_release_channel(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + + mutex_lock(&sdata->local->chanctx_mtx); + if (rcu_access_pointer(link->conf->chanctx_conf)) { + lockdep_assert_held(&sdata->local->mtx); + __ieee80211_link_release_channel(link); + } + mutex_unlock(&sdata->local->chanctx_mtx); +} + +void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + unsigned int link_id = link->link_id; + struct ieee80211_bss_conf *link_conf = link->conf; + struct ieee80211_bss_conf *ap_conf; + struct ieee80211_local *local = sdata->local; + struct ieee80211_sub_if_data *ap; + struct ieee80211_chanctx_conf *conf; + + if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->bss)) + return; + + ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); + + mutex_lock(&local->chanctx_mtx); + + rcu_read_lock(); + ap_conf = rcu_dereference(ap->vif.link_conf[link_id]); + conf = rcu_dereference_protected(ap_conf->chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + rcu_assign_pointer(link_conf->chanctx_conf, conf); + rcu_read_unlock(); + mutex_unlock(&local->chanctx_mtx); +} + +void ieee80211_iter_chan_contexts_atomic( + struct ieee80211_hw *hw, + void (*iter)(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *chanctx_conf, + void *data), + void *iter_data) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_chanctx *ctx; + + rcu_read_lock(); + list_for_each_entry_rcu(ctx, &local->chanctx_list, list) + if (ctx->driver_present) + iter(hw, &ctx->conf, iter_data); + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(ieee80211_iter_chan_contexts_atomic); diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h new file mode 100644 index 0000000000..d49894df23 --- /dev/null +++ b/net/mac80211/debug.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Portions + * Copyright (C) 2022 - 2023 Intel Corporation + */ +#ifndef __MAC80211_DEBUG_H +#define __MAC80211_DEBUG_H +#include <net/cfg80211.h> + +#ifdef CONFIG_MAC80211_OCB_DEBUG +#define MAC80211_OCB_DEBUG 1 +#else +#define MAC80211_OCB_DEBUG 0 +#endif + +#ifdef CONFIG_MAC80211_IBSS_DEBUG +#define MAC80211_IBSS_DEBUG 1 +#else +#define MAC80211_IBSS_DEBUG 0 +#endif + +#ifdef CONFIG_MAC80211_PS_DEBUG +#define MAC80211_PS_DEBUG 1 +#else +#define MAC80211_PS_DEBUG 0 +#endif + +#ifdef CONFIG_MAC80211_HT_DEBUG +#define MAC80211_HT_DEBUG 1 +#else +#define MAC80211_HT_DEBUG 0 +#endif + +#ifdef CONFIG_MAC80211_MPL_DEBUG +#define MAC80211_MPL_DEBUG 1 +#else +#define MAC80211_MPL_DEBUG 0 +#endif + +#ifdef CONFIG_MAC80211_MPATH_DEBUG +#define MAC80211_MPATH_DEBUG 1 +#else +#define MAC80211_MPATH_DEBUG 0 +#endif + +#ifdef CONFIG_MAC80211_MHWMP_DEBUG +#define MAC80211_MHWMP_DEBUG 1 +#else +#define MAC80211_MHWMP_DEBUG 0 +#endif + +#ifdef CONFIG_MAC80211_MESH_SYNC_DEBUG +#define MAC80211_MESH_SYNC_DEBUG 1 +#else +#define MAC80211_MESH_SYNC_DEBUG 0 +#endif + +#ifdef CONFIG_MAC80211_MESH_CSA_DEBUG +#define MAC80211_MESH_CSA_DEBUG 1 +#else +#define 
MAC80211_MESH_CSA_DEBUG 0 +#endif + +#ifdef CONFIG_MAC80211_MESH_PS_DEBUG +#define MAC80211_MESH_PS_DEBUG 1 +#else +#define MAC80211_MESH_PS_DEBUG 0 +#endif + +#ifdef CONFIG_MAC80211_TDLS_DEBUG +#define MAC80211_TDLS_DEBUG 1 +#else +#define MAC80211_TDLS_DEBUG 0 +#endif + +#ifdef CONFIG_MAC80211_STA_DEBUG +#define MAC80211_STA_DEBUG 1 +#else +#define MAC80211_STA_DEBUG 0 +#endif + +#ifdef CONFIG_MAC80211_MLME_DEBUG +#define MAC80211_MLME_DEBUG 1 +#else +#define MAC80211_MLME_DEBUG 0 +#endif + +#ifdef CONFIG_MAC80211_MESSAGE_TRACING +void __sdata_info(const char *fmt, ...) __printf(1, 2); +void __sdata_dbg(bool print, const char *fmt, ...) __printf(2, 3); +void __sdata_err(const char *fmt, ...) __printf(1, 2); +void __wiphy_dbg(struct wiphy *wiphy, bool print, const char *fmt, ...) + __printf(3, 4); + +#define _sdata_info(sdata, fmt, ...) \ + __sdata_info("%s: " fmt, (sdata)->name, ##__VA_ARGS__) +#define _sdata_dbg(print, sdata, fmt, ...) \ + __sdata_dbg(print, "%s: " fmt, (sdata)->name, ##__VA_ARGS__) +#define _sdata_err(sdata, fmt, ...) \ + __sdata_err("%s: " fmt, (sdata)->name, ##__VA_ARGS__) +#define _wiphy_dbg(print, wiphy, fmt, ...) \ + __wiphy_dbg(wiphy, print, fmt, ##__VA_ARGS__) +#else +#define _sdata_info(sdata, fmt, ...) \ +do { \ + pr_info("%s: " fmt, \ + (sdata)->name, ##__VA_ARGS__); \ +} while (0) + +#define _sdata_dbg(print, sdata, fmt, ...) \ +do { \ + if (print) \ + pr_debug("%s: " fmt, \ + (sdata)->name, ##__VA_ARGS__); \ +} while (0) + +#define _sdata_err(sdata, fmt, ...) \ +do { \ + pr_err("%s: " fmt, \ + (sdata)->name, ##__VA_ARGS__); \ +} while (0) + +#define _wiphy_dbg(print, wiphy, fmt, ...) \ +do { \ + if (print) \ + wiphy_dbg((wiphy), fmt, ##__VA_ARGS__); \ +} while (0) +#endif + +#define sdata_info(sdata, fmt, ...) \ + _sdata_info(sdata, fmt, ##__VA_ARGS__) +#define sdata_err(sdata, fmt, ...) \ + _sdata_err(sdata, fmt, ##__VA_ARGS__) +#define sdata_dbg(sdata, fmt, ...) \ + _sdata_dbg(1, sdata, fmt, ##__VA_ARGS__) + +#define link_info(link, fmt, ...) \ + do { \ + if (ieee80211_vif_is_mld(&(link)->sdata->vif)) \ + _sdata_info((link)->sdata, "[link %d] " fmt, \ + (link)->link_id, \ + ##__VA_ARGS__); \ + else \ + _sdata_info((link)->sdata, fmt, ##__VA_ARGS__); \ + } while (0) +#define link_err(link, fmt, ...) \ + do { \ + if (ieee80211_vif_is_mld(&(link)->sdata->vif)) \ + _sdata_err((link)->sdata, "[link %d] " fmt, \ + (link)->link_id, \ + ##__VA_ARGS__); \ + else \ + _sdata_err((link)->sdata, fmt, ##__VA_ARGS__); \ + } while (0) +#define link_dbg(link, fmt, ...) \ + do { \ + if (ieee80211_vif_is_mld(&(link)->sdata->vif)) \ + _sdata_dbg(1, (link)->sdata, "[link %d] " fmt, \ + (link)->link_id, \ + ##__VA_ARGS__); \ + else \ + _sdata_dbg(1, (link)->sdata, fmt, \ + ##__VA_ARGS__); \ + } while (0) + +#define ht_dbg(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_HT_DEBUG, \ + sdata, fmt, ##__VA_ARGS__) + +#define ht_dbg_ratelimited(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_HT_DEBUG && net_ratelimit(), \ + sdata, fmt, ##__VA_ARGS__) + +#define ocb_dbg(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_OCB_DEBUG, \ + sdata, fmt, ##__VA_ARGS__) + +#define ibss_dbg(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_IBSS_DEBUG, \ + sdata, fmt, ##__VA_ARGS__) + +#define ps_dbg(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_PS_DEBUG, \ + sdata, fmt, ##__VA_ARGS__) + +#define ps_dbg_hw(hw, fmt, ...) \ + _wiphy_dbg(MAC80211_PS_DEBUG, \ + (hw)->wiphy, fmt, ##__VA_ARGS__) + +#define ps_dbg_ratelimited(sdata, fmt, ...) 
\ + _sdata_dbg(MAC80211_PS_DEBUG && net_ratelimit(), \ + sdata, fmt, ##__VA_ARGS__) + +#define mpl_dbg(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_MPL_DEBUG, \ + sdata, fmt, ##__VA_ARGS__) + +#define mpath_dbg(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_MPATH_DEBUG, \ + sdata, fmt, ##__VA_ARGS__) + +#define mhwmp_dbg(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_MHWMP_DEBUG, \ + sdata, fmt, ##__VA_ARGS__) + +#define msync_dbg(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_MESH_SYNC_DEBUG, \ + sdata, fmt, ##__VA_ARGS__) + +#define mcsa_dbg(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_MESH_CSA_DEBUG, \ + sdata, fmt, ##__VA_ARGS__) + +#define mps_dbg(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_MESH_PS_DEBUG, \ + sdata, fmt, ##__VA_ARGS__) + +#define tdls_dbg(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_TDLS_DEBUG, \ + sdata, fmt, ##__VA_ARGS__) + +#define sta_dbg(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_STA_DEBUG, \ + sdata, fmt, ##__VA_ARGS__) + +#define mlme_dbg(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_MLME_DEBUG, \ + sdata, fmt, ##__VA_ARGS__) + +#define mlme_dbg_ratelimited(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_MLME_DEBUG && net_ratelimit(), \ + sdata, fmt, ##__VA_ARGS__) + +#endif /* __MAC80211_DEBUG_H */ diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c new file mode 100644 index 0000000000..207f772bd8 --- /dev/null +++ b/net/mac80211/debugfs.c @@ -0,0 +1,705 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * mac80211 debugfs for wireless PHYs + * + * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2018 - 2019, 2021-2022 Intel Corporation + */ + +#include <linux/debugfs.h> +#include <linux/rtnetlink.h> +#include <linux/vmalloc.h> +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" +#include "debugfs.h" + +#define DEBUGFS_FORMAT_BUFFER_SIZE 100 + +int mac80211_format_buffer(char __user *userbuf, size_t count, + loff_t *ppos, char *fmt, ...) +{ + va_list args; + char buf[DEBUGFS_FORMAT_BUFFER_SIZE]; + int res; + + va_start(args, fmt); + res = vscnprintf(buf, sizeof(buf), fmt, args); + va_end(args); + + return simple_read_from_buffer(userbuf, count, ppos, buf, res); +} + +#define DEBUGFS_READONLY_FILE_FN(name, fmt, value...) \ +static ssize_t name## _read(struct file *file, char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + struct ieee80211_local *local = file->private_data; \ + \ + return mac80211_format_buffer(userbuf, count, ppos, \ + fmt "\n", ##value); \ +} + +#define DEBUGFS_READONLY_FILE_OPS(name) \ +static const struct file_operations name## _ops = { \ + .read = name## _read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_READONLY_FILE(name, fmt, value...) \ + DEBUGFS_READONLY_FILE_FN(name, fmt, value) \ + DEBUGFS_READONLY_FILE_OPS(name) + +#define DEBUGFS_ADD(name) \ + debugfs_create_file(#name, 0400, phyd, local, &name## _ops) + +#define DEBUGFS_ADD_MODE(name, mode) \ + debugfs_create_file(#name, mode, phyd, local, &name## _ops); + + +DEBUGFS_READONLY_FILE(hw_conf, "%x", + local->hw.conf.flags); +DEBUGFS_READONLY_FILE(user_power, "%d", + local->user_power_level); +DEBUGFS_READONLY_FILE(power, "%d", + local->hw.conf.power_level); +DEBUGFS_READONLY_FILE(total_ps_buffered, "%d", + local->total_ps_buffered); +DEBUGFS_READONLY_FILE(wep_iv, "%#08x", + local->wep_iv & 0xffffff); +DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s", + local->rate_ctrl ? 
local->rate_ctrl->ops->name : "hw/driver"); + +static ssize_t aqm_read(struct file *file, + char __user *user_buf, + size_t count, + loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + struct fq *fq = &local->fq; + char buf[200]; + int len = 0; + + spin_lock_bh(&local->fq.lock); + rcu_read_lock(); + + len = scnprintf(buf, sizeof(buf), + "access name value\n" + "R fq_flows_cnt %u\n" + "R fq_backlog %u\n" + "R fq_overlimit %u\n" + "R fq_overmemory %u\n" + "R fq_collisions %u\n" + "R fq_memory_usage %u\n" + "RW fq_memory_limit %u\n" + "RW fq_limit %u\n" + "RW fq_quantum %u\n", + fq->flows_cnt, + fq->backlog, + fq->overmemory, + fq->overlimit, + fq->collisions, + fq->memory_usage, + fq->memory_limit, + fq->limit, + fq->quantum); + + rcu_read_unlock(); + spin_unlock_bh(&local->fq.lock); + + return simple_read_from_buffer(user_buf, count, ppos, + buf, len); +} + +static ssize_t aqm_write(struct file *file, + const char __user *user_buf, + size_t count, + loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + char buf[100]; + + if (count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + if (count && buf[count - 1] == '\n') + buf[count - 1] = '\0'; + else + buf[count] = '\0'; + + if (sscanf(buf, "fq_limit %u", &local->fq.limit) == 1) + return count; + else if (sscanf(buf, "fq_memory_limit %u", &local->fq.memory_limit) == 1) + return count; + else if (sscanf(buf, "fq_quantum %u", &local->fq.quantum) == 1) + return count; + + return -EINVAL; +} + +static const struct file_operations aqm_ops = { + .write = aqm_write, + .read = aqm_read, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t airtime_flags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + char buf[128] = {}, *pos, *end; + + pos = buf; + end = pos + sizeof(buf) - 1; + + if (local->airtime_flags & AIRTIME_USE_TX) + pos += scnprintf(pos, end - pos, "AIRTIME_TX\t(%lx)\n", + AIRTIME_USE_TX); + if (local->airtime_flags & AIRTIME_USE_RX) + pos += scnprintf(pos, end - pos, "AIRTIME_RX\t(%lx)\n", + AIRTIME_USE_RX); + + return simple_read_from_buffer(user_buf, count, ppos, buf, + strlen(buf)); +} + +static ssize_t airtime_flags_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + char buf[16]; + + if (count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + if (count && buf[count - 1] == '\n') + buf[count - 1] = '\0'; + else + buf[count] = '\0'; + + if (kstrtou16(buf, 0, &local->airtime_flags)) + return -EINVAL; + + return count; +} + +static const struct file_operations airtime_flags_ops = { + .write = airtime_flags_write, + .read = airtime_flags_read, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t aql_pending_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + char buf[400]; + int len = 0; + + len = scnprintf(buf, sizeof(buf), + "AC AQL pending\n" + "VO %u us\n" + "VI %u us\n" + "BE %u us\n" + "BK %u us\n" + "total %u us\n", + atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_VO]), + atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_VI]), + atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_BE]), + atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_BK]), + 
atomic_read(&local->aql_total_pending_airtime)); + return simple_read_from_buffer(user_buf, count, ppos, + buf, len); +} + +static const struct file_operations aql_pending_ops = { + .read = aql_pending_read, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t aql_txq_limit_read(struct file *file, + char __user *user_buf, + size_t count, + loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + char buf[400]; + int len = 0; + + len = scnprintf(buf, sizeof(buf), + "AC AQL limit low AQL limit high\n" + "VO %u %u\n" + "VI %u %u\n" + "BE %u %u\n" + "BK %u %u\n", + local->aql_txq_limit_low[IEEE80211_AC_VO], + local->aql_txq_limit_high[IEEE80211_AC_VO], + local->aql_txq_limit_low[IEEE80211_AC_VI], + local->aql_txq_limit_high[IEEE80211_AC_VI], + local->aql_txq_limit_low[IEEE80211_AC_BE], + local->aql_txq_limit_high[IEEE80211_AC_BE], + local->aql_txq_limit_low[IEEE80211_AC_BK], + local->aql_txq_limit_high[IEEE80211_AC_BK]); + return simple_read_from_buffer(user_buf, count, ppos, + buf, len); +} + +static ssize_t aql_txq_limit_write(struct file *file, + const char __user *user_buf, + size_t count, + loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + char buf[100]; + u32 ac, q_limit_low, q_limit_high, q_limit_low_old, q_limit_high_old; + struct sta_info *sta; + + if (count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + if (count && buf[count - 1] == '\n') + buf[count - 1] = '\0'; + else + buf[count] = '\0'; + + if (sscanf(buf, "%u %u %u", &ac, &q_limit_low, &q_limit_high) != 3) + return -EINVAL; + + if (ac >= IEEE80211_NUM_ACS) + return -EINVAL; + + q_limit_low_old = local->aql_txq_limit_low[ac]; + q_limit_high_old = local->aql_txq_limit_high[ac]; + + local->aql_txq_limit_low[ac] = q_limit_low; + local->aql_txq_limit_high[ac] = q_limit_high; + + mutex_lock(&local->sta_mtx); + list_for_each_entry(sta, &local->sta_list, list) { + /* If a sta has customized queue limits, keep it */ + if (sta->airtime[ac].aql_limit_low == q_limit_low_old && + sta->airtime[ac].aql_limit_high == q_limit_high_old) { + sta->airtime[ac].aql_limit_low = q_limit_low; + sta->airtime[ac].aql_limit_high = q_limit_high; + } + } + mutex_unlock(&local->sta_mtx); + return count; +} + +static const struct file_operations aql_txq_limit_ops = { + .write = aql_txq_limit_write, + .read = aql_txq_limit_read, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t aql_enable_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char buf[3]; + int len; + + len = scnprintf(buf, sizeof(buf), "%d\n", + !static_key_false(&aql_disable.key)); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t aql_enable_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + bool aql_disabled = static_key_false(&aql_disable.key); + char buf[3]; + size_t len; + + if (count > sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[sizeof(buf) - 1] = '\0'; + len = strlen(buf); + if (len > 0 && buf[len - 1] == '\n') + buf[len - 1] = 0; + + if (buf[0] == '0' && buf[1] == '\0') { + if (!aql_disabled) + static_branch_inc(&aql_disable); + } else if (buf[0] == '1' && buf[1] == '\0') { + if (aql_disabled) + static_branch_dec(&aql_disable); + } else { + return -EINVAL; + } + + return count; +} + +static const struct file_operations aql_enable_ops = { + .write = aql_enable_write, + .read = 
aql_enable_read, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t force_tx_status_read(struct file *file, + char __user *user_buf, + size_t count, + loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + char buf[3]; + int len = 0; + + len = scnprintf(buf, sizeof(buf), "%d\n", (int)local->force_tx_status); + + return simple_read_from_buffer(user_buf, count, ppos, + buf, len); +} + +static ssize_t force_tx_status_write(struct file *file, + const char __user *user_buf, + size_t count, + loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + char buf[3]; + + if (count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + if (count && buf[count - 1] == '\n') + buf[count - 1] = '\0'; + else + buf[count] = '\0'; + + if (buf[0] == '0' && buf[1] == '\0') + local->force_tx_status = 0; + else if (buf[0] == '1' && buf[1] == '\0') + local->force_tx_status = 1; + else + return -EINVAL; + + return count; +} + +static const struct file_operations force_tx_status_ops = { + .write = force_tx_status_write, + .read = force_tx_status_read, + .open = simple_open, + .llseek = default_llseek, +}; + +#ifdef CONFIG_PM +static ssize_t reset_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + int ret; + + rtnl_lock(); + wiphy_lock(local->hw.wiphy); + __ieee80211_suspend(&local->hw, NULL); + ret = __ieee80211_resume(&local->hw); + wiphy_unlock(local->hw.wiphy); + + if (ret) + cfg80211_shutdown_all_interfaces(local->hw.wiphy); + + rtnl_unlock(); + + return count; +} + +static const struct file_operations reset_ops = { + .write = reset_write, + .open = simple_open, + .llseek = noop_llseek, +}; +#endif + +static const char *hw_flag_names[] = { +#define FLAG(F) [IEEE80211_HW_##F] = #F + FLAG(HAS_RATE_CONTROL), + FLAG(RX_INCLUDES_FCS), + FLAG(HOST_BROADCAST_PS_BUFFERING), + FLAG(SIGNAL_UNSPEC), + FLAG(SIGNAL_DBM), + FLAG(NEED_DTIM_BEFORE_ASSOC), + FLAG(SPECTRUM_MGMT), + FLAG(AMPDU_AGGREGATION), + FLAG(SUPPORTS_PS), + FLAG(PS_NULLFUNC_STACK), + FLAG(SUPPORTS_DYNAMIC_PS), + FLAG(MFP_CAPABLE), + FLAG(WANT_MONITOR_VIF), + FLAG(NO_AUTO_VIF), + FLAG(SW_CRYPTO_CONTROL), + FLAG(SUPPORT_FAST_XMIT), + FLAG(REPORTS_TX_ACK_STATUS), + FLAG(CONNECTION_MONITOR), + FLAG(QUEUE_CONTROL), + FLAG(SUPPORTS_PER_STA_GTK), + FLAG(AP_LINK_PS), + FLAG(TX_AMPDU_SETUP_IN_HW), + FLAG(SUPPORTS_RC_TABLE), + FLAG(P2P_DEV_ADDR_FOR_INTF), + FLAG(TIMING_BEACON_ONLY), + FLAG(SUPPORTS_HT_CCK_RATES), + FLAG(CHANCTX_STA_CSA), + FLAG(SUPPORTS_CLONED_SKBS), + FLAG(SINGLE_SCAN_ON_ALL_BANDS), + FLAG(TDLS_WIDER_BW), + FLAG(SUPPORTS_AMSDU_IN_AMPDU), + FLAG(BEACON_TX_STATUS), + FLAG(NEEDS_UNIQUE_STA_ADDR), + FLAG(SUPPORTS_REORDERING_BUFFER), + FLAG(USES_RSS), + FLAG(TX_AMSDU), + FLAG(TX_FRAG_LIST), + FLAG(REPORTS_LOW_ACK), + FLAG(SUPPORTS_TX_FRAG), + FLAG(SUPPORTS_TDLS_BUFFER_STA), + FLAG(DEAUTH_NEED_MGD_TX_PREP), + FLAG(DOESNT_SUPPORT_QOS_NDP), + FLAG(BUFF_MMPDU_TXQ), + FLAG(SUPPORTS_VHT_EXT_NSS_BW), + FLAG(STA_MMPDU_TXQ), + FLAG(TX_STATUS_NO_AMPDU_LEN), + FLAG(SUPPORTS_MULTI_BSSID), + FLAG(SUPPORTS_ONLY_HE_MULTI_BSSID), + FLAG(AMPDU_KEYBORDER_SUPPORT), + FLAG(SUPPORTS_TX_ENCAP_OFFLOAD), + FLAG(SUPPORTS_RX_DECAP_OFFLOAD), + FLAG(SUPPORTS_CONC_MON_RX_DECAP), + FLAG(DETECTS_COLOR_COLLISION), + FLAG(MLO_MCAST_MULTI_LINK_TX), +#undef FLAG +}; + +static ssize_t hwflags_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local 
*local = file->private_data; + size_t bufsz = 30 * NUM_IEEE80211_HW_FLAGS; + char *buf = kzalloc(bufsz, GFP_KERNEL); + char *pos = buf, *end = buf + bufsz - 1; + ssize_t rv; + int i; + + if (!buf) + return -ENOMEM; + + /* fail compilation if somebody adds or removes + * a flag without updating the name array above + */ + BUILD_BUG_ON(ARRAY_SIZE(hw_flag_names) != NUM_IEEE80211_HW_FLAGS); + + for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) { + if (test_bit(i, local->hw.flags)) + pos += scnprintf(pos, end - pos, "%s\n", + hw_flag_names[i]); + } + + rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); + kfree(buf); + return rv; +} + +static ssize_t misc_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + /* Max len of each line is 16 characters, plus 9 for 'pending:\n' */ + size_t bufsz = IEEE80211_MAX_QUEUES * 16 + 9; + char *buf; + char *pos, *end; + ssize_t rv; + int i; + int ln; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos = buf; + end = buf + bufsz - 1; + + pos += scnprintf(pos, end - pos, "pending:\n"); + + for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { + ln = skb_queue_len(&local->pending[i]); + pos += scnprintf(pos, end - pos, "[%i] %d\n", + i, ln); + } + + rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); + kfree(buf); + return rv; +} + +static ssize_t queues_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + unsigned long flags; + char buf[IEEE80211_MAX_QUEUES * 20]; + int q, res = 0; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + for (q = 0; q < local->hw.queues; q++) + res += sprintf(buf + res, "%02d: %#.8lx/%d\n", q, + local->queue_stop_reasons[q], + skb_queue_len(&local->pending[q])); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + + return simple_read_from_buffer(user_buf, count, ppos, buf, res); +} + +DEBUGFS_READONLY_FILE_OPS(hwflags); +DEBUGFS_READONLY_FILE_OPS(queues); +DEBUGFS_READONLY_FILE_OPS(misc); + +/* statistics stuff */ + +static ssize_t format_devstat_counter(struct ieee80211_local *local, + char __user *userbuf, + size_t count, loff_t *ppos, + int (*printvalue)(struct ieee80211_low_level_stats *stats, char *buf, + int buflen)) +{ + struct ieee80211_low_level_stats stats; + char buf[20]; + int res; + + rtnl_lock(); + res = drv_get_stats(local, &stats); + rtnl_unlock(); + if (res) + return res; + res = printvalue(&stats, buf, sizeof(buf)); + return simple_read_from_buffer(userbuf, count, ppos, buf, res); +} + +#define DEBUGFS_DEVSTATS_FILE(name) \ +static int print_devstats_##name(struct ieee80211_low_level_stats *stats,\ + char *buf, int buflen) \ +{ \ + return scnprintf(buf, buflen, "%u\n", stats->name); \ +} \ +static ssize_t stats_ ##name## _read(struct file *file, \ + char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + return format_devstat_counter(file->private_data, \ + userbuf, \ + count, \ + ppos, \ + print_devstats_##name); \ +} \ + \ +static const struct file_operations stats_ ##name## _ops = { \ + .read = stats_ ##name## _read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +#ifdef CONFIG_MAC80211_DEBUG_COUNTERS +#define DEBUGFS_STATS_ADD(name) \ + debugfs_create_u32(#name, 0400, statsd, &local->name); +#endif +#define DEBUGFS_DEVSTATS_ADD(name) \ + debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops); + 
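Note: the per-counter files below are generated entirely by the DEBUGFS_DEVSTATS_FILE() macro defined just above. As an illustrative sketch only (not additional code in the patch), DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount) expands to roughly the following:

/* Illustrative expansion only -- the patch itself just invokes the macro. */
static int print_devstats_dot11ACKFailureCount(struct ieee80211_low_level_stats *stats,
					       char *buf, int buflen)
{
	return scnprintf(buf, buflen, "%u\n", stats->dot11ACKFailureCount);
}

static ssize_t stats_dot11ACKFailureCount_read(struct file *file,
					       char __user *userbuf,
					       size_t count, loff_t *ppos)
{
	/* file->private_data is the struct ieee80211_local * passed at file creation */
	return format_devstat_counter(file->private_data, userbuf, count, ppos,
				      print_devstats_dot11ACKFailureCount);
}

static const struct file_operations stats_dot11ACKFailureCount_ops = {
	.read	= stats_dot11ACKFailureCount_read,
	.open	= simple_open,
	.llseek	= generic_file_llseek,
};

Reading the resulting file (typically ieee80211/phy<N>/statistics/dot11ACKFailureCount under debugfs, path assumed here) goes through format_devstat_counter(), which fetches the counters via drv_get_stats() and prints the one value.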
+DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount); +DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount); +DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount); +DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount); + +void debugfs_hw_add(struct ieee80211_local *local) +{ + struct dentry *phyd = local->hw.wiphy->debugfsdir; + struct dentry *statsd; + + if (!phyd) + return; + + local->debugfs.keys = debugfs_create_dir("keys", phyd); + + DEBUGFS_ADD(total_ps_buffered); + DEBUGFS_ADD(wep_iv); + DEBUGFS_ADD(rate_ctrl_alg); + DEBUGFS_ADD(queues); + DEBUGFS_ADD(misc); +#ifdef CONFIG_PM + DEBUGFS_ADD_MODE(reset, 0200); +#endif + DEBUGFS_ADD(hwflags); + DEBUGFS_ADD(user_power); + DEBUGFS_ADD(power); + DEBUGFS_ADD(hw_conf); + DEBUGFS_ADD_MODE(force_tx_status, 0600); + DEBUGFS_ADD_MODE(aql_enable, 0600); + DEBUGFS_ADD(aql_pending); + DEBUGFS_ADD_MODE(aqm, 0600); + + DEBUGFS_ADD_MODE(airtime_flags, 0600); + + DEBUGFS_ADD(aql_txq_limit); + debugfs_create_u32("aql_threshold", 0600, + phyd, &local->aql_threshold); + + statsd = debugfs_create_dir("statistics", phyd); + +#ifdef CONFIG_MAC80211_DEBUG_COUNTERS + DEBUGFS_STATS_ADD(dot11TransmittedFragmentCount); + DEBUGFS_STATS_ADD(dot11MulticastTransmittedFrameCount); + DEBUGFS_STATS_ADD(dot11FailedCount); + DEBUGFS_STATS_ADD(dot11RetryCount); + DEBUGFS_STATS_ADD(dot11MultipleRetryCount); + DEBUGFS_STATS_ADD(dot11FrameDuplicateCount); + DEBUGFS_STATS_ADD(dot11ReceivedFragmentCount); + DEBUGFS_STATS_ADD(dot11MulticastReceivedFrameCount); + DEBUGFS_STATS_ADD(dot11TransmittedFrameCount); + DEBUGFS_STATS_ADD(tx_handlers_drop); + DEBUGFS_STATS_ADD(tx_handlers_queued); + DEBUGFS_STATS_ADD(tx_handlers_drop_wep); + DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc); + DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port); + DEBUGFS_STATS_ADD(rx_handlers_drop); + DEBUGFS_STATS_ADD(rx_handlers_queued); + DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc); + DEBUGFS_STATS_ADD(rx_handlers_drop_defrag); + DEBUGFS_STATS_ADD(tx_expand_skb_head); + DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned); + DEBUGFS_STATS_ADD(rx_expand_skb_head_defrag); + DEBUGFS_STATS_ADD(rx_handlers_fragments); + DEBUGFS_STATS_ADD(tx_status_drop); +#endif + DEBUGFS_DEVSTATS_ADD(dot11ACKFailureCount); + DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount); + DEBUGFS_DEVSTATS_ADD(dot11FCSErrorCount); + DEBUGFS_DEVSTATS_ADD(dot11RTSSuccessCount); +} diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h new file mode 100644 index 0000000000..d2c4247874 --- /dev/null +++ b/net/mac80211/debugfs.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __MAC80211_DEBUGFS_H +#define __MAC80211_DEBUGFS_H + +#include "ieee80211_i.h" + +#ifdef CONFIG_MAC80211_DEBUGFS +void debugfs_hw_add(struct ieee80211_local *local); +int __printf(4, 5) mac80211_format_buffer(char __user *userbuf, size_t count, + loff_t *ppos, char *fmt, ...); +#else +static inline void debugfs_hw_add(struct ieee80211_local *local) +{ +} +#endif + +#endif /* __MAC80211_DEBUGFS_H */ diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c new file mode 100644 index 0000000000..16a04330e7 --- /dev/null +++ b/net/mac80211/debugfs_key.c @@ -0,0 +1,472 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2003-2005 Devicescape Software, Inc. 
+ * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> + * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> + * Copyright (C) 2015 Intel Deutschland GmbH + * Copyright (C) 2021-2022 Intel Corporation + */ + +#include <linux/kobject.h> +#include <linux/slab.h> +#include "ieee80211_i.h" +#include "key.h" +#include "debugfs.h" +#include "debugfs_key.h" + +#define KEY_READ(name, prop, format_string) \ +static ssize_t key_##name##_read(struct file *file, \ + char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + struct ieee80211_key *key = file->private_data; \ + return mac80211_format_buffer(userbuf, count, ppos, \ + format_string, key->prop); \ +} +#define KEY_READ_X(name) KEY_READ(name, name, "0x%x\n") + +#define KEY_OPS(name) \ +static const struct file_operations key_ ##name## _ops = { \ + .read = key_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + +#define KEY_OPS_W(name) \ +static const struct file_operations key_ ##name## _ops = { \ + .read = key_##name##_read, \ + .write = key_##name##_write, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + +#define KEY_FILE(name, format) \ + KEY_READ_##format(name) \ + KEY_OPS(name) + +#define KEY_CONF_READ(name, format_string) \ + KEY_READ(conf_##name, conf.name, format_string) +#define KEY_CONF_READ_D(name) KEY_CONF_READ(name, "%d\n") + +#define KEY_CONF_OPS(name) \ +static const struct file_operations key_ ##name## _ops = { \ + .read = key_conf_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + +#define KEY_CONF_FILE(name, format) \ + KEY_CONF_READ_##format(name) \ + KEY_CONF_OPS(name) + +KEY_CONF_FILE(keylen, D); +KEY_CONF_FILE(keyidx, D); +KEY_CONF_FILE(hw_key_idx, D); +KEY_FILE(flags, X); +KEY_READ(ifindex, sdata->name, "%s\n"); +KEY_OPS(ifindex); + +static ssize_t key_algorithm_read(struct file *file, + char __user *userbuf, + size_t count, loff_t *ppos) +{ + char buf[15]; + struct ieee80211_key *key = file->private_data; + u32 c = key->conf.cipher; + + sprintf(buf, "%.2x-%.2x-%.2x:%d\n", + c >> 24, (c >> 16) & 0xff, (c >> 8) & 0xff, c & 0xff); + return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf)); +} +KEY_OPS(algorithm); + +static ssize_t key_tx_spec_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct ieee80211_key *key = file->private_data; + u64 pn; + int ret; + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + return -EINVAL; + case WLAN_CIPHER_SUITE_TKIP: + /* not supported yet */ + return -EOPNOTSUPP; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + ret = kstrtou64_from_user(userbuf, count, 16, &pn); + if (ret) + return ret; + /* PN is a 48-bit counter */ + if (pn >= (1ULL << 48)) + return -ERANGE; + atomic64_set(&key->conf.tx_pn, pn); + return count; + default: + return 0; + } +} + +static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + u64 pn; + char buf[20]; + int len; + struct ieee80211_key *key = file->private_data; + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + len = scnprintf(buf, sizeof(buf), "\n"); + break; + case WLAN_CIPHER_SUITE_TKIP: + pn = atomic64_read(&key->conf.tx_pn); + len = 
scnprintf(buf, sizeof(buf), "%08x %04x\n", + TKIP_PN_TO_IV32(pn), + TKIP_PN_TO_IV16(pn)); + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + pn = atomic64_read(&key->conf.tx_pn); + len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n", + (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24), + (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn); + break; + default: + return 0; + } + return simple_read_from_buffer(userbuf, count, ppos, buf, len); +} +KEY_OPS_W(tx_spec); + +static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct ieee80211_key *key = file->private_data; + char buf[14*IEEE80211_NUM_TIDS+1], *p = buf; + int i, len; + const u8 *rpn; + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + len = scnprintf(buf, sizeof(buf), "\n"); + break; + case WLAN_CIPHER_SUITE_TKIP: + for (i = 0; i < IEEE80211_NUM_TIDS; i++) + p += scnprintf(p, sizeof(buf)+buf-p, + "%08x %04x\n", + key->u.tkip.rx[i].iv32, + key->u.tkip.rx[i].iv16); + len = p - buf; + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) { + rpn = key->u.ccmp.rx_pn[i]; + p += scnprintf(p, sizeof(buf)+buf-p, + "%02x%02x%02x%02x%02x%02x\n", + rpn[0], rpn[1], rpn[2], + rpn[3], rpn[4], rpn[5]); + } + len = p - buf; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + rpn = key->u.aes_cmac.rx_pn; + p += scnprintf(p, sizeof(buf)+buf-p, + "%02x%02x%02x%02x%02x%02x\n", + rpn[0], rpn[1], rpn[2], + rpn[3], rpn[4], rpn[5]); + len = p - buf; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + rpn = key->u.aes_gmac.rx_pn; + p += scnprintf(p, sizeof(buf)+buf-p, + "%02x%02x%02x%02x%02x%02x\n", + rpn[0], rpn[1], rpn[2], + rpn[3], rpn[4], rpn[5]); + len = p - buf; + break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) { + rpn = key->u.gcmp.rx_pn[i]; + p += scnprintf(p, sizeof(buf)+buf-p, + "%02x%02x%02x%02x%02x%02x\n", + rpn[0], rpn[1], rpn[2], + rpn[3], rpn[4], rpn[5]); + } + len = p - buf; + break; + default: + return 0; + } + return simple_read_from_buffer(userbuf, count, ppos, buf, len); +} +KEY_OPS(rx_spec); + +static ssize_t key_replays_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct ieee80211_key *key = file->private_data; + char buf[20]; + int len; + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + len = scnprintf(buf, sizeof(buf), "%u\n", + key->u.aes_cmac.replays); + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + len = scnprintf(buf, sizeof(buf), "%u\n", + key->u.aes_gmac.replays); + break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + len = scnprintf(buf, sizeof(buf), "%u\n", key->u.gcmp.replays); + break; + default: + return 0; + } + return simple_read_from_buffer(userbuf, count, ppos, buf, len); +} +KEY_OPS(replays); + +static ssize_t key_icverrors_read(struct file *file, char __user *userbuf, + size_t count, 
loff_t *ppos) +{ + struct ieee80211_key *key = file->private_data; + char buf[20]; + int len; + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + len = scnprintf(buf, sizeof(buf), "%u\n", + key->u.aes_cmac.icverrors); + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + len = scnprintf(buf, sizeof(buf), "%u\n", + key->u.aes_gmac.icverrors); + break; + default: + return 0; + } + return simple_read_from_buffer(userbuf, count, ppos, buf, len); +} +KEY_OPS(icverrors); + +static ssize_t key_mic_failures_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct ieee80211_key *key = file->private_data; + char buf[20]; + int len; + + if (key->conf.cipher != WLAN_CIPHER_SUITE_TKIP) + return -EINVAL; + + len = scnprintf(buf, sizeof(buf), "%u\n", key->u.tkip.mic_failures); + + return simple_read_from_buffer(userbuf, count, ppos, buf, len); +} +KEY_OPS(mic_failures); + +static ssize_t key_key_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct ieee80211_key *key = file->private_data; + int i, bufsize = 2 * key->conf.keylen + 2; + char *buf = kmalloc(bufsize, GFP_KERNEL); + char *p = buf; + ssize_t res; + + if (!buf) + return -ENOMEM; + + for (i = 0; i < key->conf.keylen; i++) + p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]); + p += scnprintf(p, bufsize+buf-p, "\n"); + res = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return res; +} +KEY_OPS(key); + +#define DEBUGFS_ADD(name) \ + debugfs_create_file(#name, 0400, key->debugfs.dir, \ + key, &key_##name##_ops) +#define DEBUGFS_ADD_W(name) \ + debugfs_create_file(#name, 0600, key->debugfs.dir, \ + key, &key_##name##_ops); + +void ieee80211_debugfs_key_add(struct ieee80211_key *key) +{ + static int keycount; + char buf[100]; + struct sta_info *sta; + + if (!key->local->debugfs.keys) + return; + + sprintf(buf, "%d", keycount); + key->debugfs.cnt = keycount; + keycount++; + key->debugfs.dir = debugfs_create_dir(buf, + key->local->debugfs.keys); + + sta = key->sta; + if (sta) { + sprintf(buf, "../../netdev:%s/stations/%pM", + sta->sdata->name, sta->sta.addr); + key->debugfs.stalink = + debugfs_create_symlink("station", key->debugfs.dir, buf); + } + + DEBUGFS_ADD(keylen); + DEBUGFS_ADD(flags); + DEBUGFS_ADD(keyidx); + DEBUGFS_ADD(hw_key_idx); + DEBUGFS_ADD(algorithm); + DEBUGFS_ADD_W(tx_spec); + DEBUGFS_ADD(rx_spec); + DEBUGFS_ADD(replays); + DEBUGFS_ADD(icverrors); + DEBUGFS_ADD(mic_failures); + DEBUGFS_ADD(key); + DEBUGFS_ADD(ifindex); +}; + +void ieee80211_debugfs_key_remove(struct ieee80211_key *key) +{ + if (!key) + return; + + debugfs_remove_recursive(key->debugfs.dir); + key->debugfs.dir = NULL; +} + +void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata) +{ + char buf[50]; + struct ieee80211_key *key; + + if (!sdata->vif.debugfs_dir) + return; + + lockdep_assert_held(&sdata->local->key_mtx); + + debugfs_remove(sdata->debugfs.default_unicast_key); + sdata->debugfs.default_unicast_key = NULL; + + if (sdata->default_unicast_key) { + key = key_mtx_dereference(sdata->local, + sdata->default_unicast_key); + sprintf(buf, "../keys/%d", key->debugfs.cnt); + sdata->debugfs.default_unicast_key = + debugfs_create_symlink("default_unicast_key", + sdata->vif.debugfs_dir, buf); + } + + debugfs_remove(sdata->debugfs.default_multicast_key); + sdata->debugfs.default_multicast_key = NULL; + + if (sdata->deflink.default_multicast_key) { + key = 
key_mtx_dereference(sdata->local, + sdata->deflink.default_multicast_key); + sprintf(buf, "../keys/%d", key->debugfs.cnt); + sdata->debugfs.default_multicast_key = + debugfs_create_symlink("default_multicast_key", + sdata->vif.debugfs_dir, buf); + } +} + +void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata) +{ + char buf[50]; + struct ieee80211_key *key; + + if (!sdata->vif.debugfs_dir) + return; + + key = key_mtx_dereference(sdata->local, + sdata->deflink.default_mgmt_key); + if (key) { + sprintf(buf, "../keys/%d", key->debugfs.cnt); + sdata->debugfs.default_mgmt_key = + debugfs_create_symlink("default_mgmt_key", + sdata->vif.debugfs_dir, buf); + } else + ieee80211_debugfs_key_remove_mgmt_default(sdata); +} + +void ieee80211_debugfs_key_remove_mgmt_default(struct ieee80211_sub_if_data *sdata) +{ + if (!sdata) + return; + + debugfs_remove(sdata->debugfs.default_mgmt_key); + sdata->debugfs.default_mgmt_key = NULL; +} + +void +ieee80211_debugfs_key_add_beacon_default(struct ieee80211_sub_if_data *sdata) +{ + char buf[50]; + struct ieee80211_key *key; + + if (!sdata->vif.debugfs_dir) + return; + + key = key_mtx_dereference(sdata->local, + sdata->deflink.default_beacon_key); + if (key) { + sprintf(buf, "../keys/%d", key->debugfs.cnt); + sdata->debugfs.default_beacon_key = + debugfs_create_symlink("default_beacon_key", + sdata->vif.debugfs_dir, buf); + } else { + ieee80211_debugfs_key_remove_beacon_default(sdata); + } +} + +void +ieee80211_debugfs_key_remove_beacon_default(struct ieee80211_sub_if_data *sdata) +{ + if (!sdata) + return; + + debugfs_remove(sdata->debugfs.default_beacon_key); + sdata->debugfs.default_beacon_key = NULL; +} + +void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, + struct sta_info *sta) +{ + debugfs_remove(key->debugfs.stalink); + key->debugfs.stalink = NULL; +} diff --git a/net/mac80211/debugfs_key.h b/net/mac80211/debugfs_key.h new file mode 100644 index 0000000000..af7cf495f8 --- /dev/null +++ b/net/mac80211/debugfs_key.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __MAC80211_DEBUGFS_KEY_H +#define __MAC80211_DEBUGFS_KEY_H + +#ifdef CONFIG_MAC80211_DEBUGFS +void ieee80211_debugfs_key_add(struct ieee80211_key *key); +void ieee80211_debugfs_key_remove(struct ieee80211_key *key); +void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata); +void ieee80211_debugfs_key_add_mgmt_default( + struct ieee80211_sub_if_data *sdata); +void ieee80211_debugfs_key_remove_mgmt_default( + struct ieee80211_sub_if_data *sdata); +void ieee80211_debugfs_key_add_beacon_default( + struct ieee80211_sub_if_data *sdata); +void ieee80211_debugfs_key_remove_beacon_default( + struct ieee80211_sub_if_data *sdata); +void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, + struct sta_info *sta); +#else +static inline void ieee80211_debugfs_key_add(struct ieee80211_key *key) +{} +static inline void ieee80211_debugfs_key_remove(struct ieee80211_key *key) +{} +static inline void ieee80211_debugfs_key_update_default( + struct ieee80211_sub_if_data *sdata) +{} +static inline void ieee80211_debugfs_key_add_mgmt_default( + struct ieee80211_sub_if_data *sdata) +{} +static inline void ieee80211_debugfs_key_remove_mgmt_default( + struct ieee80211_sub_if_data *sdata) +{} +static inline void ieee80211_debugfs_key_add_beacon_default( + struct ieee80211_sub_if_data *sdata) +{} +static inline void ieee80211_debugfs_key_remove_beacon_default( + struct ieee80211_sub_if_data *sdata) +{} +static inline void 
ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, + struct sta_info *sta) +{} +#endif + +#endif /* __MAC80211_DEBUGFS_KEY_H */ diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c new file mode 100644 index 0000000000..63250286dc --- /dev/null +++ b/net/mac80211/debugfs_netdev.c @@ -0,0 +1,988 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> + * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> + * Copyright (C) 2020-2023 Intel Corporation + */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/if.h> +#include <linux/if_ether.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/rtnetlink.h> +#include <linux/slab.h> +#include <linux/notifier.h> +#include <net/mac80211.h> +#include <net/cfg80211.h> +#include "ieee80211_i.h" +#include "rate.h" +#include "debugfs.h" +#include "debugfs_netdev.h" +#include "driver-ops.h" + +static ssize_t ieee80211_if_read( + void *data, + char __user *userbuf, + size_t count, loff_t *ppos, + ssize_t (*format)(const void *, char *, int)) +{ + char buf[200]; + ssize_t ret = -EINVAL; + + read_lock(&dev_base_lock); + ret = (*format)(data, buf, sizeof(buf)); + read_unlock(&dev_base_lock); + + if (ret >= 0) + ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret); + + return ret; +} + +static ssize_t ieee80211_if_write( + void *data, + const char __user *userbuf, + size_t count, loff_t *ppos, + ssize_t (*write)(void *, const char *, int)) +{ + char buf[64]; + ssize_t ret; + + if (count >= sizeof(buf)) + return -E2BIG; + + if (copy_from_user(buf, userbuf, count)) + return -EFAULT; + buf[count] = '\0'; + + rtnl_lock(); + ret = (*write)(data, buf, count); + rtnl_unlock(); + + return ret; +} + +#define IEEE80211_IF_FMT(name, type, field, format_string) \ +static ssize_t ieee80211_if_fmt_##name( \ + const type *data, char *buf, \ + int buflen) \ +{ \ + return scnprintf(buf, buflen, format_string, data->field); \ +} +#define IEEE80211_IF_FMT_DEC(name, type, field) \ + IEEE80211_IF_FMT(name, type, field, "%d\n") +#define IEEE80211_IF_FMT_HEX(name, type, field) \ + IEEE80211_IF_FMT(name, type, field, "%#x\n") +#define IEEE80211_IF_FMT_LHEX(name, type, field) \ + IEEE80211_IF_FMT(name, type, field, "%#lx\n") + +#define IEEE80211_IF_FMT_HEXARRAY(name, type, field) \ +static ssize_t ieee80211_if_fmt_##name( \ + const type *data, \ + char *buf, int buflen) \ +{ \ + char *p = buf; \ + int i; \ + for (i = 0; i < sizeof(data->field); i++) { \ + p += scnprintf(p, buflen + buf - p, "%.2x ", \ + data->field[i]); \ + } \ + p += scnprintf(p, buflen + buf - p, "\n"); \ + return p - buf; \ +} + +#define IEEE80211_IF_FMT_ATOMIC(name, type, field) \ +static ssize_t ieee80211_if_fmt_##name( \ + const type *data, \ + char *buf, int buflen) \ +{ \ + return scnprintf(buf, buflen, "%d\n", atomic_read(&data->field));\ +} + +#define IEEE80211_IF_FMT_MAC(name, type, field) \ +static ssize_t ieee80211_if_fmt_##name( \ + const type *data, char *buf, \ + int buflen) \ +{ \ + return scnprintf(buf, buflen, "%pM\n", data->field); \ +} + +#define IEEE80211_IF_FMT_JIFFIES_TO_MS(name, type, field) \ +static ssize_t ieee80211_if_fmt_##name( \ + const type *data, \ + char *buf, int buflen) \ +{ \ + return scnprintf(buf, buflen, "%d\n", \ + jiffies_to_msecs(data->field)); \ +} + +#define _IEEE80211_IF_FILE_OPS(name, _read, _write) \ +static const struct file_operations name##_ops = { \ + .read = (_read), \ + .write = (_write), \ + .open = simple_open, \ + .llseek = 
generic_file_llseek, \ +} + +#define _IEEE80211_IF_FILE_R_FN(name, type) \ +static ssize_t ieee80211_if_read_##name(struct file *file, \ + char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + ssize_t (*fn)(const void *, char *, int) = (void *) \ + ((ssize_t (*)(const type, char *, int)) \ + ieee80211_if_fmt_##name); \ + return ieee80211_if_read(file->private_data, \ + userbuf, count, ppos, fn); \ +} + +#define _IEEE80211_IF_FILE_W_FN(name, type) \ +static ssize_t ieee80211_if_write_##name(struct file *file, \ + const char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + ssize_t (*fn)(void *, const char *, int) = (void *) \ + ((ssize_t (*)(type, const char *, int)) \ + ieee80211_if_parse_##name); \ + return ieee80211_if_write(file->private_data, userbuf, count, \ + ppos, fn); \ +} + +#define IEEE80211_IF_FILE_R(name) \ + _IEEE80211_IF_FILE_R_FN(name, struct ieee80211_sub_if_data *) \ + _IEEE80211_IF_FILE_OPS(name, ieee80211_if_read_##name, NULL) + +#define IEEE80211_IF_FILE_W(name) \ + _IEEE80211_IF_FILE_W_FN(name, struct ieee80211_sub_if_data *) \ + _IEEE80211_IF_FILE_OPS(name, NULL, ieee80211_if_write_##name) + +#define IEEE80211_IF_FILE_RW(name) \ + _IEEE80211_IF_FILE_R_FN(name, struct ieee80211_sub_if_data *) \ + _IEEE80211_IF_FILE_W_FN(name, struct ieee80211_sub_if_data *) \ + _IEEE80211_IF_FILE_OPS(name, ieee80211_if_read_##name, \ + ieee80211_if_write_##name) + +#define IEEE80211_IF_FILE(name, field, format) \ + IEEE80211_IF_FMT_##format(name, struct ieee80211_sub_if_data, field) \ + IEEE80211_IF_FILE_R(name) + +/* Same but with a link_ prefix in the ops variable name and different type */ +#define IEEE80211_IF_LINK_FILE_R(name) \ + _IEEE80211_IF_FILE_R_FN(name, struct ieee80211_link_data *) \ + _IEEE80211_IF_FILE_OPS(link_##name, ieee80211_if_read_##name, NULL) + +#define IEEE80211_IF_LINK_FILE_W(name) \ + _IEEE80211_IF_FILE_W_FN(name) \ + _IEEE80211_IF_FILE_OPS(link_##name, NULL, ieee80211_if_write_##name) + +#define IEEE80211_IF_LINK_FILE_RW(name) \ + _IEEE80211_IF_FILE_R_FN(name, struct ieee80211_link_data *) \ + _IEEE80211_IF_FILE_W_FN(name, struct ieee80211_link_data *) \ + _IEEE80211_IF_FILE_OPS(link_##name, ieee80211_if_read_##name, \ + ieee80211_if_write_##name) + +#define IEEE80211_IF_LINK_FILE(name, field, format) \ + IEEE80211_IF_FMT_##format(name, struct ieee80211_link_data, field) \ + IEEE80211_IF_LINK_FILE_R(name) + +/* common attributes */ +IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[NL80211_BAND_2GHZ], + HEX); +IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[NL80211_BAND_5GHZ], + HEX); +IEEE80211_IF_FILE(rc_rateidx_mcs_mask_2ghz, + rc_rateidx_mcs_mask[NL80211_BAND_2GHZ], HEXARRAY); +IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz, + rc_rateidx_mcs_mask[NL80211_BAND_5GHZ], HEXARRAY); + +static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_2ghz( + const struct ieee80211_sub_if_data *sdata, + char *buf, int buflen) +{ + int i, len = 0; + const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[NL80211_BAND_2GHZ]; + + for (i = 0; i < NL80211_VHT_NSS_MAX; i++) + len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]); + len += scnprintf(buf + len, buflen - len, "\n"); + + return len; +} + +IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_2ghz); + +static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_5ghz( + const struct ieee80211_sub_if_data *sdata, + char *buf, int buflen) +{ + int i, len = 0; + const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[NL80211_BAND_5GHZ]; + + for (i = 0; i < NL80211_VHT_NSS_MAX; i++) + len += scnprintf(buf + len, 
buflen - len, "%04x ", mask[i]); + len += scnprintf(buf + len, buflen - len, "\n"); + + return len; +} + +IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_5ghz); + +IEEE80211_IF_FILE(flags, flags, HEX); +IEEE80211_IF_FILE(state, state, LHEX); +IEEE80211_IF_LINK_FILE(txpower, conf->txpower, DEC); +IEEE80211_IF_LINK_FILE(ap_power_level, ap_power_level, DEC); +IEEE80211_IF_LINK_FILE(user_power_level, user_power_level, DEC); + +static ssize_t +ieee80211_if_fmt_hw_queues(const struct ieee80211_sub_if_data *sdata, + char *buf, int buflen) +{ + int len; + + len = scnprintf(buf, buflen, "AC queues: VO:%d VI:%d BE:%d BK:%d\n", + sdata->vif.hw_queue[IEEE80211_AC_VO], + sdata->vif.hw_queue[IEEE80211_AC_VI], + sdata->vif.hw_queue[IEEE80211_AC_BE], + sdata->vif.hw_queue[IEEE80211_AC_BK]); + + if (sdata->vif.type == NL80211_IFTYPE_AP) + len += scnprintf(buf + len, buflen - len, "cab queue: %d\n", + sdata->vif.cab_queue); + + return len; +} +IEEE80211_IF_FILE_R(hw_queues); + +/* STA attributes */ +IEEE80211_IF_FILE(bssid, deflink.u.mgd.bssid, MAC); +IEEE80211_IF_FILE(aid, vif.cfg.aid, DEC); +IEEE80211_IF_FILE(beacon_timeout, u.mgd.beacon_timeout, JIFFIES_TO_MS); + +static int ieee80211_set_smps(struct ieee80211_link_data *link, + enum ieee80211_smps_mode smps_mode) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + int err; + + if (sdata->vif.driver_flags & IEEE80211_VIF_DISABLE_SMPS_OVERRIDE) + return -EOPNOTSUPP; + + if (!(local->hw.wiphy->features & NL80211_FEATURE_STATIC_SMPS) && + smps_mode == IEEE80211_SMPS_STATIC) + return -EINVAL; + + /* auto should be dynamic if in PS mode */ + if (!(local->hw.wiphy->features & NL80211_FEATURE_DYNAMIC_SMPS) && + (smps_mode == IEEE80211_SMPS_DYNAMIC || + smps_mode == IEEE80211_SMPS_AUTOMATIC)) + return -EINVAL; + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + sdata_lock(sdata); + err = __ieee80211_request_smps_mgd(link->sdata, link, smps_mode); + sdata_unlock(sdata); + + return err; +} + +static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = { + [IEEE80211_SMPS_AUTOMATIC] = "auto", + [IEEE80211_SMPS_OFF] = "off", + [IEEE80211_SMPS_STATIC] = "static", + [IEEE80211_SMPS_DYNAMIC] = "dynamic", +}; + +static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_link_data *link, + char *buf, int buflen) +{ + if (link->sdata->vif.type == NL80211_IFTYPE_STATION) + return snprintf(buf, buflen, "request: %s\nused: %s\n", + smps_modes[link->u.mgd.req_smps], + smps_modes[link->smps_mode]); + return -EINVAL; +} + +static ssize_t ieee80211_if_parse_smps(struct ieee80211_link_data *link, + const char *buf, int buflen) +{ + enum ieee80211_smps_mode mode; + + for (mode = 0; mode < IEEE80211_SMPS_NUM_MODES; mode++) { + if (strncmp(buf, smps_modes[mode], buflen) == 0) { + int err = ieee80211_set_smps(link, mode); + if (!err) + return buflen; + return err; + } + } + + return -EINVAL; +} +IEEE80211_IF_LINK_FILE_RW(smps); + +static ssize_t ieee80211_if_parse_tkip_mic_test( + struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) +{ + struct ieee80211_local *local = sdata->local; + u8 addr[ETH_ALEN]; + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + __le16 fc; + + if (!mac_pton(buf, addr)) + return -EINVAL; + + if (!ieee80211_sdata_running(sdata)) + return -ENOTCONN; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 100); + if (!skb) + return -ENOMEM; + skb_reserve(skb, local->hw.extra_tx_headroom); + + hdr = skb_put_zero(skb, 24); + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | 
IEEE80211_STYPE_DATA); + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP: + fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); + /* DA BSSID SA */ + memcpy(hdr->addr1, addr, ETH_ALEN); + memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); + memcpy(hdr->addr3, sdata->vif.addr, ETH_ALEN); + break; + case NL80211_IFTYPE_STATION: + fc |= cpu_to_le16(IEEE80211_FCTL_TODS); + /* BSSID SA DA */ + sdata_lock(sdata); + if (!sdata->u.mgd.associated) { + sdata_unlock(sdata); + dev_kfree_skb(skb); + return -ENOTCONN; + } + memcpy(hdr->addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN); + memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); + memcpy(hdr->addr3, addr, ETH_ALEN); + sdata_unlock(sdata); + break; + default: + dev_kfree_skb(skb); + return -EOPNOTSUPP; + } + hdr->frame_control = fc; + + /* + * Add some length to the test frame to make it look bit more valid. + * The exact contents does not matter since the recipient is required + * to drop this because of the Michael MIC failure. + */ + skb_put_zero(skb, 50); + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE; + + ieee80211_tx_skb(sdata, skb); + + return buflen; +} +IEEE80211_IF_FILE_W(tkip_mic_test); + +static ssize_t ieee80211_if_parse_beacon_loss( + struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) +{ + if (!ieee80211_sdata_running(sdata) || !sdata->vif.cfg.assoc) + return -ENOTCONN; + + ieee80211_beacon_loss(&sdata->vif); + + return buflen; +} +IEEE80211_IF_FILE_W(beacon_loss); + +static ssize_t ieee80211_if_fmt_uapsd_queues( + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) +{ + const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + return snprintf(buf, buflen, "0x%x\n", ifmgd->uapsd_queues); +} + +static ssize_t ieee80211_if_parse_uapsd_queues( + struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + u8 val; + int ret; + + ret = kstrtou8(buf, 0, &val); + if (ret) + return ret; + + if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) + return -ERANGE; + + ifmgd->uapsd_queues = val; + + return buflen; +} +IEEE80211_IF_FILE_RW(uapsd_queues); + +static ssize_t ieee80211_if_fmt_uapsd_max_sp_len( + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) +{ + const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + return snprintf(buf, buflen, "0x%x\n", ifmgd->uapsd_max_sp_len); +} + +static ssize_t ieee80211_if_parse_uapsd_max_sp_len( + struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + unsigned long val; + int ret; + + ret = kstrtoul(buf, 0, &val); + if (ret) + return -EINVAL; + + if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK) + return -ERANGE; + + ifmgd->uapsd_max_sp_len = val; + + return buflen; +} +IEEE80211_IF_FILE_RW(uapsd_max_sp_len); + +static ssize_t ieee80211_if_fmt_tdls_wider_bw( + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) +{ + const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + bool tdls_wider_bw; + + tdls_wider_bw = ieee80211_hw_check(&sdata->local->hw, TDLS_WIDER_BW) && + !ifmgd->tdls_wider_bw_prohibited; + + return snprintf(buf, buflen, "%d\n", tdls_wider_bw); +} + +static ssize_t ieee80211_if_parse_tdls_wider_bw( + struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + u8 val; + int ret; + + ret = kstrtou8(buf, 0, &val); + if (ret) + return ret; + + ifmgd->tdls_wider_bw_prohibited = !val; + return buflen; +} 
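The IEEE80211_IF_FILE_RW(tdls_wider_bw) invocation that follows wires the fmt/parse pair above into a single debugfs file. As a rough sketch derived from the _IEEE80211_IF_FILE_* helper macros earlier in this file (illustrative only, not extra code in the patch), it expands to approximately:

/* Illustrative expansion of IEEE80211_IF_FILE_RW(tdls_wider_bw). */
static ssize_t ieee80211_if_read_tdls_wider_bw(struct file *file,
					       char __user *userbuf,
					       size_t count, loff_t *ppos)
{
	/* cast the typed fmt handler to the generic read callback */
	ssize_t (*fn)(const void *, char *, int) = (void *)
		((ssize_t (*)(const struct ieee80211_sub_if_data *, char *, int))
		 ieee80211_if_fmt_tdls_wider_bw);
	return ieee80211_if_read(file->private_data, userbuf, count, ppos, fn);
}

static ssize_t ieee80211_if_write_tdls_wider_bw(struct file *file,
						const char __user *userbuf,
						size_t count, loff_t *ppos)
{
	/* cast the typed parse handler to the generic write callback */
	ssize_t (*fn)(void *, const char *, int) = (void *)
		((ssize_t (*)(struct ieee80211_sub_if_data *, const char *, int))
		 ieee80211_if_parse_tdls_wider_bw);
	return ieee80211_if_write(file->private_data, userbuf, count, ppos, fn);
}

static const struct file_operations tdls_wider_bw_ops = {
	.read	= ieee80211_if_read_tdls_wider_bw,
	.write	= ieee80211_if_write_tdls_wider_bw,
	.open	= simple_open,
	.llseek	= generic_file_llseek,
};

add_sta_files() later registers this via DEBUGFS_ADD_MODE(tdls_wider_bw, 0600), so on station interfaces the knob appears as a read/write file in the per-netdev debugfs directory.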
+IEEE80211_IF_FILE_RW(tdls_wider_bw); + +/* AP attributes */ +IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC); +IEEE80211_IF_FILE(num_sta_ps, u.ap.ps.num_sta_ps, ATOMIC); +IEEE80211_IF_FILE(dtim_count, u.ap.ps.dtim_count, DEC); +IEEE80211_IF_FILE(num_mcast_sta_vlan, u.vlan.num_mcast_sta, ATOMIC); + +static ssize_t ieee80211_if_fmt_num_buffered_multicast( + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) +{ + return scnprintf(buf, buflen, "%u\n", + skb_queue_len(&sdata->u.ap.ps.bc_buf)); +} +IEEE80211_IF_FILE_R(num_buffered_multicast); + +static ssize_t ieee80211_if_fmt_aqm( + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) +{ + struct ieee80211_local *local = sdata->local; + struct txq_info *txqi; + int len; + + if (!sdata->vif.txq) + return 0; + + txqi = to_txq_info(sdata->vif.txq); + + spin_lock_bh(&local->fq.lock); + rcu_read_lock(); + + len = scnprintf(buf, + buflen, + "ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets\n" + "%u %u %u %u %u %u %u %u %u %u\n", + txqi->txq.ac, + txqi->tin.backlog_bytes, + txqi->tin.backlog_packets, + txqi->tin.flows, + txqi->cstats.drop_count, + txqi->cstats.ecn_mark, + txqi->tin.overlimit, + txqi->tin.collisions, + txqi->tin.tx_bytes, + txqi->tin.tx_packets); + + rcu_read_unlock(); + spin_unlock_bh(&local->fq.lock); + + return len; +} +IEEE80211_IF_FILE_R(aqm); + +IEEE80211_IF_FILE(multicast_to_unicast, u.ap.multicast_to_unicast, HEX); + +/* IBSS attributes */ +static ssize_t ieee80211_if_fmt_tsf( + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) +{ + struct ieee80211_local *local = sdata->local; + u64 tsf; + + tsf = drv_get_tsf(local, (struct ieee80211_sub_if_data *)sdata); + + return scnprintf(buf, buflen, "0x%016llx\n", (unsigned long long) tsf); +} + +static ssize_t ieee80211_if_parse_tsf( + struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) +{ + struct ieee80211_local *local = sdata->local; + unsigned long long tsf; + int ret; + int tsf_is_delta = 0; + + if (strncmp(buf, "reset", 5) == 0) { + if (local->ops->reset_tsf) { + drv_reset_tsf(local, sdata); + wiphy_info(local->hw.wiphy, "debugfs reset TSF\n"); + } + } else { + if (buflen > 10 && buf[1] == '=') { + if (buf[0] == '+') + tsf_is_delta = 1; + else if (buf[0] == '-') + tsf_is_delta = -1; + else + return -EINVAL; + buf += 2; + } + ret = kstrtoull(buf, 10, &tsf); + if (ret < 0) + return ret; + if (tsf_is_delta && local->ops->offset_tsf) { + drv_offset_tsf(local, sdata, tsf_is_delta * tsf); + wiphy_info(local->hw.wiphy, + "debugfs offset TSF by %018lld\n", + tsf_is_delta * tsf); + } else if (local->ops->set_tsf) { + if (tsf_is_delta) + tsf = drv_get_tsf(local, sdata) + + tsf_is_delta * tsf; + drv_set_tsf(local, sdata, tsf); + wiphy_info(local->hw.wiphy, + "debugfs set TSF to %#018llx\n", tsf); + } + } + + ieee80211_recalc_dtim(local, sdata); + return buflen; +} +IEEE80211_IF_FILE_RW(tsf); + +static ssize_t ieee80211_if_fmt_valid_links(const struct ieee80211_sub_if_data *sdata, + char *buf, int buflen) +{ + return snprintf(buf, buflen, "0x%x\n", sdata->vif.valid_links); +} +IEEE80211_IF_FILE_R(valid_links); + +static ssize_t ieee80211_if_fmt_active_links(const struct ieee80211_sub_if_data *sdata, + char *buf, int buflen) +{ + return snprintf(buf, buflen, "0x%x\n", sdata->vif.active_links); +} + +static ssize_t ieee80211_if_parse_active_links(struct ieee80211_sub_if_data *sdata, + const char *buf, int buflen) +{ + u16 active_links; + + if (kstrtou16(buf, 0, &active_links)) 
+ return -EINVAL; + + return ieee80211_set_active_links(&sdata->vif, active_links) ?: buflen; +} +IEEE80211_IF_FILE_RW(active_links); + +IEEE80211_IF_LINK_FILE(addr, conf->addr, MAC); + +#ifdef CONFIG_MAC80211_MESH +IEEE80211_IF_FILE(estab_plinks, u.mesh.estab_plinks, ATOMIC); + +/* Mesh stats attributes */ +IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC); +IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC); +IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC); +IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC); +IEEE80211_IF_FILE(dropped_frames_no_route, + u.mesh.mshstats.dropped_frames_no_route, DEC); + +/* Mesh parameters */ +IEEE80211_IF_FILE(dot11MeshMaxRetries, + u.mesh.mshcfg.dot11MeshMaxRetries, DEC); +IEEE80211_IF_FILE(dot11MeshRetryTimeout, + u.mesh.mshcfg.dot11MeshRetryTimeout, DEC); +IEEE80211_IF_FILE(dot11MeshConfirmTimeout, + u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC); +IEEE80211_IF_FILE(dot11MeshHoldingTimeout, + u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC); +IEEE80211_IF_FILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC); +IEEE80211_IF_FILE(element_ttl, u.mesh.mshcfg.element_ttl, DEC); +IEEE80211_IF_FILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC); +IEEE80211_IF_FILE(dot11MeshMaxPeerLinks, + u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC); +IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout, + u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC); +IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval, + u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC); +IEEE80211_IF_FILE(dot11MeshHWMPperrMinInterval, + u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC); +IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime, + u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC); +IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries, + u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC); +IEEE80211_IF_FILE(path_refresh_time, + u.mesh.mshcfg.path_refresh_time, DEC); +IEEE80211_IF_FILE(min_discovery_timeout, + u.mesh.mshcfg.min_discovery_timeout, DEC); +IEEE80211_IF_FILE(dot11MeshHWMPRootMode, + u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC); +IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol, + u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC); +IEEE80211_IF_FILE(dot11MeshHWMPRannInterval, + u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC); +IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC); +IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC); +IEEE80211_IF_FILE(ht_opmode, u.mesh.mshcfg.ht_opmode, DEC); +IEEE80211_IF_FILE(dot11MeshHWMPactivePathToRootTimeout, + u.mesh.mshcfg.dot11MeshHWMPactivePathToRootTimeout, DEC); +IEEE80211_IF_FILE(dot11MeshHWMProotInterval, + u.mesh.mshcfg.dot11MeshHWMProotInterval, DEC); +IEEE80211_IF_FILE(dot11MeshHWMPconfirmationInterval, + u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval, DEC); +IEEE80211_IF_FILE(power_mode, u.mesh.mshcfg.power_mode, DEC); +IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration, + u.mesh.mshcfg.dot11MeshAwakeWindowDuration, DEC); +IEEE80211_IF_FILE(dot11MeshConnectedToMeshGate, + u.mesh.mshcfg.dot11MeshConnectedToMeshGate, DEC); +IEEE80211_IF_FILE(dot11MeshNolearn, u.mesh.mshcfg.dot11MeshNolearn, DEC); +IEEE80211_IF_FILE(dot11MeshConnectedToAuthServer, + u.mesh.mshcfg.dot11MeshConnectedToAuthServer, DEC); +#endif + +#define DEBUGFS_ADD_MODE(name, mode) \ + debugfs_create_file(#name, mode, sdata->vif.debugfs_dir, \ + sdata, &name##_ops) + +#define DEBUGFS_ADD_X(_bits, _name, _mode) \ + debugfs_create_x##_bits(#_name, _mode, 
sdata->vif.debugfs_dir, \ + &sdata->vif._name) + +#define DEBUGFS_ADD_X8(_name, _mode) \ + DEBUGFS_ADD_X(8, _name, _mode) + +#define DEBUGFS_ADD_X16(_name, _mode) \ + DEBUGFS_ADD_X(16, _name, _mode) + +#define DEBUGFS_ADD_X32(_name, _mode) \ + DEBUGFS_ADD_X(32, _name, _mode) + +#define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400) + +static void add_common_files(struct ieee80211_sub_if_data *sdata) +{ + DEBUGFS_ADD(rc_rateidx_mask_2ghz); + DEBUGFS_ADD(rc_rateidx_mask_5ghz); + DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); + DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); + DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_2ghz); + DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz); + DEBUGFS_ADD(hw_queues); + + if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE && + sdata->vif.type != NL80211_IFTYPE_NAN) + DEBUGFS_ADD(aqm); +} + +static void add_sta_files(struct ieee80211_sub_if_data *sdata) +{ + DEBUGFS_ADD(bssid); + DEBUGFS_ADD(aid); + DEBUGFS_ADD(beacon_timeout); + DEBUGFS_ADD_MODE(tkip_mic_test, 0200); + DEBUGFS_ADD_MODE(beacon_loss, 0200); + DEBUGFS_ADD_MODE(uapsd_queues, 0600); + DEBUGFS_ADD_MODE(uapsd_max_sp_len, 0600); + DEBUGFS_ADD_MODE(tdls_wider_bw, 0600); + DEBUGFS_ADD_MODE(valid_links, 0400); + DEBUGFS_ADD_MODE(active_links, 0600); + DEBUGFS_ADD_X16(dormant_links, 0400); +} + +static void add_ap_files(struct ieee80211_sub_if_data *sdata) +{ + DEBUGFS_ADD(num_mcast_sta); + DEBUGFS_ADD(num_sta_ps); + DEBUGFS_ADD(dtim_count); + DEBUGFS_ADD(num_buffered_multicast); + DEBUGFS_ADD_MODE(tkip_mic_test, 0200); + DEBUGFS_ADD_MODE(multicast_to_unicast, 0600); +} + +static void add_vlan_files(struct ieee80211_sub_if_data *sdata) +{ + /* add num_mcast_sta_vlan using name num_mcast_sta */ + debugfs_create_file("num_mcast_sta", 0400, sdata->vif.debugfs_dir, + sdata, &num_mcast_sta_vlan_ops); +} + +static void add_ibss_files(struct ieee80211_sub_if_data *sdata) +{ + DEBUGFS_ADD_MODE(tsf, 0600); +} + +#ifdef CONFIG_MAC80211_MESH + +static void add_mesh_files(struct ieee80211_sub_if_data *sdata) +{ + DEBUGFS_ADD_MODE(tsf, 0600); + DEBUGFS_ADD_MODE(estab_plinks, 0400); +} + +static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) +{ + struct dentry *dir = debugfs_create_dir("mesh_stats", + sdata->vif.debugfs_dir); +#define MESHSTATS_ADD(name)\ + debugfs_create_file(#name, 0400, dir, sdata, &name##_ops) + + MESHSTATS_ADD(fwded_mcast); + MESHSTATS_ADD(fwded_unicast); + MESHSTATS_ADD(fwded_frames); + MESHSTATS_ADD(dropped_frames_ttl); + MESHSTATS_ADD(dropped_frames_no_route); +#undef MESHSTATS_ADD +} + +static void add_mesh_config(struct ieee80211_sub_if_data *sdata) +{ + struct dentry *dir = debugfs_create_dir("mesh_config", + sdata->vif.debugfs_dir); + +#define MESHPARAMS_ADD(name) \ + debugfs_create_file(#name, 0600, dir, sdata, &name##_ops) + + MESHPARAMS_ADD(dot11MeshMaxRetries); + MESHPARAMS_ADD(dot11MeshRetryTimeout); + MESHPARAMS_ADD(dot11MeshConfirmTimeout); + MESHPARAMS_ADD(dot11MeshHoldingTimeout); + MESHPARAMS_ADD(dot11MeshTTL); + MESHPARAMS_ADD(element_ttl); + MESHPARAMS_ADD(auto_open_plinks); + MESHPARAMS_ADD(dot11MeshMaxPeerLinks); + MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout); + MESHPARAMS_ADD(dot11MeshHWMPpreqMinInterval); + MESHPARAMS_ADD(dot11MeshHWMPperrMinInterval); + MESHPARAMS_ADD(dot11MeshHWMPnetDiameterTraversalTime); + MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries); + MESHPARAMS_ADD(path_refresh_time); + MESHPARAMS_ADD(min_discovery_timeout); + MESHPARAMS_ADD(dot11MeshHWMPRootMode); + MESHPARAMS_ADD(dot11MeshHWMPRannInterval); + MESHPARAMS_ADD(dot11MeshForwarding); + 
MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol); + MESHPARAMS_ADD(rssi_threshold); + MESHPARAMS_ADD(ht_opmode); + MESHPARAMS_ADD(dot11MeshHWMPactivePathToRootTimeout); + MESHPARAMS_ADD(dot11MeshHWMProotInterval); + MESHPARAMS_ADD(dot11MeshHWMPconfirmationInterval); + MESHPARAMS_ADD(power_mode); + MESHPARAMS_ADD(dot11MeshAwakeWindowDuration); + MESHPARAMS_ADD(dot11MeshConnectedToMeshGate); + MESHPARAMS_ADD(dot11MeshNolearn); + MESHPARAMS_ADD(dot11MeshConnectedToAuthServer); +#undef MESHPARAMS_ADD +} +#endif + +static void add_files(struct ieee80211_sub_if_data *sdata) +{ + if (!sdata->vif.debugfs_dir) + return; + + DEBUGFS_ADD(flags); + DEBUGFS_ADD(state); + + if (sdata->vif.type != NL80211_IFTYPE_MONITOR) + add_common_files(sdata); + + switch (sdata->vif.type) { + case NL80211_IFTYPE_MESH_POINT: +#ifdef CONFIG_MAC80211_MESH + add_mesh_files(sdata); + add_mesh_stats(sdata); + add_mesh_config(sdata); +#endif + break; + case NL80211_IFTYPE_STATION: + add_sta_files(sdata); + break; + case NL80211_IFTYPE_ADHOC: + add_ibss_files(sdata); + break; + case NL80211_IFTYPE_AP: + add_ap_files(sdata); + break; + case NL80211_IFTYPE_AP_VLAN: + add_vlan_files(sdata); + break; + default: + break; + } +} + +#undef DEBUGFS_ADD_MODE +#undef DEBUGFS_ADD + +#define DEBUGFS_ADD_MODE(dentry, name, mode) \ + debugfs_create_file(#name, mode, dentry, \ + link, &link_##name##_ops) + +#define DEBUGFS_ADD(dentry, name) DEBUGFS_ADD_MODE(dentry, name, 0400) + +static void add_link_files(struct ieee80211_link_data *link, + struct dentry *dentry) +{ + DEBUGFS_ADD(dentry, txpower); + DEBUGFS_ADD(dentry, user_power_level); + DEBUGFS_ADD(dentry, ap_power_level); + + switch (link->sdata->vif.type) { + case NL80211_IFTYPE_STATION: + DEBUGFS_ADD_MODE(dentry, smps, 0600); + break; + default: + break; + } +} + +void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata) +{ + char buf[10+IFNAMSIZ]; + + sprintf(buf, "netdev:%s", sdata->name); + sdata->vif.debugfs_dir = debugfs_create_dir(buf, + sdata->local->hw.wiphy->debugfsdir); + sdata->debugfs.subdir_stations = debugfs_create_dir("stations", + sdata->vif.debugfs_dir); + add_files(sdata); + + if (!(sdata->local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)) + add_link_files(&sdata->deflink, sdata->vif.debugfs_dir); +} + +void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) +{ + if (!sdata->vif.debugfs_dir) + return; + + debugfs_remove_recursive(sdata->vif.debugfs_dir); + sdata->vif.debugfs_dir = NULL; + sdata->debugfs.subdir_stations = NULL; +} + +void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) +{ + struct dentry *dir; + char buf[10 + IFNAMSIZ]; + + dir = sdata->vif.debugfs_dir; + + if (IS_ERR_OR_NULL(dir)) + return; + + sprintf(buf, "netdev:%s", sdata->name); + debugfs_rename(dir->d_parent, dir, dir->d_parent, buf); +} + +void ieee80211_link_debugfs_add(struct ieee80211_link_data *link) +{ + char link_dir_name[10]; + + if (WARN_ON(!link->sdata->vif.debugfs_dir)) + return; + + /* For now, this should not be called for non-MLO capable drivers */ + if (WARN_ON(!(link->sdata->local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO))) + return; + + snprintf(link_dir_name, sizeof(link_dir_name), + "link-%d", link->link_id); + + link->debugfs_dir = + debugfs_create_dir(link_dir_name, + link->sdata->vif.debugfs_dir); + + DEBUGFS_ADD(link->debugfs_dir, addr); + add_link_files(link, link->debugfs_dir); +} + +void ieee80211_link_debugfs_remove(struct ieee80211_link_data *link) +{ + if (!link->sdata->vif.debugfs_dir || !link->debugfs_dir) { 
+ link->debugfs_dir = NULL; + return; + } + + if (link->debugfs_dir == link->sdata->vif.debugfs_dir) { + WARN_ON(link != &link->sdata->deflink); + link->debugfs_dir = NULL; + return; + } + + debugfs_remove_recursive(link->debugfs_dir); + link->debugfs_dir = NULL; +} + +void ieee80211_link_debugfs_drv_add(struct ieee80211_link_data *link) +{ + if (WARN_ON(!link->debugfs_dir)) + return; + + drv_link_add_debugfs(link->sdata->local, link->sdata, + link->conf, link->debugfs_dir); +} + +void ieee80211_link_debugfs_drv_remove(struct ieee80211_link_data *link) +{ + if (!link || !link->debugfs_dir) + return; + + if (WARN_ON(link->debugfs_dir == link->sdata->vif.debugfs_dir)) + return; + + /* Recreate the directory excluding the driver data */ + debugfs_remove_recursive(link->debugfs_dir); + link->debugfs_dir = NULL; + + ieee80211_link_debugfs_add(link); +} diff --git a/net/mac80211/debugfs_netdev.h b/net/mac80211/debugfs_netdev.h new file mode 100644 index 0000000000..99e688dcab --- /dev/null +++ b/net/mac80211/debugfs_netdev.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* routines exported for debugfs handling */ + +#ifndef __IEEE80211_DEBUGFS_NETDEV_H +#define __IEEE80211_DEBUGFS_NETDEV_H + +#include "ieee80211_i.h" + +#ifdef CONFIG_MAC80211_DEBUGFS +void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata); +void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata); +void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata); + +void ieee80211_link_debugfs_add(struct ieee80211_link_data *link); +void ieee80211_link_debugfs_remove(struct ieee80211_link_data *link); + +void ieee80211_link_debugfs_drv_add(struct ieee80211_link_data *link); +void ieee80211_link_debugfs_drv_remove(struct ieee80211_link_data *link); +#else +static inline void ieee80211_debugfs_add_netdev( + struct ieee80211_sub_if_data *sdata) +{} +static inline void ieee80211_debugfs_remove_netdev( + struct ieee80211_sub_if_data *sdata) +{} +static inline void ieee80211_debugfs_rename_netdev( + struct ieee80211_sub_if_data *sdata) +{} + +static inline void ieee80211_link_debugfs_add(struct ieee80211_link_data *link) +{} +static inline void ieee80211_link_debugfs_remove(struct ieee80211_link_data *link) +{} + +static inline void ieee80211_link_debugfs_drv_add(struct ieee80211_link_data *link) +{} +static inline void ieee80211_link_debugfs_drv_remove(struct ieee80211_link_data *link) +{} +#endif + +#endif /* __IEEE80211_DEBUGFS_NETDEV_H */ diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c new file mode 100644 index 0000000000..5a97fb248c --- /dev/null +++ b/net/mac80211/debugfs_sta.c @@ -0,0 +1,1360 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2003-2005 Devicescape Software, Inc. 
+ * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018 - 2022 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <linux/ieee80211.h>
+#include "ieee80211_i.h"
+#include "debugfs.h"
+#include "debugfs_sta.h"
+#include "sta_info.h"
+#include "driver-ops.h"
+
+/* sta attributes */
+
+#define STA_READ(name, field, format_string) \
+static ssize_t sta_ ##name## _read(struct file *file, \
+ char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct sta_info *sta = file->private_data; \
+ return mac80211_format_buffer(userbuf, count, ppos, \
+ format_string, sta->field); \
+}
+#define STA_READ_D(name, field) STA_READ(name, field, "%d\n")
+
+#define STA_OPS(name) \
+static const struct file_operations sta_ ##name## _ops = { \
+ .read = sta_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define STA_OPS_RW(name) \
+static const struct file_operations sta_ ##name## _ops = { \
+ .read = sta_##name##_read, \
+ .write = sta_##name##_write, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define STA_FILE(name, field, format) \
+ STA_READ_##format(name, field) \
+ STA_OPS(name)
+
+STA_FILE(aid, sta.aid, D);
+
+static const char * const sta_flag_names[] = {
+#define FLAG(F) [WLAN_STA_##F] = #F
+ FLAG(AUTH),
+ FLAG(ASSOC),
+ FLAG(PS_STA),
+ FLAG(AUTHORIZED),
+ FLAG(SHORT_PREAMBLE),
+ FLAG(WDS),
+ FLAG(CLEAR_PS_FILT),
+ FLAG(MFP),
+ FLAG(BLOCK_BA),
+ FLAG(PS_DRIVER),
+ FLAG(PSPOLL),
+ FLAG(TDLS_PEER),
+ FLAG(TDLS_PEER_AUTH),
+ FLAG(TDLS_INITIATOR),
+ FLAG(TDLS_CHAN_SWITCH),
+ FLAG(TDLS_OFF_CHANNEL),
+ FLAG(TDLS_WIDER_BW),
+ FLAG(UAPSD),
+ FLAG(SP),
+ FLAG(4ADDR_EVENT),
+ FLAG(INSERTED),
+ FLAG(RATE_CONTROL),
+ FLAG(TOFFSET_KNOWN),
+ FLAG(MPSP_OWNER),
+ FLAG(MPSP_RECIPIENT),
+ FLAG(PS_DELIVER),
+ FLAG(USES_ENCRYPTION),
+ FLAG(DECAP_OFFLOAD),
+#undef FLAG
+};
+
+static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[16 * NUM_WLAN_STA_FLAGS], *pos = buf;
+ char *end = buf + sizeof(buf) - 1;
+ struct sta_info *sta = file->private_data;
+ unsigned int flg;
+
+ BUILD_BUG_ON(ARRAY_SIZE(sta_flag_names) != NUM_WLAN_STA_FLAGS);
+
+ for (flg = 0; flg < NUM_WLAN_STA_FLAGS; flg++) {
+ if (test_sta_flag(sta, flg))
+ pos += scnprintf(pos, end - pos, "%s\n",
+ sta_flag_names[flg]);
+ }
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
+}
+STA_OPS(flags);
+
+static ssize_t sta_num_ps_buf_frames_read(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct sta_info *sta = file->private_data;
+ char buf[17*IEEE80211_NUM_ACS], *p = buf;
+ int ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ p += scnprintf(p, sizeof(buf)+buf-p, "AC%d: %d\n", ac,
+ skb_queue_len(&sta->ps_tx_buf[ac]) +
+ skb_queue_len(&sta->tx_filtered[ac]));
+ return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+}
+STA_OPS(num_ps_buf_frames);
+
+static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[15*IEEE80211_NUM_TIDS], *p = buf;
+ int i;
+ struct sta_info *sta = file->private_data;
+ for (i = 0; i < IEEE80211_NUM_TIDS; i++)
+ p += scnprintf(p, sizeof(buf)+buf-p, "%x ",
+ le16_to_cpu(sta->last_seq_ctrl[i]));
+ p += scnprintf(p, sizeof(buf)+buf-p, "\n");
+ return simple_read_from_buffer(userbuf, count, ppos,
buf, p - buf); +} +STA_OPS(last_seq_ctrl); + +#define AQM_TXQ_ENTRY_LEN 130 + +static ssize_t sta_aqm_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct sta_info *sta = file->private_data; + struct ieee80211_local *local = sta->local; + size_t bufsz = AQM_TXQ_ENTRY_LEN * (IEEE80211_NUM_TIDS + 2); + char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf; + struct txq_info *txqi; + ssize_t rv; + int i; + + if (!buf) + return -ENOMEM; + + spin_lock_bh(&local->fq.lock); + rcu_read_lock(); + + p += scnprintf(p, + bufsz + buf - p, + "target %uus interval %uus ecn %s\n", + codel_time_to_us(sta->cparams.target), + codel_time_to_us(sta->cparams.interval), + sta->cparams.ecn ? "yes" : "no"); + p += scnprintf(p, + bufsz + buf - p, + "tid ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets flags\n"); + + for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { + if (!sta->sta.txq[i]) + continue; + txqi = to_txq_info(sta->sta.txq[i]); + p += scnprintf(p, bufsz + buf - p, + "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s%s)\n", + txqi->txq.tid, + txqi->txq.ac, + txqi->tin.backlog_bytes, + txqi->tin.backlog_packets, + txqi->tin.flows, + txqi->cstats.drop_count, + txqi->cstats.ecn_mark, + txqi->tin.overlimit, + txqi->tin.collisions, + txqi->tin.tx_bytes, + txqi->tin.tx_packets, + txqi->flags, + test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ? "STOP" : "RUN", + test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags) ? " AMPDU" : "", + test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "", + test_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ? " DIRTY" : ""); + } + + rcu_read_unlock(); + spin_unlock_bh(&local->fq.lock); + + rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return rv; +} +STA_OPS(aqm); + +static ssize_t sta_airtime_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct sta_info *sta = file->private_data; + struct ieee80211_local *local = sta->sdata->local; + size_t bufsz = 400; + char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf; + u64 rx_airtime = 0, tx_airtime = 0; + s32 deficit[IEEE80211_NUM_ACS]; + ssize_t rv; + int ac; + + if (!buf) + return -ENOMEM; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + spin_lock_bh(&local->active_txq_lock[ac]); + rx_airtime += sta->airtime[ac].rx_airtime; + tx_airtime += sta->airtime[ac].tx_airtime; + deficit[ac] = sta->airtime[ac].deficit; + spin_unlock_bh(&local->active_txq_lock[ac]); + } + + p += scnprintf(p, bufsz + buf - p, + "RX: %llu us\nTX: %llu us\nWeight: %u\n" + "Deficit: VO: %d us VI: %d us BE: %d us BK: %d us\n", + rx_airtime, tx_airtime, sta->airtime_weight, + deficit[0], deficit[1], deficit[2], deficit[3]); + + rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return rv; +} + +static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct sta_info *sta = file->private_data; + struct ieee80211_local *local = sta->sdata->local; + int ac; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + spin_lock_bh(&local->active_txq_lock[ac]); + sta->airtime[ac].rx_airtime = 0; + sta->airtime[ac].tx_airtime = 0; + sta->airtime[ac].deficit = sta->airtime_weight; + spin_unlock_bh(&local->active_txq_lock[ac]); + } + + return count; +} +STA_OPS_RW(airtime); + +static ssize_t sta_aql_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct sta_info *sta = file->private_data; + struct ieee80211_local *local = 
sta->sdata->local; + size_t bufsz = 400; + char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf; + u32 q_depth[IEEE80211_NUM_ACS]; + u32 q_limit_l[IEEE80211_NUM_ACS], q_limit_h[IEEE80211_NUM_ACS]; + ssize_t rv; + int ac; + + if (!buf) + return -ENOMEM; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + spin_lock_bh(&local->active_txq_lock[ac]); + q_limit_l[ac] = sta->airtime[ac].aql_limit_low; + q_limit_h[ac] = sta->airtime[ac].aql_limit_high; + spin_unlock_bh(&local->active_txq_lock[ac]); + q_depth[ac] = atomic_read(&sta->airtime[ac].aql_tx_pending); + } + + p += scnprintf(p, bufsz + buf - p, + "Q depth: VO: %u us VI: %u us BE: %u us BK: %u us\n" + "Q limit[low/high]: VO: %u/%u VI: %u/%u BE: %u/%u BK: %u/%u\n", + q_depth[0], q_depth[1], q_depth[2], q_depth[3], + q_limit_l[0], q_limit_h[0], q_limit_l[1], q_limit_h[1], + q_limit_l[2], q_limit_h[2], q_limit_l[3], q_limit_h[3]); + + rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return rv; +} + +static ssize_t sta_aql_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct sta_info *sta = file->private_data; + u32 ac, q_limit_l, q_limit_h; + char _buf[100] = {}, *buf = _buf; + + if (count > sizeof(_buf)) + return -EINVAL; + + if (copy_from_user(buf, userbuf, count)) + return -EFAULT; + + buf[sizeof(_buf) - 1] = '\0'; + if (sscanf(buf, "limit %u %u %u", &ac, &q_limit_l, &q_limit_h) + != 3) + return -EINVAL; + + if (ac >= IEEE80211_NUM_ACS) + return -EINVAL; + + sta->airtime[ac].aql_limit_low = q_limit_l; + sta->airtime[ac].aql_limit_high = q_limit_h; + + return count; +} +STA_OPS_RW(aql); + + +static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + char *buf, *p; + ssize_t bufsz = 71 + IEEE80211_NUM_TIDS * 40; + int i; + struct sta_info *sta = file->private_data; + struct tid_ampdu_rx *tid_rx; + struct tid_ampdu_tx *tid_tx; + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; + + rcu_read_lock(); + + p += scnprintf(p, bufsz + buf - p, "next dialog_token: %#02x\n", + sta->ampdu_mlme.dialog_token_allocator + 1); + p += scnprintf(p, bufsz + buf - p, + "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tpending\n"); + + for (i = 0; i < IEEE80211_NUM_TIDS; i++) { + bool tid_rx_valid; + + tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]); + tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[i]); + tid_rx_valid = test_bit(i, sta->ampdu_mlme.agg_session_valid); + + p += scnprintf(p, bufsz + buf - p, "%02d", i); + p += scnprintf(p, bufsz + buf - p, "\t\t%x", + tid_rx_valid); + p += scnprintf(p, bufsz + buf - p, "\t%#.2x", + tid_rx_valid ? + sta->ampdu_mlme.tid_rx_token[i] : 0); + p += scnprintf(p, bufsz + buf - p, "\t%#.3x", + tid_rx ? tid_rx->ssn : 0); + + p += scnprintf(p, bufsz + buf - p, "\t\t%x", !!tid_tx); + p += scnprintf(p, bufsz + buf - p, "\t%#.2x", + tid_tx ? tid_tx->dialog_token : 0); + p += scnprintf(p, bufsz + buf - p, "\t%03d", + tid_tx ? 
skb_queue_len(&tid_tx->pending) : 0); + p += scnprintf(p, bufsz + buf - p, "\n"); + } + rcu_read_unlock(); + + ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return ret; +} + +static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + char _buf[25] = {}, *buf = _buf; + struct sta_info *sta = file->private_data; + bool start, tx; + unsigned long tid; + char *pos; + int ret, timeout = 5000; + + if (count > sizeof(_buf)) + return -EINVAL; + + if (copy_from_user(buf, userbuf, count)) + return -EFAULT; + + buf[sizeof(_buf) - 1] = '\0'; + pos = buf; + buf = strsep(&pos, " "); + if (!buf) + return -EINVAL; + + if (!strcmp(buf, "tx")) + tx = true; + else if (!strcmp(buf, "rx")) + tx = false; + else + return -EINVAL; + + buf = strsep(&pos, " "); + if (!buf) + return -EINVAL; + if (!strcmp(buf, "start")) { + start = true; + if (!tx) + return -EINVAL; + } else if (!strcmp(buf, "stop")) { + start = false; + } else { + return -EINVAL; + } + + buf = strsep(&pos, " "); + if (!buf) + return -EINVAL; + if (sscanf(buf, "timeout=%d", &timeout) == 1) { + buf = strsep(&pos, " "); + if (!buf || !tx || !start) + return -EINVAL; + } + + ret = kstrtoul(buf, 0, &tid); + if (ret || tid >= IEEE80211_NUM_TIDS) + return -EINVAL; + + if (tx) { + if (start) + ret = ieee80211_start_tx_ba_session(&sta->sta, tid, + timeout); + else + ret = ieee80211_stop_tx_ba_session(&sta->sta, tid); + } else { + __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, + 3, true); + ret = 0; + } + + return ret ?: count; +} +STA_OPS_RW(agg_status); + +/* link sta attributes */ +#define LINK_STA_OPS(name) \ +static const struct file_operations link_sta_ ##name## _ops = { \ + .read = link_sta_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + +static ssize_t link_sta_addr_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct link_sta_info *link_sta = file->private_data; + u8 mac[3 * ETH_ALEN + 1]; + + snprintf(mac, sizeof(mac), "%pM\n", link_sta->pub->addr); + + return simple_read_from_buffer(userbuf, count, ppos, mac, 3 * ETH_ALEN); +} + +LINK_STA_OPS(addr); + +static ssize_t link_sta_ht_capa_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ +#define PRINT_HT_CAP(_cond, _str) \ + do { \ + if (_cond) \ + p += scnprintf(p, bufsz + buf - p, "\t" _str "\n"); \ + } while (0) + char *buf, *p; + int i; + ssize_t bufsz = 512; + struct link_sta_info *link_sta = file->private_data; + struct ieee80211_sta_ht_cap *htc = &link_sta->pub->ht_cap; + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; + + p += scnprintf(p, bufsz + buf - p, "ht %ssupported\n", + htc->ht_supported ? 
"" : "not "); + if (htc->ht_supported) { + p += scnprintf(p, bufsz + buf - p, "cap: %#.4x\n", htc->cap); + + PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC"); + PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40"); + PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20"); + + PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 0, "Static SM Power Save"); + PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 1, "Dynamic SM Power Save"); + PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 3, "SM Power Save disabled"); + + PRINT_HT_CAP((htc->cap & BIT(4)), "RX Greenfield"); + PRINT_HT_CAP((htc->cap & BIT(5)), "RX HT20 SGI"); + PRINT_HT_CAP((htc->cap & BIT(6)), "RX HT40 SGI"); + PRINT_HT_CAP((htc->cap & BIT(7)), "TX STBC"); + + PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 0, "No RX STBC"); + PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 1, "RX STBC 1-stream"); + PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 2, "RX STBC 2-streams"); + PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 3, "RX STBC 3-streams"); + + PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack"); + + PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: " + "3839 bytes"); + PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: " + "7935 bytes"); + + /* + * For beacons and probe response this would mean the BSS + * does or does not allow the usage of DSSS/CCK HT40. + * Otherwise it means the STA does or does not use + * DSSS/CCK HT40. + */ + PRINT_HT_CAP((htc->cap & BIT(12)), "DSSS/CCK HT40"); + PRINT_HT_CAP(!(htc->cap & BIT(12)), "No DSSS/CCK HT40"); + + /* BIT(13) is reserved */ + + PRINT_HT_CAP((htc->cap & BIT(14)), "40 MHz Intolerant"); + + PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection"); + + p += scnprintf(p, bufsz + buf - p, "ampdu factor/density: %d/%d\n", + htc->ampdu_factor, htc->ampdu_density); + p += scnprintf(p, bufsz + buf - p, "MCS mask:"); + + for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) + p += scnprintf(p, bufsz + buf - p, " %.2x", + htc->mcs.rx_mask[i]); + p += scnprintf(p, bufsz + buf - p, "\n"); + + /* If not set this is meaningless */ + if (le16_to_cpu(htc->mcs.rx_highest)) { + p += scnprintf(p, bufsz + buf - p, + "MCS rx highest: %d Mbps\n", + le16_to_cpu(htc->mcs.rx_highest)); + } + + p += scnprintf(p, bufsz + buf - p, "MCS tx params: %x\n", + htc->mcs.tx_params); + } + + ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return ret; +} +LINK_STA_OPS(ht_capa); + +static ssize_t link_sta_vht_capa_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + char *buf, *p; + struct link_sta_info *link_sta = file->private_data; + struct ieee80211_sta_vht_cap *vhtc = &link_sta->pub->vht_cap; + ssize_t ret; + ssize_t bufsz = 512; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; + + p += scnprintf(p, bufsz + buf - p, "VHT %ssupported\n", + vhtc->vht_supported ? 
"" : "not "); + if (vhtc->vht_supported) { + p += scnprintf(p, bufsz + buf - p, "cap: %#.8x\n", + vhtc->cap); +#define PFLAG(a, b) \ + do { \ + if (vhtc->cap & IEEE80211_VHT_CAP_ ## a) \ + p += scnprintf(p, bufsz + buf - p, \ + "\t\t%s\n", b); \ + } while (0) + + switch (vhtc->cap & 0x3) { + case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895: + p += scnprintf(p, bufsz + buf - p, + "\t\tMAX-MPDU-3895\n"); + break; + case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991: + p += scnprintf(p, bufsz + buf - p, + "\t\tMAX-MPDU-7991\n"); + break; + case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: + p += scnprintf(p, bufsz + buf - p, + "\t\tMAX-MPDU-11454\n"); + break; + default: + p += scnprintf(p, bufsz + buf - p, + "\t\tMAX-MPDU-UNKNOWN\n"); + } + switch (vhtc->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { + case 0: + p += scnprintf(p, bufsz + buf - p, + "\t\t80Mhz\n"); + break; + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: + p += scnprintf(p, bufsz + buf - p, + "\t\t160Mhz\n"); + break; + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: + p += scnprintf(p, bufsz + buf - p, + "\t\t80+80Mhz\n"); + break; + default: + p += scnprintf(p, bufsz + buf - p, + "\t\tUNKNOWN-MHZ: 0x%x\n", + (vhtc->cap >> 2) & 0x3); + } + PFLAG(RXLDPC, "RXLDPC"); + PFLAG(SHORT_GI_80, "SHORT-GI-80"); + PFLAG(SHORT_GI_160, "SHORT-GI-160"); + PFLAG(TXSTBC, "TXSTBC"); + p += scnprintf(p, bufsz + buf - p, + "\t\tRXSTBC_%d\n", (vhtc->cap >> 8) & 0x7); + PFLAG(SU_BEAMFORMER_CAPABLE, "SU-BEAMFORMER-CAPABLE"); + PFLAG(SU_BEAMFORMEE_CAPABLE, "SU-BEAMFORMEE-CAPABLE"); + p += scnprintf(p, bufsz + buf - p, + "\t\tBEAMFORMEE-STS: 0x%x\n", + (vhtc->cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK) >> + IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT); + p += scnprintf(p, bufsz + buf - p, + "\t\tSOUNDING-DIMENSIONS: 0x%x\n", + (vhtc->cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK) + >> IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT); + PFLAG(MU_BEAMFORMER_CAPABLE, "MU-BEAMFORMER-CAPABLE"); + PFLAG(MU_BEAMFORMEE_CAPABLE, "MU-BEAMFORMEE-CAPABLE"); + PFLAG(VHT_TXOP_PS, "TXOP-PS"); + PFLAG(HTC_VHT, "HTC-VHT"); + p += scnprintf(p, bufsz + buf - p, + "\t\tMPDU-LENGTH-EXPONENT: 0x%x\n", + (vhtc->cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT); + PFLAG(VHT_LINK_ADAPTATION_VHT_UNSOL_MFB, + "LINK-ADAPTATION-VHT-UNSOL-MFB"); + p += scnprintf(p, bufsz + buf - p, + "\t\tLINK-ADAPTATION-VHT-MRQ-MFB: 0x%x\n", + (vhtc->cap & IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB) >> 26); + PFLAG(RX_ANTENNA_PATTERN, "RX-ANTENNA-PATTERN"); + PFLAG(TX_ANTENNA_PATTERN, "TX-ANTENNA-PATTERN"); + + p += scnprintf(p, bufsz + buf - p, "RX MCS: %.4x\n", + le16_to_cpu(vhtc->vht_mcs.rx_mcs_map)); + if (vhtc->vht_mcs.rx_highest) + p += scnprintf(p, bufsz + buf - p, + "MCS RX highest: %d Mbps\n", + le16_to_cpu(vhtc->vht_mcs.rx_highest)); + p += scnprintf(p, bufsz + buf - p, "TX MCS: %.4x\n", + le16_to_cpu(vhtc->vht_mcs.tx_mcs_map)); + if (vhtc->vht_mcs.tx_highest) + p += scnprintf(p, bufsz + buf - p, + "MCS TX highest: %d Mbps\n", + le16_to_cpu(vhtc->vht_mcs.tx_highest)); +#undef PFLAG + } + + ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return ret; +} +LINK_STA_OPS(vht_capa); + +static ssize_t link_sta_he_capa_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + char *buf, *p; + size_t buf_sz = PAGE_SIZE; + struct link_sta_info *link_sta = file->private_data; + struct ieee80211_sta_he_cap *hec = &link_sta->pub->he_cap; + struct ieee80211_he_mcs_nss_supp *nss = 
&hec->he_mcs_nss_supp; + u8 ppe_size; + u8 *cap; + int i; + ssize_t ret; + + buf = kmalloc(buf_sz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; + + p += scnprintf(p, buf_sz + buf - p, "HE %ssupported\n", + hec->has_he ? "" : "not "); + if (!hec->has_he) + goto out; + + cap = hec->he_cap_elem.mac_cap_info; + p += scnprintf(p, buf_sz + buf - p, + "MAC-CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n", + cap[0], cap[1], cap[2], cap[3], cap[4], cap[5]); + +#define PRINT(fmt, ...) \ + p += scnprintf(p, buf_sz + buf - p, "\t\t" fmt "\n", \ + ##__VA_ARGS__) + +#define PFLAG(t, n, a, b) \ + do { \ + if (cap[n] & IEEE80211_HE_##t##_CAP##n##_##a) \ + PRINT("%s", b); \ + } while (0) + +#define PFLAG_RANGE(t, i, n, s, m, off, fmt) \ + do { \ + u8 msk = IEEE80211_HE_##t##_CAP##i##_##n##_MASK; \ + u8 idx = ((cap[i] & msk) >> (ffs(msk) - 1)) + off; \ + PRINT(fmt, (s << idx) + (m * idx)); \ + } while (0) + +#define PFLAG_RANGE_DEFAULT(t, i, n, s, m, off, fmt, a, b) \ + do { \ + if (cap[i] == IEEE80211_HE_##t ##_CAP##i##_##n##_##a) { \ + PRINT("%s", b); \ + break; \ + } \ + PFLAG_RANGE(t, i, n, s, m, off, fmt); \ + } while (0) + + PFLAG(MAC, 0, HTC_HE, "HTC-HE"); + PFLAG(MAC, 0, TWT_REQ, "TWT-REQ"); + PFLAG(MAC, 0, TWT_RES, "TWT-RES"); + PFLAG_RANGE_DEFAULT(MAC, 0, DYNAMIC_FRAG, 0, 1, 0, + "DYNAMIC-FRAG-LEVEL-%d", NOT_SUPP, "NOT-SUPP"); + PFLAG_RANGE_DEFAULT(MAC, 0, MAX_NUM_FRAG_MSDU, 1, 0, 0, + "MAX-NUM-FRAG-MSDU-%d", UNLIMITED, "UNLIMITED"); + + PFLAG_RANGE_DEFAULT(MAC, 1, MIN_FRAG_SIZE, 128, 0, -1, + "MIN-FRAG-SIZE-%d", UNLIMITED, "UNLIMITED"); + PFLAG_RANGE_DEFAULT(MAC, 1, TF_MAC_PAD_DUR, 0, 8, 0, + "TF-MAC-PAD-DUR-%dUS", MASK, "UNKNOWN"); + PFLAG_RANGE(MAC, 1, MULTI_TID_AGG_RX_QOS, 0, 1, 1, + "MULTI-TID-AGG-RX-QOS-%d"); + + if (cap[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) { + switch (((cap[2] << 1) | (cap[1] >> 7)) & 0x3) { + case 0: + PRINT("LINK-ADAPTATION-NO-FEEDBACK"); + break; + case 1: + PRINT("LINK-ADAPTATION-RESERVED"); + break; + case 2: + PRINT("LINK-ADAPTATION-UNSOLICITED-FEEDBACK"); + break; + case 3: + PRINT("LINK-ADAPTATION-BOTH"); + break; + } + } + + PFLAG(MAC, 2, ALL_ACK, "ALL-ACK"); + PFLAG(MAC, 2, TRS, "TRS"); + PFLAG(MAC, 2, BSR, "BSR"); + PFLAG(MAC, 2, BCAST_TWT, "BCAST-TWT"); + PFLAG(MAC, 2, 32BIT_BA_BITMAP, "32BIT-BA-BITMAP"); + PFLAG(MAC, 2, MU_CASCADING, "MU-CASCADING"); + PFLAG(MAC, 2, ACK_EN, "ACK-EN"); + + PFLAG(MAC, 3, OMI_CONTROL, "OMI-CONTROL"); + PFLAG(MAC, 3, OFDMA_RA, "OFDMA-RA"); + + switch (cap[3] & IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) { + case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_0: + PRINT("MAX-AMPDU-LEN-EXP-USE-EXT-0"); + break; + case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1: + PRINT("MAX-AMPDU-LEN-EXP-VHT-EXT-1"); + break; + case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2: + PRINT("MAX-AMPDU-LEN-EXP-VHT-EXT-2"); + break; + case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3: + PRINT("MAX-AMPDU-LEN-EXP-VHT-EXT-3"); + break; + } + + PFLAG(MAC, 3, AMSDU_FRAG, "AMSDU-FRAG"); + PFLAG(MAC, 3, FLEX_TWT_SCHED, "FLEX-TWT-SCHED"); + PFLAG(MAC, 3, RX_CTRL_FRAME_TO_MULTIBSS, "RX-CTRL-FRAME-TO-MULTIBSS"); + + PFLAG(MAC, 4, BSRP_BQRP_A_MPDU_AGG, "BSRP-BQRP-A-MPDU-AGG"); + PFLAG(MAC, 4, QTP, "QTP"); + PFLAG(MAC, 4, BQR, "BQR"); + PFLAG(MAC, 4, PSR_RESP, "PSR-RESP"); + PFLAG(MAC, 4, NDP_FB_REP, "NDP-FB-REP"); + PFLAG(MAC, 4, OPS, "OPS"); + PFLAG(MAC, 4, AMSDU_IN_AMPDU, "AMSDU-IN-AMPDU"); + + PRINT("MULTI-TID-AGG-TX-QOS-%d", ((cap[5] << 1) | (cap[4] >> 7)) & 0x7); + + PFLAG(MAC, 5, SUBCHAN_SELECTIVE_TRANSMISSION, + "SUBCHAN-SELECTIVE-TRANSMISSION"); + 
PFLAG(MAC, 5, UL_2x996_TONE_RU, "UL-2x996-TONE-RU"); + PFLAG(MAC, 5, OM_CTRL_UL_MU_DATA_DIS_RX, "OM-CTRL-UL-MU-DATA-DIS-RX"); + PFLAG(MAC, 5, HE_DYNAMIC_SM_PS, "HE-DYNAMIC-SM-PS"); + PFLAG(MAC, 5, PUNCTURED_SOUNDING, "PUNCTURED-SOUNDING"); + PFLAG(MAC, 5, HT_VHT_TRIG_FRAME_RX, "HT-VHT-TRIG-FRAME-RX"); + + cap = hec->he_cap_elem.phy_cap_info; + p += scnprintf(p, buf_sz + buf - p, + "PHY CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n", + cap[0], cap[1], cap[2], cap[3], cap[4], cap[5], cap[6], + cap[7], cap[8], cap[9], cap[10]); + + PFLAG(PHY, 0, CHANNEL_WIDTH_SET_40MHZ_IN_2G, + "CHANNEL-WIDTH-SET-40MHZ-IN-2G"); + PFLAG(PHY, 0, CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G, + "CHANNEL-WIDTH-SET-40MHZ-80MHZ-IN-5G"); + PFLAG(PHY, 0, CHANNEL_WIDTH_SET_160MHZ_IN_5G, + "CHANNEL-WIDTH-SET-160MHZ-IN-5G"); + PFLAG(PHY, 0, CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, + "CHANNEL-WIDTH-SET-80PLUS80-MHZ-IN-5G"); + PFLAG(PHY, 0, CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G, + "CHANNEL-WIDTH-SET-RU-MAPPING-IN-2G"); + PFLAG(PHY, 0, CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G, + "CHANNEL-WIDTH-SET-RU-MAPPING-IN-5G"); + + switch (cap[1] & IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK) { + case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ: + PRINT("PREAMBLE-PUNC-RX-80MHZ-ONLY-SECOND-20MHZ"); + break; + case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ: + PRINT("PREAMBLE-PUNC-RX-80MHZ-ONLY-SECOND-40MHZ"); + break; + case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ: + PRINT("PREAMBLE-PUNC-RX-160MHZ-ONLY-SECOND-20MHZ"); + break; + case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ: + PRINT("PREAMBLE-PUNC-RX-160MHZ-ONLY-SECOND-40MHZ"); + break; + } + + PFLAG(PHY, 1, DEVICE_CLASS_A, + "IEEE80211-HE-PHY-CAP1-DEVICE-CLASS-A"); + PFLAG(PHY, 1, LDPC_CODING_IN_PAYLOAD, + "LDPC-CODING-IN-PAYLOAD"); + PFLAG(PHY, 1, HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US, + "HY-CAP1-HE-LTF-AND-GI-FOR-HE-PPDUS-0-8US"); + PRINT("MIDAMBLE-RX-MAX-NSTS-%d", ((cap[2] << 1) | (cap[1] >> 7)) & 0x3); + + PFLAG(PHY, 2, NDP_4x_LTF_AND_3_2US, "NDP-4X-LTF-AND-3-2US"); + PFLAG(PHY, 2, STBC_TX_UNDER_80MHZ, "STBC-TX-UNDER-80MHZ"); + PFLAG(PHY, 2, STBC_RX_UNDER_80MHZ, "STBC-RX-UNDER-80MHZ"); + PFLAG(PHY, 2, DOPPLER_TX, "DOPPLER-TX"); + PFLAG(PHY, 2, DOPPLER_RX, "DOPPLER-RX"); + PFLAG(PHY, 2, UL_MU_FULL_MU_MIMO, "UL-MU-FULL-MU-MIMO"); + PFLAG(PHY, 2, UL_MU_PARTIAL_MU_MIMO, "UL-MU-PARTIAL-MU-MIMO"); + + switch (cap[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK) { + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM: + PRINT("DCM-MAX-CONST-TX-NO-DCM"); + break; + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK: + PRINT("DCM-MAX-CONST-TX-BPSK"); + break; + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK: + PRINT("DCM-MAX-CONST-TX-QPSK"); + break; + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM: + PRINT("DCM-MAX-CONST-TX-16-QAM"); + break; + } + + PFLAG(PHY, 3, DCM_MAX_TX_NSS_1, "DCM-MAX-TX-NSS-1"); + PFLAG(PHY, 3, DCM_MAX_TX_NSS_2, "DCM-MAX-TX-NSS-2"); + + switch (cap[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK) { + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM: + PRINT("DCM-MAX-CONST-RX-NO-DCM"); + break; + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK: + PRINT("DCM-MAX-CONST-RX-BPSK"); + break; + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK: + PRINT("DCM-MAX-CONST-RX-QPSK"); + break; + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM: + PRINT("DCM-MAX-CONST-RX-16-QAM"); + break; + } + + PFLAG(PHY, 3, DCM_MAX_RX_NSS_1, "DCM-MAX-RX-NSS-1"); + PFLAG(PHY, 3, DCM_MAX_RX_NSS_2, 
"DCM-MAX-RX-NSS-2"); + PFLAG(PHY, 3, RX_PARTIAL_BW_SU_IN_20MHZ_MU, + "RX-PARTIAL-BW-SU-IN-20MHZ-MU"); + PFLAG(PHY, 3, SU_BEAMFORMER, "SU-BEAMFORMER"); + + PFLAG(PHY, 4, SU_BEAMFORMEE, "SU-BEAMFORMEE"); + PFLAG(PHY, 4, MU_BEAMFORMER, "MU-BEAMFORMER"); + + PFLAG_RANGE(PHY, 4, BEAMFORMEE_MAX_STS_UNDER_80MHZ, 0, 1, 4, + "BEAMFORMEE-MAX-STS-UNDER-%d"); + PFLAG_RANGE(PHY, 4, BEAMFORMEE_MAX_STS_ABOVE_80MHZ, 0, 1, 4, + "BEAMFORMEE-MAX-STS-ABOVE-%d"); + + PFLAG_RANGE(PHY, 5, BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ, 0, 1, 1, + "NUM-SND-DIM-UNDER-80MHZ-%d"); + PFLAG_RANGE(PHY, 5, BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ, 0, 1, 1, + "NUM-SND-DIM-ABOVE-80MHZ-%d"); + PFLAG(PHY, 5, NG16_SU_FEEDBACK, "NG16-SU-FEEDBACK"); + PFLAG(PHY, 5, NG16_MU_FEEDBACK, "NG16-MU-FEEDBACK"); + + PFLAG(PHY, 6, CODEBOOK_SIZE_42_SU, "CODEBOOK-SIZE-42-SU"); + PFLAG(PHY, 6, CODEBOOK_SIZE_75_MU, "CODEBOOK-SIZE-75-MU"); + PFLAG(PHY, 6, TRIG_SU_BEAMFORMING_FB, "TRIG-SU-BEAMFORMING-FB"); + PFLAG(PHY, 6, TRIG_MU_BEAMFORMING_PARTIAL_BW_FB, + "MU-BEAMFORMING-PARTIAL-BW-FB"); + PFLAG(PHY, 6, TRIG_CQI_FB, "TRIG-CQI-FB"); + PFLAG(PHY, 6, PARTIAL_BW_EXT_RANGE, "PARTIAL-BW-EXT-RANGE"); + PFLAG(PHY, 6, PARTIAL_BANDWIDTH_DL_MUMIMO, + "PARTIAL-BANDWIDTH-DL-MUMIMO"); + PFLAG(PHY, 6, PPE_THRESHOLD_PRESENT, "PPE-THRESHOLD-PRESENT"); + + PFLAG(PHY, 7, PSR_BASED_SR, "PSR-BASED-SR"); + PFLAG(PHY, 7, POWER_BOOST_FACTOR_SUPP, "POWER-BOOST-FACTOR-SUPP"); + PFLAG(PHY, 7, HE_SU_MU_PPDU_4XLTF_AND_08_US_GI, + "HE-SU-MU-PPDU-4XLTF-AND-08-US-GI"); + PFLAG_RANGE(PHY, 7, MAX_NC, 0, 1, 1, "MAX-NC-%d"); + PFLAG(PHY, 7, STBC_TX_ABOVE_80MHZ, "STBC-TX-ABOVE-80MHZ"); + PFLAG(PHY, 7, STBC_RX_ABOVE_80MHZ, "STBC-RX-ABOVE-80MHZ"); + + PFLAG(PHY, 8, HE_ER_SU_PPDU_4XLTF_AND_08_US_GI, + "HE-ER-SU-PPDU-4XLTF-AND-08-US-GI"); + PFLAG(PHY, 8, 20MHZ_IN_40MHZ_HE_PPDU_IN_2G, + "20MHZ-IN-40MHZ-HE-PPDU-IN-2G"); + PFLAG(PHY, 8, 20MHZ_IN_160MHZ_HE_PPDU, "20MHZ-IN-160MHZ-HE-PPDU"); + PFLAG(PHY, 8, 80MHZ_IN_160MHZ_HE_PPDU, "80MHZ-IN-160MHZ-HE-PPDU"); + PFLAG(PHY, 8, HE_ER_SU_1XLTF_AND_08_US_GI, + "HE-ER-SU-1XLTF-AND-08-US-GI"); + PFLAG(PHY, 8, MIDAMBLE_RX_TX_2X_AND_1XLTF, + "MIDAMBLE-RX-TX-2X-AND-1XLTF"); + + switch (cap[8] & IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK) { + case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242: + PRINT("DCM-MAX-RU-242"); + break; + case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484: + PRINT("DCM-MAX-RU-484"); + break; + case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996: + PRINT("DCM-MAX-RU-996"); + break; + case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996: + PRINT("DCM-MAX-RU-2x996"); + break; + } + + PFLAG(PHY, 9, LONGER_THAN_16_SIGB_OFDM_SYM, + "LONGER-THAN-16-SIGB-OFDM-SYM"); + PFLAG(PHY, 9, NON_TRIGGERED_CQI_FEEDBACK, + "NON-TRIGGERED-CQI-FEEDBACK"); + PFLAG(PHY, 9, TX_1024_QAM_LESS_THAN_242_TONE_RU, + "TX-1024-QAM-LESS-THAN-242-TONE-RU"); + PFLAG(PHY, 9, RX_1024_QAM_LESS_THAN_242_TONE_RU, + "RX-1024-QAM-LESS-THAN-242-TONE-RU"); + PFLAG(PHY, 9, RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB, + "RX-FULL-BW-SU-USING-MU-WITH-COMP-SIGB"); + PFLAG(PHY, 9, RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB, + "RX-FULL-BW-SU-USING-MU-WITH-NON-COMP-SIGB"); + + switch (u8_get_bits(cap[9], + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)) { + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US: + PRINT("NOMINAL-PACKET-PADDING-0US"); + break; + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US: + PRINT("NOMINAL-PACKET-PADDING-8US"); + break; + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US: + PRINT("NOMINAL-PACKET-PADDING-16US"); + break; + } + +#undef PFLAG_RANGE_DEFAULT +#undef PFLAG_RANGE +#undef PFLAG + +#define 
PRINT_NSS_SUPP(f, n) \ + do { \ + int _i; \ + u16 v = le16_to_cpu(nss->f); \ + p += scnprintf(p, buf_sz + buf - p, n ": %#.4x\n", v); \ + for (_i = 0; _i < 8; _i += 2) { \ + switch ((v >> _i) & 0x3) { \ + case 0: \ + PRINT(n "-%d-SUPPORT-0-7", _i / 2); \ + break; \ + case 1: \ + PRINT(n "-%d-SUPPORT-0-9", _i / 2); \ + break; \ + case 2: \ + PRINT(n "-%d-SUPPORT-0-11", _i / 2); \ + break; \ + case 3: \ + PRINT(n "-%d-NOT-SUPPORTED", _i / 2); \ + break; \ + } \ + } \ + } while (0) + + PRINT_NSS_SUPP(rx_mcs_80, "RX-MCS-80"); + PRINT_NSS_SUPP(tx_mcs_80, "TX-MCS-80"); + + if (cap[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) { + PRINT_NSS_SUPP(rx_mcs_160, "RX-MCS-160"); + PRINT_NSS_SUPP(tx_mcs_160, "TX-MCS-160"); + } + + if (cap[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) { + PRINT_NSS_SUPP(rx_mcs_80p80, "RX-MCS-80P80"); + PRINT_NSS_SUPP(tx_mcs_80p80, "TX-MCS-80P80"); + } + +#undef PRINT_NSS_SUPP +#undef PRINT + + if (!(cap[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)) + goto out; + + p += scnprintf(p, buf_sz + buf - p, "PPE-THRESHOLDS: %#.2x", + hec->ppe_thres[0]); + + ppe_size = ieee80211_he_ppe_size(hec->ppe_thres[0], cap); + for (i = 1; i < ppe_size; i++) { + p += scnprintf(p, buf_sz + buf - p, " %#.2x", + hec->ppe_thres[i]); + } + p += scnprintf(p, buf_sz + buf - p, "\n"); + +out: + ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return ret; +} +LINK_STA_OPS(he_capa); + +static ssize_t link_sta_eht_capa_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + char *buf, *p; + size_t buf_sz = PAGE_SIZE; + struct link_sta_info *link_sta = file->private_data; + struct ieee80211_sta_eht_cap *bec = &link_sta->pub->eht_cap; + struct ieee80211_eht_cap_elem_fixed *fixed = &bec->eht_cap_elem; + struct ieee80211_eht_mcs_nss_supp *nss = &bec->eht_mcs_nss_supp; + u8 *cap; + int i; + ssize_t ret; + static const char *mcs_desc[] = { "0-7", "8-9", "10-11", "12-13"}; + + buf = kmalloc(buf_sz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; + + p += scnprintf(p, buf_sz + buf - p, "EHT %ssupported\n", + bec->has_eht ? "" : "not "); + if (!bec->has_eht) + goto out; + + p += scnprintf(p, buf_sz + buf - p, + "MAC-CAP: %#.2x %#.2x\n", + fixed->mac_cap_info[0], fixed->mac_cap_info[1]); + p += scnprintf(p, buf_sz + buf - p, + "PHY-CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n", + fixed->phy_cap_info[0], fixed->phy_cap_info[1], + fixed->phy_cap_info[2], fixed->phy_cap_info[3], + fixed->phy_cap_info[4], fixed->phy_cap_info[5], + fixed->phy_cap_info[6], fixed->phy_cap_info[7], + fixed->phy_cap_info[8]); + +#define PRINT(fmt, ...) 
\
+ p += scnprintf(p, buf_sz + buf - p, "\t\t" fmt "\n", \
+ ##__VA_ARGS__)
+
+#define PFLAG(t, n, a, b) \
+ do { \
+ if (cap[n] & IEEE80211_EHT_##t##_CAP##n##_##a) \
+ PRINT("%s", b); \
+ } while (0)
+
+ cap = fixed->mac_cap_info;
+ PFLAG(MAC, 0, EPCS_PRIO_ACCESS, "EPCS-PRIO-ACCESS");
+ PFLAG(MAC, 0, OM_CONTROL, "OM-CONTROL");
+ PFLAG(MAC, 0, TRIG_TXOP_SHARING_MODE1, "TRIG-TXOP-SHARING-MODE1");
+ PFLAG(MAC, 0, TRIG_TXOP_SHARING_MODE2, "TRIG-TXOP-SHARING-MODE2");
+ PFLAG(MAC, 0, RESTRICTED_TWT, "RESTRICTED-TWT");
+ PFLAG(MAC, 0, SCS_TRAFFIC_DESC, "SCS-TRAFFIC-DESC");
+ switch ((cap[0] & 0xc0) >> 6) {
+ case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_3895:
+ PRINT("MAX-MPDU-LEN: 3895");
+ break;
+ case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991:
+ PRINT("MAX-MPDU-LEN: 7991");
+ break;
+ case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454:
+ PRINT("MAX-MPDU-LEN: 11454");
+ break;
+ }
+
+ cap = fixed->phy_cap_info;
+ PFLAG(PHY, 0, 320MHZ_IN_6GHZ, "320MHZ-IN-6GHZ");
+ PFLAG(PHY, 0, 242_TONE_RU_GT20MHZ, "242-TONE-RU-GT20MHZ");
+ PFLAG(PHY, 0, NDP_4_EHT_LFT_32_GI, "NDP-4-EHT-LFT-32-GI");
+ PFLAG(PHY, 0, PARTIAL_BW_UL_MU_MIMO, "PARTIAL-BW-UL-MU-MIMO");
+ PFLAG(PHY, 0, SU_BEAMFORMER, "SU-BEAMFORMER");
+ PFLAG(PHY, 0, SU_BEAMFORMEE, "SU-BEAMFORMEE");
+ i = cap[0] >> 7;
+ i |= (cap[1] & 0x3) << 1;
+ PRINT("BEAMFORMEE-80-NSS: %i", i);
+ PRINT("BEAMFORMEE-160-NSS: %i", (cap[1] >> 2) & 0x7);
+ PRINT("BEAMFORMEE-320-NSS: %i", (cap[1] >> 5) & 0x7);
+ PRINT("SOUNDING-DIM-80-NSS: %i", (cap[2] & 0x7));
+ PRINT("SOUNDING-DIM-160-NSS: %i", (cap[2] >> 3) & 0x7);
+ i = cap[2] >> 6;
+ i |= (cap[3] & 0x1) << 3;
+ PRINT("SOUNDING-DIM-320-NSS: %i", i);
+
+ PFLAG(PHY, 3, NG_16_SU_FEEDBACK, "NG-16-SU-FEEDBACK");
+ PFLAG(PHY, 3, NG_16_MU_FEEDBACK, "NG-16-MU-FEEDBACK");
+ PFLAG(PHY, 3, CODEBOOK_4_2_SU_FDBK, "CODEBOOK-4-2-SU-FDBK");
+ PFLAG(PHY, 3, CODEBOOK_7_5_MU_FDBK, "CODEBOOK-7-5-MU-FDBK");
+ PFLAG(PHY, 3, TRIG_SU_BF_FDBK, "TRIG-SU-BF-FDBK");
+ PFLAG(PHY, 3, TRIG_MU_BF_PART_BW_FDBK, "TRIG-MU-BF-PART-BW-FDBK");
+ PFLAG(PHY, 3, TRIG_CQI_FDBK, "TRIG-CQI-FDBK");
+
+ PFLAG(PHY, 4, PART_BW_DL_MU_MIMO, "PART-BW-DL-MU-MIMO");
+ PFLAG(PHY, 4, PSR_SR_SUPP, "PSR-SR-SUPP");
+ PFLAG(PHY, 4, POWER_BOOST_FACT_SUPP, "POWER-BOOST-FACT-SUPP");
+ PFLAG(PHY, 4, EHT_MU_PPDU_4_EHT_LTF_08_GI, "EHT-MU-PPDU-4-EHT-LTF-08-GI");
+ PRINT("MAX_NC: %i", cap[4] >> 4);
+
+ PFLAG(PHY, 5, NON_TRIG_CQI_FEEDBACK, "NON-TRIG-CQI-FEEDBACK");
+ PFLAG(PHY, 5, TX_LESS_242_TONE_RU_SUPP, "TX-LESS-242-TONE-RU-SUPP");
+ PFLAG(PHY, 5, RX_LESS_242_TONE_RU_SUPP, "RX-LESS-242-TONE-RU-SUPP");
+ PFLAG(PHY, 5, PPE_THRESHOLD_PRESENT, "PPE_THRESHOLD_PRESENT");
+ switch (cap[5] >> 4 & 0x3) {
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US:
+ PRINT("NOMINAL_PKT_PAD: 0us");
+ break;
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US:
+ PRINT("NOMINAL_PKT_PAD: 8us");
+ break;
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US:
+ PRINT("NOMINAL_PKT_PAD: 16us");
+ break;
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US:
+ PRINT("NOMINAL_PKT_PAD: 20us");
+ break;
+ }
+ i = cap[5] >> 6;
+ i |= cap[6] & 0x7;
+ PRINT("MAX-NUM-SUPP-EHT-LTF: %i", i);
+ PFLAG(PHY, 5, SUPP_EXTRA_EHT_LTF, "SUPP-EXTRA-EHT-LTF");
+
+ i = (cap[6] >> 3) & 0xf;
+ PRINT("MCS15-SUPP-MASK: %i", i);
+ PFLAG(PHY, 6, EHT_DUP_6GHZ_SUPP, "EHT-DUP-6GHZ-SUPP");
+
+ PFLAG(PHY, 7, 20MHZ_STA_RX_NDP_WIDER_BW, "20MHZ-STA-RX-NDP-WIDER-BW");
+ PFLAG(PHY, 7, NON_OFDMA_UL_MU_MIMO_80MHZ, "NON-OFDMA-UL-MU-MIMO-80MHZ");
+ PFLAG(PHY, 7, NON_OFDMA_UL_MU_MIMO_160MHZ, "NON-OFDMA-UL-MU-MIMO-160MHZ");
+ PFLAG(PHY, 7,
NON_OFDMA_UL_MU_MIMO_320MHZ, "NON-OFDMA-UL-MU-MIMO-320MHZ"); + PFLAG(PHY, 7, MU_BEAMFORMER_80MHZ, "MU-BEAMFORMER-80MHZ"); + PFLAG(PHY, 7, MU_BEAMFORMER_160MHZ, "MU-BEAMFORMER-160MHZ"); + PFLAG(PHY, 7, MU_BEAMFORMER_320MHZ, "MU-BEAMFORMER-320MHZ"); + PFLAG(PHY, 7, TB_SOUNDING_FDBK_RATE_LIMIT, "TB-SOUNDING-FDBK-RATE-LIMIT"); + + PFLAG(PHY, 8, RX_1024QAM_WIDER_BW_DL_OFDMA, "RX-1024QAM-WIDER-BW-DL-OFDMA"); + PFLAG(PHY, 8, RX_4096QAM_WIDER_BW_DL_OFDMA, "RX-4096QAM-WIDER-BW-DL-OFDMA"); + +#undef PFLAG + + PRINT(""); /* newline */ + if (!(link_sta->pub->he_cap.he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) { + u8 *mcs_vals = (u8 *)(&nss->only_20mhz); + + for (i = 0; i < 4; i++) + PRINT("EHT bw=20 MHz, max NSS for MCS %s: Rx=%u, Tx=%u", + mcs_desc[i], + mcs_vals[i] & 0xf, mcs_vals[i] >> 4); + } else { + u8 *mcs_vals = (u8 *)(&nss->bw._80); + + for (i = 0; i < 3; i++) + PRINT("EHT bw <= 80 MHz, max NSS for MCS %s: Rx=%u, Tx=%u", + mcs_desc[i + 1], + mcs_vals[i] & 0xf, mcs_vals[i] >> 4); + + mcs_vals = (u8 *)(&nss->bw._160); + for (i = 0; i < 3; i++) + PRINT("EHT bw <= 160 MHz, max NSS for MCS %s: Rx=%u, Tx=%u", + mcs_desc[i + 1], + mcs_vals[i] & 0xf, mcs_vals[i] >> 4); + + mcs_vals = (u8 *)(&nss->bw._320); + for (i = 0; i < 3; i++) + PRINT("EHT bw <= 320 MHz, max NSS for MCS %s: Rx=%u, Tx=%u", + mcs_desc[i + 1], + mcs_vals[i] & 0xf, mcs_vals[i] >> 4); + } + + if (cap[5] & IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) { + u8 ppe_size = ieee80211_eht_ppe_size(bec->eht_ppe_thres[0], cap); + + p += scnprintf(p, buf_sz + buf - p, "EHT PPE Thresholds: "); + for (i = 0; i < ppe_size; i++) + p += scnprintf(p, buf_sz + buf - p, "0x%02x ", + bec->eht_ppe_thres[i]); + PRINT(""); /* newline */ + } + +out: + ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return ret; +} +LINK_STA_OPS(eht_capa); + +#define DEBUGFS_ADD(name) \ + debugfs_create_file(#name, 0400, \ + sta->debugfs_dir, sta, &sta_ ##name## _ops) + +#define DEBUGFS_ADD_COUNTER(name, field) \ + debugfs_create_ulong(#name, 0400, sta->debugfs_dir, &sta->field); + +void ieee80211_sta_debugfs_add(struct sta_info *sta) +{ + struct ieee80211_local *local = sta->local; + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations; + u8 mac[3*ETH_ALEN]; + + if (!stations_dir) + return; + + snprintf(mac, sizeof(mac), "%pM", sta->sta.addr); + + /* + * This might fail due to a race condition: + * When mac80211 unlinks a station, the debugfs entries + * remain, but it is already possible to link a new + * station with the same address which triggers adding + * it to debugfs; therefore, if the old station isn't + * destroyed quickly enough the old station's debugfs + * dir might still be around. 
+ */ + sta->debugfs_dir = debugfs_create_dir(mac, stations_dir); + + DEBUGFS_ADD(flags); + DEBUGFS_ADD(aid); + DEBUGFS_ADD(num_ps_buf_frames); + DEBUGFS_ADD(last_seq_ctrl); + DEBUGFS_ADD(agg_status); + /* FIXME: Kept here as the statistics are only done on the deflink */ + DEBUGFS_ADD_COUNTER(tx_filtered, deflink.status_stats.filtered); + + DEBUGFS_ADD(aqm); + DEBUGFS_ADD(airtime); + + if (wiphy_ext_feature_isset(local->hw.wiphy, + NL80211_EXT_FEATURE_AQL)) + DEBUGFS_ADD(aql); + + debugfs_create_xul("driver_buffered_tids", 0400, sta->debugfs_dir, + &sta->driver_buffered_tids); + + drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs_dir); +} + +void ieee80211_sta_debugfs_remove(struct sta_info *sta) +{ + debugfs_remove_recursive(sta->debugfs_dir); + sta->debugfs_dir = NULL; +} + +#undef DEBUGFS_ADD +#undef DEBUGFS_ADD_COUNTER + +#define DEBUGFS_ADD(name) \ + debugfs_create_file(#name, 0400, \ + link_sta->debugfs_dir, link_sta, &link_sta_ ##name## _ops) +#define DEBUGFS_ADD_COUNTER(name, field) \ + debugfs_create_ulong(#name, 0400, link_sta->debugfs_dir, &link_sta->field) + +void ieee80211_link_sta_debugfs_add(struct link_sta_info *link_sta) +{ + if (WARN_ON(!link_sta->sta->debugfs_dir)) + return; + + /* For non-MLO, leave the files in the main directory. */ + if (link_sta->sta->sta.valid_links) { + char link_dir_name[10]; + + snprintf(link_dir_name, sizeof(link_dir_name), + "link-%d", link_sta->link_id); + + link_sta->debugfs_dir = + debugfs_create_dir(link_dir_name, + link_sta->sta->debugfs_dir); + + DEBUGFS_ADD(addr); + } else { + if (WARN_ON(link_sta != &link_sta->sta->deflink)) + return; + + link_sta->debugfs_dir = link_sta->sta->debugfs_dir; + } + + DEBUGFS_ADD(ht_capa); + DEBUGFS_ADD(vht_capa); + DEBUGFS_ADD(he_capa); + DEBUGFS_ADD(eht_capa); + + DEBUGFS_ADD_COUNTER(rx_duplicates, rx_stats.num_duplicates); + DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments); +} + +void ieee80211_link_sta_debugfs_remove(struct link_sta_info *link_sta) +{ + if (!link_sta->debugfs_dir || !link_sta->sta->debugfs_dir) { + link_sta->debugfs_dir = NULL; + return; + } + + if (link_sta->debugfs_dir == link_sta->sta->debugfs_dir) { + WARN_ON(link_sta != &link_sta->sta->deflink); + link_sta->sta->debugfs_dir = NULL; + return; + } + + debugfs_remove_recursive(link_sta->debugfs_dir); + link_sta->debugfs_dir = NULL; +} + +void ieee80211_link_sta_debugfs_drv_add(struct link_sta_info *link_sta) +{ + if (WARN_ON(!link_sta->debugfs_dir)) + return; + + drv_link_sta_add_debugfs(link_sta->sta->local, link_sta->sta->sdata, + link_sta->pub, link_sta->debugfs_dir); +} + +void ieee80211_link_sta_debugfs_drv_remove(struct link_sta_info *link_sta) +{ + if (!link_sta->debugfs_dir) + return; + + if (WARN_ON(link_sta->debugfs_dir == link_sta->sta->debugfs_dir)) + return; + + /* Recreate the directory excluding the driver data */ + debugfs_remove_recursive(link_sta->debugfs_dir); + link_sta->debugfs_dir = NULL; + + ieee80211_link_sta_debugfs_add(link_sta); +} diff --git a/net/mac80211/debugfs_sta.h b/net/mac80211/debugfs_sta.h new file mode 100644 index 0000000000..cde8148bdb --- /dev/null +++ b/net/mac80211/debugfs_sta.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __MAC80211_DEBUGFS_STA_H +#define __MAC80211_DEBUGFS_STA_H + +#include "sta_info.h" + +#ifdef CONFIG_MAC80211_DEBUGFS +void ieee80211_sta_debugfs_add(struct sta_info *sta); +void ieee80211_sta_debugfs_remove(struct sta_info *sta); + +void ieee80211_link_sta_debugfs_add(struct link_sta_info *link_sta); +void 
ieee80211_link_sta_debugfs_remove(struct link_sta_info *link_sta); + +void ieee80211_link_sta_debugfs_drv_add(struct link_sta_info *link_sta); +void ieee80211_link_sta_debugfs_drv_remove(struct link_sta_info *link_sta); +#else +static inline void ieee80211_sta_debugfs_add(struct sta_info *sta) {} +static inline void ieee80211_sta_debugfs_remove(struct sta_info *sta) {} + +static inline void ieee80211_link_sta_debugfs_add(struct link_sta_info *link_sta) {} +static inline void ieee80211_link_sta_debugfs_remove(struct link_sta_info *link_sta) {} + +static inline void ieee80211_link_sta_debugfs_drv_add(struct link_sta_info *link_sta) {} +static inline void ieee80211_link_sta_debugfs_drv_remove(struct link_sta_info *link_sta) {} +#endif + +#endif /* __MAC80211_DEBUGFS_STA_H */ diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c new file mode 100644 index 0000000000..f8af0c3d40 --- /dev/null +++ b/net/mac80211/driver-ops.c @@ -0,0 +1,578 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2015 Intel Deutschland GmbH + * Copyright (C) 2022-2023 Intel Corporation + */ +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "trace.h" +#include "driver-ops.h" +#include "debugfs_sta.h" +#include "debugfs_netdev.h" + +int drv_start(struct ieee80211_local *local) +{ + int ret; + + might_sleep(); + + if (WARN_ON(local->started)) + return -EALREADY; + + trace_drv_start(local); + local->started = true; + /* allow rx frames */ + smp_mb(); + ret = local->ops->start(&local->hw); + trace_drv_return_int(local, ret); + + if (ret) + local->started = false; + + return ret; +} + +void drv_stop(struct ieee80211_local *local) +{ + might_sleep(); + + if (WARN_ON(!local->started)) + return; + + trace_drv_stop(local); + local->ops->stop(&local->hw); + trace_drv_return_void(local); + + /* sync away all work on the tasklet before clearing started */ + tasklet_disable(&local->tasklet); + tasklet_enable(&local->tasklet); + + barrier(); + + local->started = false; +} + +int drv_add_interface(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + int ret; + + might_sleep(); + + if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + (sdata->vif.type == NL80211_IFTYPE_MONITOR && + !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) && + !(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE)))) + return -EINVAL; + + trace_drv_add_interface(local, sdata); + ret = local->ops->add_interface(&local->hw, &sdata->vif); + trace_drv_return_int(local, ret); + + if (ret == 0) + sdata->flags |= IEEE80211_SDATA_IN_DRIVER; + + return ret; +} + +int drv_change_interface(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum nl80211_iftype type, bool p2p) +{ + int ret; + + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_change_interface(local, sdata, type, p2p); + ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p); + trace_drv_return_int(local, ret); + return ret; +} + +void drv_remove_interface(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_remove_interface(local, sdata); + local->ops->remove_interface(&local->hw, &sdata->vif); + sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER; + trace_drv_return_void(local); +} + +__must_check +int drv_sta_state(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + int 
ret = 0; + + might_sleep(); + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state); + if (local->ops->sta_state) { + ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta, + old_state, new_state); + } else if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) { + ret = drv_sta_add(local, sdata, &sta->sta); + if (ret == 0) { + sta->uploaded = true; + if (rcu_access_pointer(sta->sta.rates)) + drv_sta_rate_tbl_update(local, sdata, &sta->sta); + } + } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH) { + drv_sta_remove(local, sdata, &sta->sta); + } + trace_drv_return_int(local, ret); + return ret; +} + +__must_check +int drv_sta_set_txpwr(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct sta_info *sta) +{ + int ret = -EOPNOTSUPP; + + might_sleep(); + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_sta_set_txpwr(local, sdata, &sta->sta); + if (local->ops->sta_set_txpwr) + ret = local->ops->sta_set_txpwr(&local->hw, &sdata->vif, + &sta->sta); + trace_drv_return_int(local, ret); + return ret; +} + +void drv_sta_rc_update(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, u32 changed) +{ + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return; + + WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED && + (sdata->vif.type != NL80211_IFTYPE_ADHOC && + sdata->vif.type != NL80211_IFTYPE_MESH_POINT)); + + trace_drv_sta_rc_update(local, sdata, sta, changed); + if (local->ops->sta_rc_update) + local->ops->sta_rc_update(&local->hw, &sdata->vif, + sta, changed); + + trace_drv_return_void(local); +} + +int drv_conf_tx(struct ieee80211_local *local, + struct ieee80211_link_data *link, u16 ac, + const struct ieee80211_tx_queue_params *params) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + int ret = -EOPNOTSUPP; + + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return -EIO; + + if (sdata->vif.active_links && + !(sdata->vif.active_links & BIT(link->link_id))) + return 0; + + if (params->cw_min == 0 || params->cw_min > params->cw_max) { + /* + * If we can't configure hardware anyway, don't warn. We may + * never have initialized the CW parameters. 
+ */ + WARN_ONCE(local->ops->conf_tx, + "%s: invalid CW_min/CW_max: %d/%d\n", + sdata->name, params->cw_min, params->cw_max); + return -EINVAL; + } + + trace_drv_conf_tx(local, sdata, link->link_id, ac, params); + if (local->ops->conf_tx) + ret = local->ops->conf_tx(&local->hw, &sdata->vif, + link->link_id, ac, params); + trace_drv_return_int(local, ret); + return ret; +} + +u64 drv_get_tsf(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + u64 ret = -1ULL; + + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return ret; + + trace_drv_get_tsf(local, sdata); + if (local->ops->get_tsf) + ret = local->ops->get_tsf(&local->hw, &sdata->vif); + trace_drv_return_u64(local, ret); + return ret; +} + +void drv_set_tsf(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u64 tsf) +{ + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_set_tsf(local, sdata, tsf); + if (local->ops->set_tsf) + local->ops->set_tsf(&local->hw, &sdata->vif, tsf); + trace_drv_return_void(local); +} + +void drv_offset_tsf(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + s64 offset) +{ + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_offset_tsf(local, sdata, offset); + if (local->ops->offset_tsf) + local->ops->offset_tsf(&local->hw, &sdata->vif, offset); + trace_drv_return_void(local); +} + +void drv_reset_tsf(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_reset_tsf(local, sdata); + if (local->ops->reset_tsf) + local->ops->reset_tsf(&local->hw, &sdata->vif); + trace_drv_return_void(local); +} + +int drv_assign_vif_chanctx(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx *ctx) +{ + int ret = 0; + + drv_verify_link_exists(sdata, link_conf); + if (!check_sdata_in_driver(sdata)) + return -EIO; + + if (sdata->vif.active_links && + !(sdata->vif.active_links & BIT(link_conf->link_id))) + return 0; + + trace_drv_assign_vif_chanctx(local, sdata, link_conf, ctx); + if (local->ops->assign_vif_chanctx) { + WARN_ON_ONCE(!ctx->driver_present); + ret = local->ops->assign_vif_chanctx(&local->hw, + &sdata->vif, + link_conf, + &ctx->conf); + } + trace_drv_return_int(local, ret); + + return ret; +} + +void drv_unassign_vif_chanctx(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx *ctx) +{ + might_sleep(); + + drv_verify_link_exists(sdata, link_conf); + if (!check_sdata_in_driver(sdata)) + return; + + if (sdata->vif.active_links && + !(sdata->vif.active_links & BIT(link_conf->link_id))) + return; + + trace_drv_unassign_vif_chanctx(local, sdata, link_conf, ctx); + if (local->ops->unassign_vif_chanctx) { + WARN_ON_ONCE(!ctx->driver_present); + local->ops->unassign_vif_chanctx(&local->hw, + &sdata->vif, + link_conf, + &ctx->conf); + } + trace_drv_return_void(local); +} + +int drv_switch_vif_chanctx(struct ieee80211_local *local, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, enum ieee80211_chanctx_switch_mode mode) +{ + int ret = 0; + int i; + + might_sleep(); + + if (!local->ops->switch_vif_chanctx) + return -EOPNOTSUPP; + + for (i = 0; i < n_vifs; i++) { + struct ieee80211_chanctx *new_ctx = + container_of(vifs[i].new_ctx, + struct ieee80211_chanctx, + conf); + struct ieee80211_chanctx *old_ctx = + container_of(vifs[i].old_ctx, + 
struct ieee80211_chanctx, + conf); + + WARN_ON_ONCE(!old_ctx->driver_present); + WARN_ON_ONCE((mode == CHANCTX_SWMODE_SWAP_CONTEXTS && + new_ctx->driver_present) || + (mode == CHANCTX_SWMODE_REASSIGN_VIF && + !new_ctx->driver_present)); + } + + trace_drv_switch_vif_chanctx(local, vifs, n_vifs, mode); + ret = local->ops->switch_vif_chanctx(&local->hw, + vifs, n_vifs, mode); + trace_drv_return_int(local, ret); + + if (!ret && mode == CHANCTX_SWMODE_SWAP_CONTEXTS) { + for (i = 0; i < n_vifs; i++) { + struct ieee80211_chanctx *new_ctx = + container_of(vifs[i].new_ctx, + struct ieee80211_chanctx, + conf); + struct ieee80211_chanctx *old_ctx = + container_of(vifs[i].old_ctx, + struct ieee80211_chanctx, + conf); + + new_ctx->driver_present = true; + old_ctx->driver_present = false; + } + } + + return ret; +} + +int drv_ampdu_action(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_ampdu_params *params) +{ + int ret = -EOPNOTSUPP; + + might_sleep(); + + if (!sdata) + return -EIO; + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_ampdu_action(local, sdata, params); + + if (local->ops->ampdu_action) + ret = local->ops->ampdu_action(&local->hw, &sdata->vif, params); + + trace_drv_return_int(local, ret); + + return ret; +} + +void drv_link_info_changed(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *info, + int link_id, u64 changed) +{ + might_sleep(); + + if (WARN_ON_ONCE(changed & (BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED) && + sdata->vif.type != NL80211_IFTYPE_AP && + sdata->vif.type != NL80211_IFTYPE_ADHOC && + sdata->vif.type != NL80211_IFTYPE_MESH_POINT && + sdata->vif.type != NL80211_IFTYPE_OCB)) + return; + + if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE || + sdata->vif.type == NL80211_IFTYPE_NAN || + (sdata->vif.type == NL80211_IFTYPE_MONITOR && + !sdata->vif.bss_conf.mu_mimo_owner && + !(changed & BSS_CHANGED_TXPOWER)))) + return; + + if (!check_sdata_in_driver(sdata)) + return; + + if (sdata->vif.active_links && + !(sdata->vif.active_links & BIT(link_id))) + return; + + trace_drv_link_info_changed(local, sdata, info, changed); + if (local->ops->link_info_changed) + local->ops->link_info_changed(&local->hw, &sdata->vif, + info, changed); + else if (local->ops->bss_info_changed) + local->ops->bss_info_changed(&local->hw, &sdata->vif, + info, changed); + trace_drv_return_void(local); +} + +int drv_set_key(struct ieee80211_local *local, + enum set_key_cmd cmd, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + int ret; + + might_sleep(); + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return -EIO; + + if (WARN_ON(key->link_id >= 0 && sdata->vif.active_links && + !(sdata->vif.active_links & BIT(key->link_id)))) + return -ENOLINK; + + trace_drv_set_key(local, cmd, sdata, sta, key); + ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key); + trace_drv_return_int(local, ret); + return ret; +} + +int drv_change_vif_links(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u16 old_links, u16 new_links, + struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]) +{ + struct ieee80211_link_data *link; + unsigned long links_to_add; + unsigned long links_to_rem; + unsigned int link_id; + int ret = -EOPNOTSUPP; + + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return -EIO; + + if (old_links == new_links) + return 0; + + 
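/*
 * Illustrative sketch only (hypothetical names, not part of the mac80211
 * sources above): nearly every drv_*() wrapper in driver-ops.c follows the
 * same shape -- assert the interface is registered with the driver, skip
 * links that are not currently active (MLO), surround the optional driver
 * callback with trace_drv_*() tracepoints, and fall back to a benign
 * default when the op is absent.  "my_op" / "trace_drv_my_op" stand in for
 * any particular callback.
 */
static int drv_my_op_sketch(struct ieee80211_local *local,
			    struct ieee80211_sub_if_data *sdata,
			    unsigned int link_id)
{
	int ret = -EOPNOTSUPP;		/* default when the op is absent */

	might_sleep();

	if (!check_sdata_in_driver(sdata))
		return -EIO;

	/* MLO: ignore links that are not currently active */
	if (sdata->vif.active_links &&
	    !(sdata->vif.active_links & BIT(link_id)))
		return 0;

	trace_drv_my_op(local, sdata, link_id);		/* hypothetical */
	if (local->ops->my_op)				/* hypothetical */
		ret = local->ops->my_op(&local->hw, &sdata->vif, link_id);
	trace_drv_return_int(local, ret);

	return ret;
}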
links_to_add = ~old_links & new_links; + links_to_rem = old_links & ~new_links; + + for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) { + link = rcu_access_pointer(sdata->link[link_id]); + + ieee80211_link_debugfs_drv_remove(link); + } + + trace_drv_change_vif_links(local, sdata, old_links, new_links); + if (local->ops->change_vif_links) + ret = local->ops->change_vif_links(&local->hw, &sdata->vif, + old_links, new_links, old); + trace_drv_return_int(local, ret); + + if (ret) + return ret; + + if (!local->in_reconfig) { + for_each_set_bit(link_id, &links_to_add, + IEEE80211_MLD_MAX_NUM_LINKS) { + link = rcu_access_pointer(sdata->link[link_id]); + + ieee80211_link_debugfs_drv_add(link); + } + } + + return 0; +} + +int drv_change_sta_links(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + u16 old_links, u16 new_links) +{ + struct sta_info *info = container_of(sta, struct sta_info, sta); + struct link_sta_info *link_sta; + unsigned long links_to_add; + unsigned long links_to_rem; + unsigned int link_id; + int ret = -EOPNOTSUPP; + + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return -EIO; + + old_links &= sdata->vif.active_links; + new_links &= sdata->vif.active_links; + + if (old_links == new_links) + return 0; + + links_to_add = ~old_links & new_links; + links_to_rem = old_links & ~new_links; + + for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) { + link_sta = rcu_dereference_protected(info->link[link_id], + lockdep_is_held(&local->sta_mtx)); + + ieee80211_link_sta_debugfs_drv_remove(link_sta); + } + + trace_drv_change_sta_links(local, sdata, sta, old_links, new_links); + if (local->ops->change_sta_links) + ret = local->ops->change_sta_links(&local->hw, &sdata->vif, sta, + old_links, new_links); + trace_drv_return_int(local, ret); + + if (ret) + return ret; + + /* during reconfig don't add it to debugfs again */ + if (local->in_reconfig) + return 0; + + for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) { + link_sta = rcu_dereference_protected(info->link[link_id], + lockdep_is_held(&local->sta_mtx)); + ieee80211_link_sta_debugfs_drv_add(link_sta); + } + + return 0; +} diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h new file mode 100644 index 0000000000..2bc2fbe58f --- /dev/null +++ b/net/mac80211/driver-ops.h @@ -0,0 +1,1553 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* +* Portions of this file +* Copyright(c) 2016 Intel Deutschland GmbH +* Copyright (C) 2018 - 2019, 2021 - 2023 Intel Corporation +*/ + +#ifndef __MAC80211_DRIVER_OPS +#define __MAC80211_DRIVER_OPS + +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "trace.h" + +#define check_sdata_in_driver(sdata) ({ \ + WARN_ONCE(!sdata->local->reconfig_failure && \ + !(sdata->flags & IEEE80211_SDATA_IN_DRIVER), \ + "%s: Failed check-sdata-in-driver check, flags: 0x%x\n", \ + sdata->dev ? 
sdata->dev->name : sdata->name, sdata->flags); \ + !!(sdata->flags & IEEE80211_SDATA_IN_DRIVER); \ +}) + +static inline struct ieee80211_sub_if_data * +get_bss_sdata(struct ieee80211_sub_if_data *sdata) +{ + if (sdata && sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, + u.ap); + + return sdata; +} + +static inline void drv_tx(struct ieee80211_local *local, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + local->ops->tx(&local->hw, control, skb); +} + +static inline void drv_sync_rx_queues(struct ieee80211_local *local, + struct sta_info *sta) +{ + if (local->ops->sync_rx_queues) { + trace_drv_sync_rx_queues(local, sta->sdata, &sta->sta); + local->ops->sync_rx_queues(&local->hw); + trace_drv_return_void(local); + } +} + +static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata, + u32 sset, u8 *data) +{ + struct ieee80211_local *local = sdata->local; + if (local->ops->get_et_strings) { + trace_drv_get_et_strings(local, sset); + local->ops->get_et_strings(&local->hw, &sdata->vif, sset, data); + trace_drv_return_void(local); + } +} + +static inline void drv_get_et_stats(struct ieee80211_sub_if_data *sdata, + struct ethtool_stats *stats, + u64 *data) +{ + struct ieee80211_local *local = sdata->local; + if (local->ops->get_et_stats) { + trace_drv_get_et_stats(local); + local->ops->get_et_stats(&local->hw, &sdata->vif, stats, data); + trace_drv_return_void(local); + } +} + +static inline int drv_get_et_sset_count(struct ieee80211_sub_if_data *sdata, + int sset) +{ + struct ieee80211_local *local = sdata->local; + int rv = 0; + if (local->ops->get_et_sset_count) { + trace_drv_get_et_sset_count(local, sset); + rv = local->ops->get_et_sset_count(&local->hw, &sdata->vif, + sset); + trace_drv_return_int(local, rv); + } + return rv; +} + +int drv_start(struct ieee80211_local *local); +void drv_stop(struct ieee80211_local *local); + +#ifdef CONFIG_PM +static inline int drv_suspend(struct ieee80211_local *local, + struct cfg80211_wowlan *wowlan) +{ + int ret; + + might_sleep(); + + trace_drv_suspend(local); + ret = local->ops->suspend(&local->hw, wowlan); + trace_drv_return_int(local, ret); + return ret; +} + +static inline int drv_resume(struct ieee80211_local *local) +{ + int ret; + + might_sleep(); + + trace_drv_resume(local); + ret = local->ops->resume(&local->hw); + trace_drv_return_int(local, ret); + return ret; +} + +static inline void drv_set_wakeup(struct ieee80211_local *local, + bool enabled) +{ + might_sleep(); + + if (!local->ops->set_wakeup) + return; + + trace_drv_set_wakeup(local, enabled); + local->ops->set_wakeup(&local->hw, enabled); + trace_drv_return_void(local); +} +#endif + +int drv_add_interface(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); + +int drv_change_interface(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum nl80211_iftype type, bool p2p); + +void drv_remove_interface(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); + +static inline int drv_config(struct ieee80211_local *local, u32 changed) +{ + int ret; + + might_sleep(); + + trace_drv_config(local, changed); + ret = local->ops->config(&local->hw, changed); + trace_drv_return_int(local, ret); + return ret; +} + +static inline void drv_vif_cfg_changed(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u64 changed) +{ + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_vif_cfg_changed(local, sdata, changed); 
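/*
 * Illustrative sketch only (hypothetical mydrv_* names): as the next few
 * lines show, drv_vif_cfg_changed() -- like drv_link_info_changed() in
 * driver-ops.c -- dispatches to the MLO-aware ->vif_cfg_changed() /
 * ->link_info_changed() ops when a driver provides them, and otherwise
 * falls back to the legacy ->bss_info_changed().  A pre-MLO driver can
 * therefore keep a single callback, roughly like this:
 */
static void mydrv_bss_info_changed(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_bss_conf *info,
				   u64 changed)
{
	if (changed & BSS_CHANGED_BSSID)
		mydrv_set_bssid(hw, info->bssid);	/* hypothetical */
}

static const struct ieee80211_ops mydrv_ops = {
	/* ... .tx, .start, .add_interface, ... */
	.bss_info_changed = mydrv_bss_info_changed,
	/* .vif_cfg_changed / .link_info_changed intentionally left unset */
};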
+ if (local->ops->vif_cfg_changed) + local->ops->vif_cfg_changed(&local->hw, &sdata->vif, changed); + else if (local->ops->bss_info_changed) + local->ops->bss_info_changed(&local->hw, &sdata->vif, + &sdata->vif.bss_conf, changed); + trace_drv_return_void(local); +} + +void drv_link_info_changed(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *info, + int link_id, u64 changed); + +static inline u64 drv_prepare_multicast(struct ieee80211_local *local, + struct netdev_hw_addr_list *mc_list) +{ + u64 ret = 0; + + trace_drv_prepare_multicast(local, mc_list->count); + + if (local->ops->prepare_multicast) + ret = local->ops->prepare_multicast(&local->hw, mc_list); + + trace_drv_return_u64(local, ret); + + return ret; +} + +static inline void drv_configure_filter(struct ieee80211_local *local, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + might_sleep(); + + trace_drv_configure_filter(local, changed_flags, total_flags, + multicast); + local->ops->configure_filter(&local->hw, changed_flags, total_flags, + multicast); + trace_drv_return_void(local); +} + +static inline void drv_config_iface_filter(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + unsigned int filter_flags, + unsigned int changed_flags) +{ + might_sleep(); + + trace_drv_config_iface_filter(local, sdata, filter_flags, + changed_flags); + if (local->ops->config_iface_filter) + local->ops->config_iface_filter(&local->hw, &sdata->vif, + filter_flags, + changed_flags); + trace_drv_return_void(local); +} + +static inline int drv_set_tim(struct ieee80211_local *local, + struct ieee80211_sta *sta, bool set) +{ + int ret = 0; + trace_drv_set_tim(local, sta, set); + if (local->ops->set_tim) + ret = local->ops->set_tim(&local->hw, sta, set); + trace_drv_return_int(local, ret); + return ret; +} + +int drv_set_key(struct ieee80211_local *local, + enum set_key_cmd cmd, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); + +static inline void drv_update_tkip_key(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_key_conf *conf, + struct sta_info *sta, u32 iv32, + u16 *phase1key) +{ + struct ieee80211_sta *ista = NULL; + + if (sta) + ista = &sta->sta; + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_update_tkip_key(local, sdata, conf, ista, iv32); + if (local->ops->update_tkip_key) + local->ops->update_tkip_key(&local->hw, &sdata->vif, conf, + ista, iv32, phase1key); + trace_drv_return_void(local); +} + +static inline int drv_hw_scan(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_scan_request *req) +{ + int ret; + + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_hw_scan(local, sdata); + ret = local->ops->hw_scan(&local->hw, &sdata->vif, req); + trace_drv_return_int(local, ret); + return ret; +} + +static inline void drv_cancel_hw_scan(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_cancel_hw_scan(local, sdata); + local->ops->cancel_hw_scan(&local->hw, &sdata->vif); + trace_drv_return_void(local); +} + +static inline int +drv_sched_scan_start(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies) +{ + int ret; + + might_sleep(); + + if 
(!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_sched_scan_start(local, sdata); + ret = local->ops->sched_scan_start(&local->hw, &sdata->vif, + req, ies); + trace_drv_return_int(local, ret); + return ret; +} + +static inline int drv_sched_scan_stop(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + int ret; + + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_sched_scan_stop(local, sdata); + ret = local->ops->sched_scan_stop(&local->hw, &sdata->vif); + trace_drv_return_int(local, ret); + + return ret; +} + +static inline void drv_sw_scan_start(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + const u8 *mac_addr) +{ + might_sleep(); + + trace_drv_sw_scan_start(local, sdata, mac_addr); + if (local->ops->sw_scan_start) + local->ops->sw_scan_start(&local->hw, &sdata->vif, mac_addr); + trace_drv_return_void(local); +} + +static inline void drv_sw_scan_complete(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + might_sleep(); + + trace_drv_sw_scan_complete(local, sdata); + if (local->ops->sw_scan_complete) + local->ops->sw_scan_complete(&local->hw, &sdata->vif); + trace_drv_return_void(local); +} + +static inline int drv_get_stats(struct ieee80211_local *local, + struct ieee80211_low_level_stats *stats) +{ + int ret = -EOPNOTSUPP; + + might_sleep(); + + if (local->ops->get_stats) + ret = local->ops->get_stats(&local->hw, stats); + trace_drv_get_stats(local, stats, ret); + + return ret; +} + +static inline void drv_get_key_seq(struct ieee80211_local *local, + struct ieee80211_key *key, + struct ieee80211_key_seq *seq) +{ + if (local->ops->get_key_seq) + local->ops->get_key_seq(&local->hw, &key->conf, seq); + trace_drv_get_key_seq(local, &key->conf); +} + +static inline int drv_set_frag_threshold(struct ieee80211_local *local, + u32 value) +{ + int ret = 0; + + might_sleep(); + + trace_drv_set_frag_threshold(local, value); + if (local->ops->set_frag_threshold) + ret = local->ops->set_frag_threshold(&local->hw, value); + trace_drv_return_int(local, ret); + return ret; +} + +static inline int drv_set_rts_threshold(struct ieee80211_local *local, + u32 value) +{ + int ret = 0; + + might_sleep(); + + trace_drv_set_rts_threshold(local, value); + if (local->ops->set_rts_threshold) + ret = local->ops->set_rts_threshold(&local->hw, value); + trace_drv_return_int(local, ret); + return ret; +} + +static inline int drv_set_coverage_class(struct ieee80211_local *local, + s16 value) +{ + int ret = 0; + might_sleep(); + + trace_drv_set_coverage_class(local, value); + if (local->ops->set_coverage_class) + local->ops->set_coverage_class(&local->hw, value); + else + ret = -EOPNOTSUPP; + + trace_drv_return_int(local, ret); + return ret; +} + +static inline void drv_sta_notify(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta) +{ + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_sta_notify(local, sdata, cmd, sta); + if (local->ops->sta_notify) + local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta); + trace_drv_return_void(local); +} + +static inline int drv_sta_add(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta) +{ + int ret = 0; + + might_sleep(); + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_sta_add(local, sdata, sta); + if (local->ops->sta_add) + ret = 
local->ops->sta_add(&local->hw, &sdata->vif, sta); + + trace_drv_return_int(local, ret); + + return ret; +} + +static inline void drv_sta_remove(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta) +{ + might_sleep(); + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_sta_remove(local, sdata, sta); + if (local->ops->sta_remove) + local->ops->sta_remove(&local->hw, &sdata->vif, sta); + + trace_drv_return_void(local); +} + +#ifdef CONFIG_MAC80211_DEBUGFS +static inline void drv_link_add_debugfs(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *link_conf, + struct dentry *dir) +{ + might_sleep(); + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return; + + if (local->ops->link_add_debugfs) + local->ops->link_add_debugfs(&local->hw, &sdata->vif, + link_conf, dir); +} + +static inline void drv_sta_add_debugfs(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + struct dentry *dir) +{ + might_sleep(); + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return; + + if (local->ops->sta_add_debugfs) + local->ops->sta_add_debugfs(&local->hw, &sdata->vif, + sta, dir); +} + +static inline void drv_link_sta_add_debugfs(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_sta *link_sta, + struct dentry *dir) +{ + might_sleep(); + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return; + + if (local->ops->link_sta_add_debugfs) + local->ops->link_sta_add_debugfs(&local->hw, &sdata->vif, + link_sta, dir); +} +#endif + +static inline void drv_sta_pre_rcu_remove(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct sta_info *sta) +{ + might_sleep(); + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_sta_pre_rcu_remove(local, sdata, &sta->sta); + if (local->ops->sta_pre_rcu_remove) + local->ops->sta_pre_rcu_remove(&local->hw, &sdata->vif, + &sta->sta); + trace_drv_return_void(local); +} + +__must_check +int drv_sta_state(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state); + +__must_check +int drv_sta_set_txpwr(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct sta_info *sta); + +void drv_sta_rc_update(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, u32 changed); + +static inline void drv_sta_rate_tbl_update(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta) +{ + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_sta_rate_tbl_update(local, sdata, sta); + if (local->ops->sta_rate_tbl_update) + local->ops->sta_rate_tbl_update(&local->hw, &sdata->vif, sta); + + trace_drv_return_void(local); +} + +static inline void drv_sta_statistics(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_sta_statistics(local, sdata, sta); + if (local->ops->sta_statistics) + local->ops->sta_statistics(&local->hw, &sdata->vif, sta, sinfo); + trace_drv_return_void(local); +} + +int drv_conf_tx(struct ieee80211_local *local, + 
struct ieee80211_link_data *link, u16 ac, + const struct ieee80211_tx_queue_params *params); + +u64 drv_get_tsf(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); +void drv_set_tsf(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u64 tsf); +void drv_offset_tsf(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + s64 offset); +void drv_reset_tsf(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); + +static inline int drv_tx_last_beacon(struct ieee80211_local *local) +{ + int ret = 0; /* default unsupported op for less congestion */ + + might_sleep(); + + trace_drv_tx_last_beacon(local); + if (local->ops->tx_last_beacon) + ret = local->ops->tx_last_beacon(&local->hw); + trace_drv_return_int(local, ret); + return ret; +} + +int drv_ampdu_action(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_ampdu_params *params); + +static inline int drv_get_survey(struct ieee80211_local *local, int idx, + struct survey_info *survey) +{ + int ret = -EOPNOTSUPP; + + trace_drv_get_survey(local, idx, survey); + + if (local->ops->get_survey) + ret = local->ops->get_survey(&local->hw, idx, survey); + + trace_drv_return_int(local, ret); + + return ret; +} + +static inline void drv_rfkill_poll(struct ieee80211_local *local) +{ + might_sleep(); + + if (local->ops->rfkill_poll) + local->ops->rfkill_poll(&local->hw); +} + +static inline void drv_flush(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u32 queues, bool drop) +{ + struct ieee80211_vif *vif; + + might_sleep(); + + sdata = get_bss_sdata(sdata); + vif = sdata ? &sdata->vif : NULL; + + if (sdata && !check_sdata_in_driver(sdata)) + return; + + trace_drv_flush(local, queues, drop); + if (local->ops->flush) + local->ops->flush(&local->hw, vif, queues, drop); + trace_drv_return_void(local); +} + +static inline void drv_flush_sta(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct sta_info *sta) +{ + might_sleep(); + + sdata = get_bss_sdata(sdata); + + if (sdata && !check_sdata_in_driver(sdata)) + return; + + trace_drv_flush_sta(local, sdata, &sta->sta); + if (local->ops->flush_sta) + local->ops->flush_sta(&local->hw, &sdata->vif, &sta->sta); + trace_drv_return_void(local); +} + +static inline void drv_channel_switch(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel_switch *ch_switch) +{ + might_sleep(); + + trace_drv_channel_switch(local, sdata, ch_switch); + local->ops->channel_switch(&local->hw, &sdata->vif, ch_switch); + trace_drv_return_void(local); +} + + +static inline int drv_set_antenna(struct ieee80211_local *local, + u32 tx_ant, u32 rx_ant) +{ + int ret = -EOPNOTSUPP; + might_sleep(); + if (local->ops->set_antenna) + ret = local->ops->set_antenna(&local->hw, tx_ant, rx_ant); + trace_drv_set_antenna(local, tx_ant, rx_ant, ret); + return ret; +} + +static inline int drv_get_antenna(struct ieee80211_local *local, + u32 *tx_ant, u32 *rx_ant) +{ + int ret = -EOPNOTSUPP; + might_sleep(); + if (local->ops->get_antenna) + ret = local->ops->get_antenna(&local->hw, tx_ant, rx_ant); + trace_drv_get_antenna(local, *tx_ant, *rx_ant, ret); + return ret; +} + +static inline int drv_remain_on_channel(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel *chan, + unsigned int duration, + enum ieee80211_roc_type type) +{ + int ret; + + might_sleep(); + + trace_drv_remain_on_channel(local, sdata, chan, duration, 
type); + ret = local->ops->remain_on_channel(&local->hw, &sdata->vif, + chan, duration, type); + trace_drv_return_int(local, ret); + + return ret; +} + +static inline int +drv_cancel_remain_on_channel(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + int ret; + + might_sleep(); + + trace_drv_cancel_remain_on_channel(local, sdata); + ret = local->ops->cancel_remain_on_channel(&local->hw, &sdata->vif); + trace_drv_return_int(local, ret); + + return ret; +} + +static inline int drv_set_ringparam(struct ieee80211_local *local, + u32 tx, u32 rx) +{ + int ret = -ENOTSUPP; + + might_sleep(); + + trace_drv_set_ringparam(local, tx, rx); + if (local->ops->set_ringparam) + ret = local->ops->set_ringparam(&local->hw, tx, rx); + trace_drv_return_int(local, ret); + + return ret; +} + +static inline void drv_get_ringparam(struct ieee80211_local *local, + u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max) +{ + might_sleep(); + + trace_drv_get_ringparam(local, tx, tx_max, rx, rx_max); + if (local->ops->get_ringparam) + local->ops->get_ringparam(&local->hw, tx, tx_max, rx, rx_max); + trace_drv_return_void(local); +} + +static inline bool drv_tx_frames_pending(struct ieee80211_local *local) +{ + bool ret = false; + + might_sleep(); + + trace_drv_tx_frames_pending(local); + if (local->ops->tx_frames_pending) + ret = local->ops->tx_frames_pending(&local->hw); + trace_drv_return_bool(local, ret); + + return ret; +} + +static inline int drv_set_bitrate_mask(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + const struct cfg80211_bitrate_mask *mask) +{ + int ret = -EOPNOTSUPP; + + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_set_bitrate_mask(local, sdata, mask); + if (local->ops->set_bitrate_mask) + ret = local->ops->set_bitrate_mask(&local->hw, + &sdata->vif, mask); + trace_drv_return_int(local, ret); + + return ret; +} + +static inline void drv_set_rekey_data(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_gtk_rekey_data *data) +{ + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_set_rekey_data(local, sdata, data); + if (local->ops->set_rekey_data) + local->ops->set_rekey_data(&local->hw, &sdata->vif, data); + trace_drv_return_void(local); +} + +static inline void drv_event_callback(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + const struct ieee80211_event *event) +{ + trace_drv_event_callback(local, sdata, event); + if (local->ops->event_callback) + local->ops->event_callback(&local->hw, &sdata->vif, event); + trace_drv_return_void(local); +} + +static inline void +drv_release_buffered_frames(struct ieee80211_local *local, + struct sta_info *sta, u16 tids, int num_frames, + enum ieee80211_frame_release_type reason, + bool more_data) +{ + trace_drv_release_buffered_frames(local, &sta->sta, tids, num_frames, + reason, more_data); + if (local->ops->release_buffered_frames) + local->ops->release_buffered_frames(&local->hw, &sta->sta, tids, + num_frames, reason, + more_data); + trace_drv_return_void(local); +} + +static inline void +drv_allow_buffered_frames(struct ieee80211_local *local, + struct sta_info *sta, u16 tids, int num_frames, + enum ieee80211_frame_release_type reason, + bool more_data) +{ + trace_drv_allow_buffered_frames(local, &sta->sta, tids, num_frames, + reason, more_data); + if (local->ops->allow_buffered_frames) + local->ops->allow_buffered_frames(&local->hw, &sta->sta, + tids, num_frames, reason, + more_data); + trace_drv_return_void(local); 
+} + +static inline void drv_mgd_prepare_tx(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_prep_tx_info *info) +{ + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return; + WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION); + + trace_drv_mgd_prepare_tx(local, sdata, info->duration, + info->subtype, info->success); + if (local->ops->mgd_prepare_tx) + local->ops->mgd_prepare_tx(&local->hw, &sdata->vif, info); + trace_drv_return_void(local); +} + +static inline void drv_mgd_complete_tx(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_prep_tx_info *info) +{ + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return; + WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION); + + trace_drv_mgd_complete_tx(local, sdata, info->duration, + info->subtype, info->success); + if (local->ops->mgd_complete_tx) + local->ops->mgd_complete_tx(&local->hw, &sdata->vif, info); + trace_drv_return_void(local); +} + +static inline void +drv_mgd_protect_tdls_discover(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return; + WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION); + + trace_drv_mgd_protect_tdls_discover(local, sdata); + if (local->ops->mgd_protect_tdls_discover) + local->ops->mgd_protect_tdls_discover(&local->hw, &sdata->vif); + trace_drv_return_void(local); +} + +static inline int drv_add_chanctx(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx) +{ + int ret = -EOPNOTSUPP; + + might_sleep(); + + trace_drv_add_chanctx(local, ctx); + if (local->ops->add_chanctx) + ret = local->ops->add_chanctx(&local->hw, &ctx->conf); + trace_drv_return_int(local, ret); + if (!ret) + ctx->driver_present = true; + + return ret; +} + +static inline void drv_remove_chanctx(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx) +{ + might_sleep(); + + if (WARN_ON(!ctx->driver_present)) + return; + + trace_drv_remove_chanctx(local, ctx); + if (local->ops->remove_chanctx) + local->ops->remove_chanctx(&local->hw, &ctx->conf); + trace_drv_return_void(local); + ctx->driver_present = false; +} + +static inline void drv_change_chanctx(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + u32 changed) +{ + might_sleep(); + + trace_drv_change_chanctx(local, ctx, changed); + if (local->ops->change_chanctx) { + WARN_ON_ONCE(!ctx->driver_present); + local->ops->change_chanctx(&local->hw, &ctx->conf, changed); + } + trace_drv_return_void(local); +} + +static inline void drv_verify_link_exists(struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *link_conf) +{ + /* deflink always exists, so need to check only for other links */ + if (sdata->deflink.conf != link_conf) + sdata_assert_lock(sdata); +} + +int drv_assign_vif_chanctx(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx *ctx); +void drv_unassign_vif_chanctx(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx *ctx); +int drv_switch_vif_chanctx(struct ieee80211_local *local, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, enum ieee80211_chanctx_switch_mode mode); + +static inline int drv_start_ap(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *link_conf) +{ + int ret = 0; + + /* make sure link_conf is protected */ + 
drv_verify_link_exists(sdata, link_conf); + + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_start_ap(local, sdata, link_conf); + if (local->ops->start_ap) + ret = local->ops->start_ap(&local->hw, &sdata->vif, link_conf); + trace_drv_return_int(local, ret); + return ret; +} + +static inline void drv_stop_ap(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *link_conf) +{ + /* make sure link_conf is protected */ + drv_verify_link_exists(sdata, link_conf); + + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_stop_ap(local, sdata, link_conf); + if (local->ops->stop_ap) + local->ops->stop_ap(&local->hw, &sdata->vif, link_conf); + trace_drv_return_void(local); +} + +static inline void +drv_reconfig_complete(struct ieee80211_local *local, + enum ieee80211_reconfig_type reconfig_type) +{ + might_sleep(); + + trace_drv_reconfig_complete(local, reconfig_type); + if (local->ops->reconfig_complete) + local->ops->reconfig_complete(&local->hw, reconfig_type); + trace_drv_return_void(local); +} + +static inline void +drv_set_default_unicast_key(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + int key_idx) +{ + if (!check_sdata_in_driver(sdata)) + return; + + WARN_ON_ONCE(key_idx < -1 || key_idx > 3); + + trace_drv_set_default_unicast_key(local, sdata, key_idx); + if (local->ops->set_default_unicast_key) + local->ops->set_default_unicast_key(&local->hw, &sdata->vif, + key_idx); + trace_drv_return_void(local); +} + +#if IS_ENABLED(CONFIG_IPV6) +static inline void drv_ipv6_addr_change(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct inet6_dev *idev) +{ + trace_drv_ipv6_addr_change(local, sdata); + if (local->ops->ipv6_addr_change) + local->ops->ipv6_addr_change(&local->hw, &sdata->vif, idev); + trace_drv_return_void(local); +} +#endif + +static inline void +drv_channel_switch_beacon(struct ieee80211_sub_if_data *sdata, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_local *local = sdata->local; + + if (local->ops->channel_switch_beacon) { + trace_drv_channel_switch_beacon(local, sdata, chandef); + local->ops->channel_switch_beacon(&local->hw, &sdata->vif, + chandef); + } +} + +static inline int +drv_pre_channel_switch(struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel_switch *ch_switch) +{ + struct ieee80211_local *local = sdata->local; + int ret = 0; + + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_pre_channel_switch(local, sdata, ch_switch); + if (local->ops->pre_channel_switch) + ret = local->ops->pre_channel_switch(&local->hw, &sdata->vif, + ch_switch); + trace_drv_return_int(local, ret); + return ret; +} + +static inline int +drv_post_channel_switch(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + int ret = 0; + + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_post_channel_switch(local, sdata); + if (local->ops->post_channel_switch) + ret = local->ops->post_channel_switch(&local->hw, &sdata->vif); + trace_drv_return_int(local, ret); + return ret; +} + +static inline void +drv_abort_channel_switch(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_abort_channel_switch(local, sdata); + + if (local->ops->abort_channel_switch) + local->ops->abort_channel_switch(&local->hw, &sdata->vif); +} + +static inline void +drv_channel_switch_rx_beacon(struct 
ieee80211_sub_if_data *sdata, + struct ieee80211_channel_switch *ch_switch) +{ + struct ieee80211_local *local = sdata->local; + + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_channel_switch_rx_beacon(local, sdata, ch_switch); + if (local->ops->channel_switch_rx_beacon) + local->ops->channel_switch_rx_beacon(&local->hw, &sdata->vif, + ch_switch); +} + +static inline int drv_join_ibss(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + int ret = 0; + + might_sleep(); + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_join_ibss(local, sdata, &sdata->vif.bss_conf); + if (local->ops->join_ibss) + ret = local->ops->join_ibss(&local->hw, &sdata->vif); + trace_drv_return_int(local, ret); + return ret; +} + +static inline void drv_leave_ibss(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + might_sleep(); + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_leave_ibss(local, sdata); + if (local->ops->leave_ibss) + local->ops->leave_ibss(&local->hw, &sdata->vif); + trace_drv_return_void(local); +} + +static inline u32 drv_get_expected_throughput(struct ieee80211_local *local, + struct sta_info *sta) +{ + u32 ret = 0; + + trace_drv_get_expected_throughput(&sta->sta); + if (local->ops->get_expected_throughput && sta->uploaded) + ret = local->ops->get_expected_throughput(&local->hw, &sta->sta); + trace_drv_return_u32(local, ret); + + return ret; +} + +static inline int drv_get_txpower(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, int *dbm) +{ + int ret; + + if (!local->ops->get_txpower) + return -EOPNOTSUPP; + + ret = local->ops->get_txpower(&local->hw, &sdata->vif, dbm); + trace_drv_get_txpower(local, sdata, *dbm, ret); + + return ret; +} + +static inline int +drv_tdls_channel_switch(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, u8 oper_class, + struct cfg80211_chan_def *chandef, + struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie) +{ + int ret; + + might_sleep(); + if (!check_sdata_in_driver(sdata)) + return -EIO; + + if (!local->ops->tdls_channel_switch) + return -EOPNOTSUPP; + + trace_drv_tdls_channel_switch(local, sdata, sta, oper_class, chandef); + ret = local->ops->tdls_channel_switch(&local->hw, &sdata->vif, sta, + oper_class, chandef, tmpl_skb, + ch_sw_tm_ie); + trace_drv_return_int(local, ret); + return ret; +} + +static inline void +drv_tdls_cancel_channel_switch(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta) +{ + might_sleep(); + if (!check_sdata_in_driver(sdata)) + return; + + if (!local->ops->tdls_cancel_channel_switch) + return; + + trace_drv_tdls_cancel_channel_switch(local, sdata, sta); + local->ops->tdls_cancel_channel_switch(&local->hw, &sdata->vif, sta); + trace_drv_return_void(local); +} + +static inline void +drv_tdls_recv_channel_switch(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_tdls_ch_sw_params *params) +{ + trace_drv_tdls_recv_channel_switch(local, sdata, params); + if (local->ops->tdls_recv_channel_switch) + local->ops->tdls_recv_channel_switch(&local->hw, &sdata->vif, + params); + trace_drv_return_void(local); +} + +static inline void drv_wake_tx_queue(struct ieee80211_local *local, + struct txq_info *txq) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif); + + /* In reconfig don't transmit now, but mark for waking later */ + if (local->in_reconfig) { + set_bit(IEEE80211_TXQ_DIRTY, &txq->flags); + return; + 
} + + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_wake_tx_queue(local, sdata, txq); + local->ops->wake_tx_queue(&local->hw, &txq->txq); +} + +static inline void schedule_and_wake_txq(struct ieee80211_local *local, + struct txq_info *txqi) +{ + ieee80211_schedule_txq(&local->hw, &txqi->txq); + drv_wake_tx_queue(local, txqi); +} + +static inline int drv_can_aggregate_in_amsdu(struct ieee80211_local *local, + struct sk_buff *head, + struct sk_buff *skb) +{ + if (!local->ops->can_aggregate_in_amsdu) + return true; + + return local->ops->can_aggregate_in_amsdu(&local->hw, head, skb); +} + +static inline int +drv_get_ftm_responder_stats(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_ftm_responder_stats *ftm_stats) +{ + u32 ret = -EOPNOTSUPP; + + if (local->ops->get_ftm_responder_stats) + ret = local->ops->get_ftm_responder_stats(&local->hw, + &sdata->vif, + ftm_stats); + trace_drv_get_ftm_responder_stats(local, sdata, ftm_stats); + + return ret; +} + +static inline int drv_start_pmsr(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_pmsr_request *request) +{ + int ret = -EOPNOTSUPP; + + might_sleep(); + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_start_pmsr(local, sdata); + + if (local->ops->start_pmsr) + ret = local->ops->start_pmsr(&local->hw, &sdata->vif, request); + trace_drv_return_int(local, ret); + + return ret; +} + +static inline void drv_abort_pmsr(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_pmsr_request *request) +{ + trace_drv_abort_pmsr(local, sdata); + + might_sleep(); + if (!check_sdata_in_driver(sdata)) + return; + + if (local->ops->abort_pmsr) + local->ops->abort_pmsr(&local->hw, &sdata->vif, request); + trace_drv_return_void(local); +} + +static inline int drv_start_nan(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_nan_conf *conf) +{ + int ret; + + might_sleep(); + check_sdata_in_driver(sdata); + + trace_drv_start_nan(local, sdata, conf); + ret = local->ops->start_nan(&local->hw, &sdata->vif, conf); + trace_drv_return_int(local, ret); + return ret; +} + +static inline void drv_stop_nan(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + might_sleep(); + check_sdata_in_driver(sdata); + + trace_drv_stop_nan(local, sdata); + local->ops->stop_nan(&local->hw, &sdata->vif); + trace_drv_return_void(local); +} + +static inline int drv_nan_change_conf(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_nan_conf *conf, + u32 changes) +{ + int ret; + + might_sleep(); + check_sdata_in_driver(sdata); + + if (!local->ops->nan_change_conf) + return -EOPNOTSUPP; + + trace_drv_nan_change_conf(local, sdata, conf, changes); + ret = local->ops->nan_change_conf(&local->hw, &sdata->vif, conf, + changes); + trace_drv_return_int(local, ret); + + return ret; +} + +static inline int drv_add_nan_func(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + const struct cfg80211_nan_func *nan_func) +{ + int ret; + + might_sleep(); + check_sdata_in_driver(sdata); + + if (!local->ops->add_nan_func) + return -EOPNOTSUPP; + + trace_drv_add_nan_func(local, sdata, nan_func); + ret = local->ops->add_nan_func(&local->hw, &sdata->vif, nan_func); + trace_drv_return_int(local, ret); + + return ret; +} + +static inline void drv_del_nan_func(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u8 instance_id) +{ + might_sleep(); + 
check_sdata_in_driver(sdata); + + trace_drv_del_nan_func(local, sdata, instance_id); + if (local->ops->del_nan_func) + local->ops->del_nan_func(&local->hw, &sdata->vif, instance_id); + trace_drv_return_void(local); +} + +static inline int drv_set_tid_config(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + struct cfg80211_tid_config *tid_conf) +{ + int ret; + + might_sleep(); + ret = local->ops->set_tid_config(&local->hw, &sdata->vif, sta, + tid_conf); + trace_drv_return_int(local, ret); + + return ret; +} + +static inline int drv_reset_tid_config(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, u8 tids) +{ + int ret; + + might_sleep(); + ret = local->ops->reset_tid_config(&local->hw, &sdata->vif, sta, tids); + trace_drv_return_int(local, ret); + + return ret; +} + +static inline void drv_update_vif_offload(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + might_sleep(); + check_sdata_in_driver(sdata); + + if (!local->ops->update_vif_offload) + return; + + trace_drv_update_vif_offload(local, sdata); + local->ops->update_vif_offload(&local->hw, &sdata->vif); + trace_drv_return_void(local); +} + +static inline void drv_sta_set_4addr(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, bool enabled) +{ + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_sta_set_4addr(local, sdata, sta, enabled); + if (local->ops->sta_set_4addr) + local->ops->sta_set_4addr(&local->hw, &sdata->vif, sta, enabled); + trace_drv_return_void(local); +} + +static inline void drv_sta_set_decap_offload(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + bool enabled) +{ + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_sta_set_decap_offload(local, sdata, sta, enabled); + if (local->ops->sta_set_decap_offload) + local->ops->sta_set_decap_offload(&local->hw, &sdata->vif, sta, + enabled); + trace_drv_return_void(local); +} + +static inline void drv_add_twt_setup(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + struct ieee80211_twt_setup *twt) +{ + struct ieee80211_twt_params *twt_agrt; + + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return; + + twt_agrt = (void *)twt->params; + + trace_drv_add_twt_setup(local, sta, twt, twt_agrt); + local->ops->add_twt_setup(&local->hw, sta, twt); + trace_drv_return_void(local); +} + +static inline void drv_twt_teardown_request(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + u8 flowid) +{ + might_sleep(); + if (!check_sdata_in_driver(sdata)) + return; + + if (!local->ops->twt_teardown_request) + return; + + trace_drv_twt_teardown_request(local, sta, flowid); + local->ops->twt_teardown_request(&local->hw, sta, flowid); + trace_drv_return_void(local); +} + +static inline int drv_net_fill_forward_path(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + struct net_device_path_ctx *ctx, + struct net_device_path *path) +{ + int ret = -EOPNOTSUPP; + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_net_fill_forward_path(local, sdata, sta); + if (local->ops->net_fill_forward_path) + ret = local->ops->net_fill_forward_path(&local->hw, + &sdata->vif, sta, + ctx, path); + 
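/*
 * Illustrative sketch only (hypothetical mydrv_* names): drv_wake_tx_queue()
 * and schedule_and_wake_txq() above hand a txq to the driver's
 * ->wake_tx_queue() op.  A minimal driver-side handler might simply pull
 * frames with ieee80211_tx_dequeue(); drivers that only want frames pushed
 * through ->tx() can instead point the op at
 * ieee80211_handle_wake_tx_queue().
 */
static void mydrv_wake_tx_queue(struct ieee80211_hw *hw,
				struct ieee80211_txq *txq)
{
	struct sk_buff *skb;

	while ((skb = ieee80211_tx_dequeue(hw, txq)))
		mydrv_tx_submit(hw, skb);	/* hypothetical HW submit */
}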
trace_drv_return_int(local, ret); + + return ret; +} + +static inline int drv_net_setup_tc(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct net_device *dev, + enum tc_setup_type type, void *type_data) +{ + int ret = -EOPNOTSUPP; + + sdata = get_bss_sdata(sdata); + trace_drv_net_setup_tc(local, sdata, type); + if (local->ops->net_setup_tc) + ret = local->ops->net_setup_tc(&local->hw, &sdata->vif, dev, + type, type_data); + trace_drv_return_int(local, ret); + + return ret; +} + +int drv_change_vif_links(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u16 old_links, u16 new_links, + struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]); +int drv_change_sta_links(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + u16 old_links, u16 new_links); + +#endif /* __MAC80211_DRIVER_OPS */ diff --git a/net/mac80211/drop.h b/net/mac80211/drop.h new file mode 100644 index 0000000000..1570fac841 --- /dev/null +++ b/net/mac80211/drop.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * mac80211 drop reason list + * + * Copyright (C) 2023 Intel Corporation + */ + +#ifndef MAC80211_DROP_H +#define MAC80211_DROP_H +#include <net/dropreason.h> + +typedef unsigned int __bitwise ieee80211_rx_result; + +#define MAC80211_DROP_REASONS_MONITOR(R) \ + R(RX_DROP_M_UNEXPECTED_4ADDR_FRAME) \ + R(RX_DROP_M_BAD_BCN_KEYIDX) \ + R(RX_DROP_M_BAD_MGMT_KEYIDX) \ +/* this line for the trailing \ - add before this */ + +#define MAC80211_DROP_REASONS_UNUSABLE(R) \ + R(RX_DROP_U_MIC_FAIL) \ + R(RX_DROP_U_REPLAY) \ + R(RX_DROP_U_BAD_MMIE) \ +/* this line for the trailing \ - add before this */ + +/* having two enums allows for checking ieee80211_rx_result use with sparse */ +enum ___mac80211_drop_reason { +/* if we get to the end of handlers with RX_CONTINUE this will be the reason */ + ___RX_CONTINUE = SKB_CONSUMED, + +/* this never gets used as an argument to kfree_skb_reason() */ + ___RX_QUEUED = SKB_NOT_DROPPED_YET, + +#define ENUM(x) ___ ## x, + ___RX_DROP_MONITOR = SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR << + SKB_DROP_REASON_SUBSYS_SHIFT, + MAC80211_DROP_REASONS_MONITOR(ENUM) + + ___RX_DROP_UNUSABLE = SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE << + SKB_DROP_REASON_SUBSYS_SHIFT, + MAC80211_DROP_REASONS_UNUSABLE(ENUM) +#undef ENUM +}; + +enum mac80211_drop_reason { + RX_CONTINUE = (__force ieee80211_rx_result)___RX_CONTINUE, + RX_QUEUED = (__force ieee80211_rx_result)___RX_QUEUED, + RX_DROP_MONITOR = (__force ieee80211_rx_result)___RX_DROP_MONITOR, + RX_DROP_UNUSABLE = (__force ieee80211_rx_result)___RX_DROP_UNUSABLE, +#define DEF(x) x = (__force ieee80211_rx_result)___ ## x, + MAC80211_DROP_REASONS_MONITOR(DEF) + MAC80211_DROP_REASONS_UNUSABLE(DEF) +#undef DEF +}; + +#define RX_RES_IS_UNUSABLE(result) \ + (((__force u32)(result) & SKB_DROP_REASON_SUBSYS_MASK) == ___RX_DROP_UNUSABLE) + +#endif /* MAC80211_DROP_H */ diff --git a/net/mac80211/eht.c b/net/mac80211/eht.c new file mode 100644 index 0000000000..ddc7acc683 --- /dev/null +++ b/net/mac80211/eht.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * EHT handling + * + * Copyright(c) 2021-2023 Intel Corporation + */ + +#include "ieee80211_i.h" + +void +ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const u8 *he_cap_ie, u8 he_cap_len, + const struct ieee80211_eht_cap_elem *eht_cap_ie_elem, + u8 eht_cap_len, + struct link_sta_info *link_sta) +{ + struct 
ieee80211_sta_eht_cap *eht_cap = &link_sta->pub->eht_cap; + struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie; + u8 eht_ppe_size = 0; + u8 mcs_nss_size; + u8 eht_total_size = sizeof(eht_cap->eht_cap_elem); + u8 *pos = (u8 *)eht_cap_ie_elem; + + memset(eht_cap, 0, sizeof(*eht_cap)); + + if (!eht_cap_ie_elem || + !ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif)) + return; + + mcs_nss_size = ieee80211_eht_mcs_nss_size(he_cap_ie_elem, + &eht_cap_ie_elem->fixed, + sdata->vif.type == + NL80211_IFTYPE_STATION); + + eht_total_size += mcs_nss_size; + + /* Calculate the PPE thresholds length only if the header is present */ + if (eht_cap_ie_elem->fixed.phy_cap_info[5] & + IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) { + u16 eht_ppe_hdr; + + if (eht_cap_len < eht_total_size + sizeof(u16)) + return; + + eht_ppe_hdr = get_unaligned_le16(eht_cap_ie_elem->optional + mcs_nss_size); + eht_ppe_size = + ieee80211_eht_ppe_size(eht_ppe_hdr, + eht_cap_ie_elem->fixed.phy_cap_info); + eht_total_size += eht_ppe_size; + + /* we calculate as if NSS > 8 are valid, but don't handle that */ + if (eht_ppe_size > sizeof(eht_cap->eht_ppe_thres)) + return; + } + + if (eht_cap_len < eht_total_size) + return; + + /* Copy the static portion of the EHT capabilities */ + memcpy(&eht_cap->eht_cap_elem, pos, sizeof(eht_cap->eht_cap_elem)); + pos += sizeof(eht_cap->eht_cap_elem); + + /* Copy MCS/NSS which depends on the peer capabilities */ + memset(&eht_cap->eht_mcs_nss_supp, 0, + sizeof(eht_cap->eht_mcs_nss_supp)); + memcpy(&eht_cap->eht_mcs_nss_supp, pos, mcs_nss_size); + + if (eht_ppe_size) + memcpy(eht_cap->eht_ppe_thres, + &eht_cap_ie_elem->optional[mcs_nss_size], + eht_ppe_size); + + eht_cap->has_eht = true; + + link_sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(link_sta); + link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta); +} diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c new file mode 100644 index 0000000000..a3830d925c --- /dev/null +++ b/net/mac80211/ethtool.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * mac80211 ethtool hooks for cfg80211 + * + * Copied from cfg.c - originally + * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2014 Intel Corporation (Author: Johannes Berg) + * Copyright (C) 2018, 2022 Intel Corporation + */ +#include <linux/types.h> +#include <net/cfg80211.h> +#include "ieee80211_i.h" +#include "sta_info.h" +#include "driver-ops.h" + +static int ieee80211_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *rp, + struct kernel_ethtool_ringparam *kernel_rp, + struct netlink_ext_ack *extack) +{ + struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy); + + if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0) + return -EINVAL; + + return drv_set_ringparam(local, rp->tx_pending, rp->rx_pending); +} + +static void ieee80211_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *rp, + struct kernel_ethtool_ringparam *kernel_rp, + struct netlink_ext_ack *extack) +{ + struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy); + + memset(rp, 0, sizeof(*rp)); + + drv_get_ringparam(local, &rp->tx_pending, &rp->tx_max_pending, + &rp->rx_pending, &rp->rx_max_pending); +} + +static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "rx_bytes", + "rx_duplicates", "rx_fragments", "rx_dropped", + "tx_packets", "tx_bytes", + "tx_filtered", "tx_retry_failed", "tx_retries", + "sta_state", "txrate", "rxrate", "signal", + "channel", 
"noise", "ch_time", "ch_time_busy", + "ch_time_ext_busy", "ch_time_rx", "ch_time_tx" +}; +#define STA_STATS_LEN ARRAY_SIZE(ieee80211_gstrings_sta_stats) + +static int ieee80211_get_sset_count(struct net_device *dev, int sset) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + int rv = 0; + + if (sset == ETH_SS_STATS) + rv += STA_STATS_LEN; + + rv += drv_get_et_sset_count(sdata, sset); + + if (rv == 0) + return -EOPNOTSUPP; + return rv; +} + +static void ieee80211_get_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_channel *channel; + struct sta_info *sta; + struct ieee80211_local *local = sdata->local; + struct station_info sinfo; + struct survey_info survey; + int i, q; +#define STA_STATS_SURVEY_LEN 7 + + memset(data, 0, sizeof(u64) * STA_STATS_LEN); + +#define ADD_STA_STATS(sta) \ + do { \ + data[i++] += sinfo.rx_packets; \ + data[i++] += sinfo.rx_bytes; \ + data[i++] += (sta)->rx_stats.num_duplicates; \ + data[i++] += (sta)->rx_stats.fragments; \ + data[i++] += sinfo.rx_dropped_misc; \ + \ + data[i++] += sinfo.tx_packets; \ + data[i++] += sinfo.tx_bytes; \ + data[i++] += (sta)->status_stats.filtered; \ + data[i++] += sinfo.tx_failed; \ + data[i++] += sinfo.tx_retries; \ + } while (0) + + /* For Managed stations, find the single station based on BSSID + * and use that. For interface types, iterate through all available + * stations and add stats for any station that is assigned to this + * network device. + */ + + mutex_lock(&local->sta_mtx); + + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + sta = sta_info_get_bss(sdata, sdata->deflink.u.mgd.bssid); + + if (!(sta && !WARN_ON(sta->sdata->dev != dev))) + goto do_survey; + + memset(&sinfo, 0, sizeof(sinfo)); + sta_set_sinfo(sta, &sinfo, false); + + i = 0; + ADD_STA_STATS(&sta->deflink); + + data[i++] = sta->sta_state; + + + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) + data[i] = 100000ULL * + cfg80211_calculate_bitrate(&sinfo.txrate); + i++; + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) + data[i] = 100000ULL * + cfg80211_calculate_bitrate(&sinfo.rxrate); + i++; + + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG)) + data[i] = (u8)sinfo.signal_avg; + i++; + } else { + list_for_each_entry(sta, &local->sta_list, list) { + /* Make sure this station belongs to the proper dev */ + if (sta->sdata->dev != dev) + continue; + + memset(&sinfo, 0, sizeof(sinfo)); + sta_set_sinfo(sta, &sinfo, false); + i = 0; + ADD_STA_STATS(&sta->deflink); + } + } + +do_survey: + i = STA_STATS_LEN - STA_STATS_SURVEY_LEN; + /* Get survey stats for current channel */ + survey.filled = 0; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + if (chanctx_conf) + channel = chanctx_conf->def.chan; + else + channel = NULL; + rcu_read_unlock(); + + if (channel) { + q = 0; + do { + survey.filled = 0; + if (drv_get_survey(local, q, &survey) != 0) { + survey.filled = 0; + break; + } + q++; + } while (channel != survey.channel); + } + + if (survey.filled) + data[i++] = survey.channel->center_freq; + else + data[i++] = 0; + if (survey.filled & SURVEY_INFO_NOISE_DBM) + data[i++] = (u8)survey.noise; + else + data[i++] = -1LL; + if (survey.filled & SURVEY_INFO_TIME) + data[i++] = survey.time; + else + data[i++] = -1LL; + if (survey.filled & SURVEY_INFO_TIME_BUSY) + data[i++] = survey.time_busy; + else + data[i++] = -1LL; + if 
(survey.filled & SURVEY_INFO_TIME_EXT_BUSY) + data[i++] = survey.time_ext_busy; + else + data[i++] = -1LL; + if (survey.filled & SURVEY_INFO_TIME_RX) + data[i++] = survey.time_rx; + else + data[i++] = -1LL; + if (survey.filled & SURVEY_INFO_TIME_TX) + data[i++] = survey.time_tx; + else + data[i++] = -1LL; + + mutex_unlock(&local->sta_mtx); + + if (WARN_ON(i != STA_STATS_LEN)) + return; + + drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN])); +} + +static void ieee80211_get_strings(struct net_device *dev, u32 sset, u8 *data) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + int sz_sta_stats = 0; + + if (sset == ETH_SS_STATS) { + sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats); + memcpy(data, ieee80211_gstrings_sta_stats, sz_sta_stats); + } + drv_get_et_strings(sdata, sset, &(data[sz_sta_stats])); +} + +static int ieee80211_get_regs_len(struct net_device *dev) +{ + return 0; +} + +static void ieee80211_get_regs(struct net_device *dev, + struct ethtool_regs *regs, + void *data) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + regs->version = wdev->wiphy->hw_version; + regs->len = 0; +} + +const struct ethtool_ops ieee80211_ethtool_ops = { + .get_drvinfo = cfg80211_get_drvinfo, + .get_regs_len = ieee80211_get_regs_len, + .get_regs = ieee80211_get_regs, + .get_link = ethtool_op_get_link, + .get_ringparam = ieee80211_get_ringparam, + .set_ringparam = ieee80211_set_ringparam, + .get_strings = ieee80211_get_strings, + .get_ethtool_stats = ieee80211_get_stats, + .get_sset_count = ieee80211_get_sset_count, +}; diff --git a/net/mac80211/fils_aead.c b/net/mac80211/fils_aead.c new file mode 100644 index 0000000000..912c46f74d --- /dev/null +++ b/net/mac80211/fils_aead.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * FILS AEAD for (Re)Association Request/Response frames + * Copyright 2016, Qualcomm Atheros, Inc. + */ + +#include <crypto/aes.h> +#include <crypto/hash.h> +#include <crypto/skcipher.h> +#include <crypto/utils.h> + +#include "ieee80211_i.h" +#include "aes_cmac.h" +#include "fils_aead.h" + +static void gf_mulx(u8 *pad) +{ + u64 a = get_unaligned_be64(pad); + u64 b = get_unaligned_be64(pad + 8); + + put_unaligned_be64((a << 1) | (b >> 63), pad); + put_unaligned_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0), pad + 8); +} + +static int aes_s2v(struct crypto_shash *tfm, + size_t num_elem, const u8 *addr[], size_t len[], u8 *v) +{ + u8 d[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE] = {}; + SHASH_DESC_ON_STACK(desc, tfm); + size_t i; + + desc->tfm = tfm; + + /* D = AES-CMAC(K, <zero>) */ + crypto_shash_digest(desc, tmp, AES_BLOCK_SIZE, d); + + for (i = 0; i < num_elem - 1; i++) { + /* D = dbl(D) xor AES_CMAC(K, Si) */ + gf_mulx(d); /* dbl */ + crypto_shash_digest(desc, addr[i], len[i], tmp); + crypto_xor(d, tmp, AES_BLOCK_SIZE); + } + + crypto_shash_init(desc); + + if (len[i] >= AES_BLOCK_SIZE) { + /* len(Sn) >= 128 */ + /* T = Sn xorend D */ + crypto_shash_update(desc, addr[i], len[i] - AES_BLOCK_SIZE); + crypto_xor(d, addr[i] + len[i] - AES_BLOCK_SIZE, + AES_BLOCK_SIZE); + } else { + /* len(Sn) < 128 */ + /* T = dbl(D) xor pad(Sn) */ + gf_mulx(d); /* dbl */ + crypto_xor(d, addr[i], len[i]); + d[len[i]] ^= 0x80; + } + /* V = AES-CMAC(K, T) */ + crypto_shash_finup(desc, d, AES_BLOCK_SIZE, v); + + return 0; +} + +/* Note: addr[] and len[] needs to have one extra slot at the end. 
*/ +static int aes_siv_encrypt(const u8 *key, size_t key_len, + const u8 *plain, size_t plain_len, + size_t num_elem, const u8 *addr[], + size_t len[], u8 *out) +{ + u8 v[AES_BLOCK_SIZE]; + struct crypto_shash *tfm; + struct crypto_skcipher *tfm2; + struct skcipher_request *req; + int res; + struct scatterlist src[1], dst[1]; + u8 *tmp; + + key_len /= 2; /* S2V key || CTR key */ + + addr[num_elem] = plain; + len[num_elem] = plain_len; + num_elem++; + + /* S2V */ + + tfm = crypto_alloc_shash("cmac(aes)", 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + /* K1 for S2V */ + res = crypto_shash_setkey(tfm, key, key_len); + if (!res) + res = aes_s2v(tfm, num_elem, addr, len, v); + crypto_free_shash(tfm); + if (res) + return res; + + /* Use a temporary buffer of the plaintext to handle need for + * overwriting this during AES-CTR. + */ + tmp = kmemdup(plain, plain_len, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + /* IV for CTR before encrypted data */ + memcpy(out, v, AES_BLOCK_SIZE); + + /* Synthetic IV to be used as the initial counter in CTR: + * Q = V bitand (1^64 || 0^1 || 1^31 || 0^1 || 1^31) + */ + v[8] &= 0x7f; + v[12] &= 0x7f; + + /* CTR */ + + tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm2)) { + kfree(tmp); + return PTR_ERR(tfm2); + } + /* K2 for CTR */ + res = crypto_skcipher_setkey(tfm2, key + key_len, key_len); + if (res) + goto fail; + + req = skcipher_request_alloc(tfm2, GFP_KERNEL); + if (!req) { + res = -ENOMEM; + goto fail; + } + + sg_init_one(src, tmp, plain_len); + sg_init_one(dst, out + AES_BLOCK_SIZE, plain_len); + skcipher_request_set_crypt(req, src, dst, plain_len, v); + res = crypto_skcipher_encrypt(req); + skcipher_request_free(req); +fail: + kfree(tmp); + crypto_free_skcipher(tfm2); + return res; +} + +/* Note: addr[] and len[] needs to have one extra slot at the end. 
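* On decryption the extra slot carries the recovered plaintext: S2V is + * recomputed over the AAD vectors plus the decrypted data and the result is + * compared against the received SIV to authenticate the frame.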
*/ +static int aes_siv_decrypt(const u8 *key, size_t key_len, + const u8 *iv_crypt, size_t iv_c_len, + size_t num_elem, const u8 *addr[], size_t len[], + u8 *out) +{ + struct crypto_shash *tfm; + struct crypto_skcipher *tfm2; + struct skcipher_request *req; + struct scatterlist src[1], dst[1]; + size_t crypt_len; + int res; + u8 frame_iv[AES_BLOCK_SIZE], iv[AES_BLOCK_SIZE]; + u8 check[AES_BLOCK_SIZE]; + + crypt_len = iv_c_len - AES_BLOCK_SIZE; + key_len /= 2; /* S2V key || CTR key */ + addr[num_elem] = out; + len[num_elem] = crypt_len; + num_elem++; + + memcpy(iv, iv_crypt, AES_BLOCK_SIZE); + memcpy(frame_iv, iv_crypt, AES_BLOCK_SIZE); + + /* Synthetic IV to be used as the initial counter in CTR: + * Q = V bitand (1^64 || 0^1 || 1^31 || 0^1 || 1^31) + */ + iv[8] &= 0x7f; + iv[12] &= 0x7f; + + /* CTR */ + + tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm2)) + return PTR_ERR(tfm2); + /* K2 for CTR */ + res = crypto_skcipher_setkey(tfm2, key + key_len, key_len); + if (res) { + crypto_free_skcipher(tfm2); + return res; + } + + req = skcipher_request_alloc(tfm2, GFP_KERNEL); + if (!req) { + crypto_free_skcipher(tfm2); + return -ENOMEM; + } + + sg_init_one(src, iv_crypt + AES_BLOCK_SIZE, crypt_len); + sg_init_one(dst, out, crypt_len); + skcipher_request_set_crypt(req, src, dst, crypt_len, iv); + res = crypto_skcipher_decrypt(req); + skcipher_request_free(req); + crypto_free_skcipher(tfm2); + if (res) + return res; + + /* S2V */ + + tfm = crypto_alloc_shash("cmac(aes)", 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + /* K1 for S2V */ + res = crypto_shash_setkey(tfm, key, key_len); + if (!res) + res = aes_s2v(tfm, num_elem, addr, len, check); + crypto_free_shash(tfm); + if (res) + return res; + if (memcmp(check, frame_iv, AES_BLOCK_SIZE) != 0) + return -EINVAL; + return 0; +} + +int fils_encrypt_assoc_req(struct sk_buff *skb, + struct ieee80211_mgd_assoc_data *assoc_data) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + u8 *capab, *ies, *encr; + const u8 *addr[5 + 1]; + const struct element *session; + size_t len[5 + 1]; + size_t crypt_len; + + if (ieee80211_is_reassoc_req(mgmt->frame_control)) { + capab = (u8 *)&mgmt->u.reassoc_req.capab_info; + ies = mgmt->u.reassoc_req.variable; + } else { + capab = (u8 *)&mgmt->u.assoc_req.capab_info; + ies = mgmt->u.assoc_req.variable; + } + + session = cfg80211_find_ext_elem(WLAN_EID_EXT_FILS_SESSION, + ies, skb->data + skb->len - ies); + if (!session || session->datalen != 1 + 8) + return -EINVAL; + /* encrypt after FILS Session element */ + encr = (u8 *)session->data + 1 + 8; + + /* AES-SIV AAD vectors */ + + /* The STA's MAC address */ + addr[0] = mgmt->sa; + len[0] = ETH_ALEN; + /* The AP's BSSID */ + addr[1] = mgmt->da; + len[1] = ETH_ALEN; + /* The STA's nonce */ + addr[2] = assoc_data->fils_nonces; + len[2] = FILS_NONCE_LEN; + /* The AP's nonce */ + addr[3] = &assoc_data->fils_nonces[FILS_NONCE_LEN]; + len[3] = FILS_NONCE_LEN; + /* The (Re)Association Request frame from the Capability Information + * field to the FILS Session element (both inclusive). 
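+ * This span forms the fifth and final AAD vector; everything following the + * FILS Session element is encrypted in place, with the 16-byte SIV placed + * immediately before the resulting ciphertext.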
+ */ + addr[4] = capab; + len[4] = encr - capab; + + crypt_len = skb->data + skb->len - encr; + skb_put(skb, AES_BLOCK_SIZE); + return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len, + encr, crypt_len, 5, addr, len, encr); +} + +int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata, + u8 *frame, size_t *frame_len, + struct ieee80211_mgd_assoc_data *assoc_data) +{ + struct ieee80211_mgmt *mgmt = (void *)frame; + u8 *capab, *ies, *encr; + const u8 *addr[5 + 1]; + const struct element *session; + size_t len[5 + 1]; + int res; + size_t crypt_len; + + if (*frame_len < 24 + 6) + return -EINVAL; + + capab = (u8 *)&mgmt->u.assoc_resp.capab_info; + ies = mgmt->u.assoc_resp.variable; + session = cfg80211_find_ext_elem(WLAN_EID_EXT_FILS_SESSION, + ies, frame + *frame_len - ies); + if (!session || session->datalen != 1 + 8) { + mlme_dbg(sdata, + "No (valid) FILS Session element in (Re)Association Response frame from %pM", + mgmt->sa); + return -EINVAL; + } + /* decrypt after FILS Session element */ + encr = (u8 *)session->data + 1 + 8; + + /* AES-SIV AAD vectors */ + + /* The AP's BSSID */ + addr[0] = mgmt->sa; + len[0] = ETH_ALEN; + /* The STA's MAC address */ + addr[1] = mgmt->da; + len[1] = ETH_ALEN; + /* The AP's nonce */ + addr[2] = &assoc_data->fils_nonces[FILS_NONCE_LEN]; + len[2] = FILS_NONCE_LEN; + /* The STA's nonce */ + addr[3] = assoc_data->fils_nonces; + len[3] = FILS_NONCE_LEN; + /* The (Re)Association Response frame from the Capability Information + * field to the FILS Session element (both inclusive). + */ + addr[4] = capab; + len[4] = encr - capab; + + crypt_len = frame + *frame_len - encr; + if (crypt_len < AES_BLOCK_SIZE) { + mlme_dbg(sdata, + "Not enough room for AES-SIV data after FILS Session element in (Re)Association Response frame from %pM", + mgmt->sa); + return -EINVAL; + } + res = aes_siv_decrypt(assoc_data->fils_kek, assoc_data->fils_kek_len, + encr, crypt_len, 5, addr, len, encr); + if (res != 0) { + mlme_dbg(sdata, + "AES-SIV decryption of (Re)Association Response frame from %pM failed", + mgmt->sa); + return res; + } + *frame_len -= AES_BLOCK_SIZE; + return 0; +} diff --git a/net/mac80211/fils_aead.h b/net/mac80211/fils_aead.h new file mode 100644 index 0000000000..c868153f87 --- /dev/null +++ b/net/mac80211/fils_aead.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * FILS AEAD for (Re)Association Request/Response frames + * Copyright 2016, Qualcomm Atheros, Inc. 
+ */ + +#ifndef FILS_AEAD_H +#define FILS_AEAD_H + +int fils_encrypt_assoc_req(struct sk_buff *skb, + struct ieee80211_mgd_assoc_data *assoc_data); +int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata, + u8 *frame, size_t *frame_len, + struct ieee80211_mgd_assoc_data *assoc_data); + +#endif /* FILS_AEAD_H */ diff --git a/net/mac80211/he.c b/net/mac80211/he.c new file mode 100644 index 0000000000..9f5ffdc9db --- /dev/null +++ b/net/mac80211/he.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HE handling + * + * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2019 - 2023 Intel Corporation + */ + +#include "ieee80211_i.h" + +static void +ieee80211_update_from_he_6ghz_capa(const struct ieee80211_he_6ghz_capa *he_6ghz_capa, + struct link_sta_info *link_sta) +{ + struct sta_info *sta = link_sta->sta; + enum ieee80211_smps_mode smps_mode; + + if (sta->sdata->vif.type == NL80211_IFTYPE_AP || + sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + switch (le16_get_bits(he_6ghz_capa->capa, + IEEE80211_HE_6GHZ_CAP_SM_PS)) { + case WLAN_HT_CAP_SM_PS_INVALID: + case WLAN_HT_CAP_SM_PS_STATIC: + smps_mode = IEEE80211_SMPS_STATIC; + break; + case WLAN_HT_CAP_SM_PS_DYNAMIC: + smps_mode = IEEE80211_SMPS_DYNAMIC; + break; + case WLAN_HT_CAP_SM_PS_DISABLED: + smps_mode = IEEE80211_SMPS_OFF; + break; + } + + link_sta->pub->smps_mode = smps_mode; + } else { + link_sta->pub->smps_mode = IEEE80211_SMPS_OFF; + } + + switch (le16_get_bits(he_6ghz_capa->capa, + IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN)) { + case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: + link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454; + break; + case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991: + link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_7991; + break; + case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895: + default: + link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_3895; + break; + } + + ieee80211_sta_recalc_aggregates(&sta->sta); + + link_sta->pub->he_6ghz_capa = *he_6ghz_capa; +} + +static void ieee80211_he_mcs_disable(__le16 *he_mcs) +{ + u32 i; + + for (i = 0; i < 8; i++) + *he_mcs |= cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << i * 2); +} + +static void ieee80211_he_mcs_intersection(__le16 *he_own_rx, __le16 *he_peer_rx, + __le16 *he_own_tx, __le16 *he_peer_tx) +{ + u32 i; + u16 own_rx, own_tx, peer_rx, peer_tx; + + for (i = 0; i < 8; i++) { + own_rx = le16_to_cpu(*he_own_rx); + own_rx = (own_rx >> i * 2) & IEEE80211_HE_MCS_NOT_SUPPORTED; + + own_tx = le16_to_cpu(*he_own_tx); + own_tx = (own_tx >> i * 2) & IEEE80211_HE_MCS_NOT_SUPPORTED; + + peer_rx = le16_to_cpu(*he_peer_rx); + peer_rx = (peer_rx >> i * 2) & IEEE80211_HE_MCS_NOT_SUPPORTED; + + peer_tx = le16_to_cpu(*he_peer_tx); + peer_tx = (peer_tx >> i * 2) & IEEE80211_HE_MCS_NOT_SUPPORTED; + + if (peer_tx != IEEE80211_HE_MCS_NOT_SUPPORTED) { + if (own_rx == IEEE80211_HE_MCS_NOT_SUPPORTED) + peer_tx = IEEE80211_HE_MCS_NOT_SUPPORTED; + else if (own_rx < peer_tx) + peer_tx = own_rx; + } + + if (peer_rx != IEEE80211_HE_MCS_NOT_SUPPORTED) { + if (own_tx == IEEE80211_HE_MCS_NOT_SUPPORTED) + peer_rx = IEEE80211_HE_MCS_NOT_SUPPORTED; + else if (own_tx < peer_rx) + peer_rx = own_tx; + } + + *he_peer_rx &= + ~cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << i * 2); + *he_peer_rx |= cpu_to_le16(peer_rx << i * 2); + + *he_peer_tx &= + ~cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << i * 2); + *he_peer_tx |= cpu_to_le16(peer_tx << i * 2); + } +} + +void +ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, + 
struct ieee80211_supported_band *sband, + const u8 *he_cap_ie, u8 he_cap_len, + const struct ieee80211_he_6ghz_capa *he_6ghz_capa, + struct link_sta_info *link_sta) +{ + struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap; + const struct ieee80211_sta_he_cap *own_he_cap_ptr; + struct ieee80211_sta_he_cap own_he_cap; + struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie; + u8 he_ppe_size; + u8 mcs_nss_size; + u8 he_total_size; + bool own_160, peer_160, own_80p80, peer_80p80; + + memset(he_cap, 0, sizeof(*he_cap)); + + if (!he_cap_ie) + return; + + own_he_cap_ptr = + ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); + if (!own_he_cap_ptr) + return; + + own_he_cap = *own_he_cap_ptr; + + /* Make sure size is OK */ + mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem); + he_ppe_size = + ieee80211_he_ppe_size(he_cap_ie[sizeof(he_cap->he_cap_elem) + + mcs_nss_size], + he_cap_ie_elem->phy_cap_info); + he_total_size = sizeof(he_cap->he_cap_elem) + mcs_nss_size + + he_ppe_size; + if (he_cap_len < he_total_size) + return; + + memcpy(&he_cap->he_cap_elem, he_cap_ie, sizeof(he_cap->he_cap_elem)); + + /* HE Tx/Rx HE MCS NSS Support Field */ + memcpy(&he_cap->he_mcs_nss_supp, + &he_cap_ie[sizeof(he_cap->he_cap_elem)], mcs_nss_size); + + /* Check if there are (optional) PPE Thresholds */ + if (he_cap->he_cap_elem.phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) + memcpy(he_cap->ppe_thres, + &he_cap_ie[sizeof(he_cap->he_cap_elem) + mcs_nss_size], + he_ppe_size); + + he_cap->has_he = true; + + link_sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(link_sta); + link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta); + + if (sband->band == NL80211_BAND_6GHZ && he_6ghz_capa) + ieee80211_update_from_he_6ghz_capa(he_6ghz_capa, link_sta); + + ieee80211_he_mcs_intersection(&own_he_cap.he_mcs_nss_supp.rx_mcs_80, + &he_cap->he_mcs_nss_supp.rx_mcs_80, + &own_he_cap.he_mcs_nss_supp.tx_mcs_80, + &he_cap->he_mcs_nss_supp.tx_mcs_80); + + own_160 = own_he_cap.he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; + peer_160 = he_cap->he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; + + if (peer_160 && own_160) { + ieee80211_he_mcs_intersection(&own_he_cap.he_mcs_nss_supp.rx_mcs_160, + &he_cap->he_mcs_nss_supp.rx_mcs_160, + &own_he_cap.he_mcs_nss_supp.tx_mcs_160, + &he_cap->he_mcs_nss_supp.tx_mcs_160); + } else if (peer_160 && !own_160) { + ieee80211_he_mcs_disable(&he_cap->he_mcs_nss_supp.rx_mcs_160); + ieee80211_he_mcs_disable(&he_cap->he_mcs_nss_supp.tx_mcs_160); + he_cap->he_cap_elem.phy_cap_info[0] &= + ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; + } + + own_80p80 = own_he_cap.he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G; + peer_80p80 = he_cap->he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G; + + if (peer_80p80 && own_80p80) { + ieee80211_he_mcs_intersection(&own_he_cap.he_mcs_nss_supp.rx_mcs_80p80, + &he_cap->he_mcs_nss_supp.rx_mcs_80p80, + &own_he_cap.he_mcs_nss_supp.tx_mcs_80p80, + &he_cap->he_mcs_nss_supp.tx_mcs_80p80); + } else if (peer_80p80 && !own_80p80) { + ieee80211_he_mcs_disable(&he_cap->he_mcs_nss_supp.rx_mcs_80p80); + ieee80211_he_mcs_disable(&he_cap->he_mcs_nss_supp.tx_mcs_80p80); + he_cap->he_cap_elem.phy_cap_info[0] &= + ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G; + } +} + +void +ieee80211_he_op_ie_to_bss_conf(struct ieee80211_vif *vif, + const struct 
ieee80211_he_operation *he_op_ie) +{ + memset(&vif->bss_conf.he_oper, 0, sizeof(vif->bss_conf.he_oper)); + if (!he_op_ie) + return; + + vif->bss_conf.he_oper.params = __le32_to_cpu(he_op_ie->he_oper_params); + vif->bss_conf.he_oper.nss_set = __le16_to_cpu(he_op_ie->he_mcs_nss_set); +} + +void +ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif, + const struct ieee80211_he_spr *he_spr_ie_elem) +{ + struct ieee80211_he_obss_pd *he_obss_pd = + &vif->bss_conf.he_obss_pd; + const u8 *data; + + memset(he_obss_pd, 0, sizeof(*he_obss_pd)); + + if (!he_spr_ie_elem) + return; + data = he_spr_ie_elem->optional; + + if (he_spr_ie_elem->he_sr_control & + IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT) + data++; + if (he_spr_ie_elem->he_sr_control & + IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) { + he_obss_pd->max_offset = *data++; + he_obss_pd->min_offset = *data++; + he_obss_pd->enable = true; + } +} diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c new file mode 100644 index 0000000000..b337187289 --- /dev/null +++ b/net/mac80211/ht.c @@ -0,0 +1,612 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HT handling + * + * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2007, Michael Wu <flamingice@sourmilk.net> + * Copyright 2007-2010, Intel Corporation + * Copyright 2017 Intel Deutschland GmbH + * Copyright(c) 2020-2023 Intel Corporation + */ + +#include <linux/ieee80211.h> +#include <linux/export.h> +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "rate.h" + +static void __check_htcap_disable(struct ieee80211_ht_cap *ht_capa, + struct ieee80211_ht_cap *ht_capa_mask, + struct ieee80211_sta_ht_cap *ht_cap, + u16 flag) +{ + __le16 le_flag = cpu_to_le16(flag); + if (ht_capa_mask->cap_info & le_flag) { + if (!(ht_capa->cap_info & le_flag)) + ht_cap->cap &= ~flag; + } +} + +static void __check_htcap_enable(struct ieee80211_ht_cap *ht_capa, + struct ieee80211_ht_cap *ht_capa_mask, + struct ieee80211_sta_ht_cap *ht_cap, + u16 flag) +{ + __le16 le_flag = cpu_to_le16(flag); + + if ((ht_capa_mask->cap_info & le_flag) && + (ht_capa->cap_info & le_flag)) + ht_cap->cap |= flag; +} + +void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta_ht_cap *ht_cap) +{ + struct ieee80211_ht_cap *ht_capa, *ht_capa_mask; + u8 *scaps, *smask; + int i; + + if (!ht_cap->ht_supported) + return; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + ht_capa = &sdata->u.mgd.ht_capa; + ht_capa_mask = &sdata->u.mgd.ht_capa_mask; + break; + case NL80211_IFTYPE_ADHOC: + ht_capa = &sdata->u.ibss.ht_capa; + ht_capa_mask = &sdata->u.ibss.ht_capa_mask; + break; + default: + WARN_ON_ONCE(1); + return; + } + + scaps = (u8 *)(&ht_capa->mcs.rx_mask); + smask = (u8 *)(&ht_capa_mask->mcs.rx_mask); + + /* NOTE: If you add more over-rides here, update register_hw + * ht_capa_mod_mask logic in main.c as well. + * And, if this method can ever change ht_cap.ht_supported, fix + * the check in ieee80211_add_ht_ie. + */ + + /* check for HT over-rides, MCS rates first. */ + for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) { + u8 m = smask[i]; + ht_cap->mcs.rx_mask[i] &= ~m; /* turn off all masked bits */ + /* Add back rates that are supported */ + ht_cap->mcs.rx_mask[i] |= (m & scaps[i]); + } + + /* Force removal of HT-40 capabilities? 
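* Both 20/40 support and SGI-40 are cleared below, so a user-forced + * HT20-only configuration stays self-consistent.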
*/ + __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap, + IEEE80211_HT_CAP_SUP_WIDTH_20_40); + __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap, + IEEE80211_HT_CAP_SGI_40); + + /* Allow user to disable SGI-20 (SGI-40 is handled above) */ + __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap, + IEEE80211_HT_CAP_SGI_20); + + /* Allow user to disable the max-AMSDU bit. */ + __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap, + IEEE80211_HT_CAP_MAX_AMSDU); + + /* Allow user to disable LDPC */ + __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap, + IEEE80211_HT_CAP_LDPC_CODING); + + /* Allow user to enable 40 MHz intolerant bit. */ + __check_htcap_enable(ht_capa, ht_capa_mask, ht_cap, + IEEE80211_HT_CAP_40MHZ_INTOLERANT); + + /* Allow user to enable TX STBC bit */ + __check_htcap_enable(ht_capa, ht_capa_mask, ht_cap, + IEEE80211_HT_CAP_TX_STBC); + + /* Allow user to configure RX STBC bits */ + if (ht_capa_mask->cap_info & cpu_to_le16(IEEE80211_HT_CAP_RX_STBC)) + ht_cap->cap |= le16_to_cpu(ht_capa->cap_info) & + IEEE80211_HT_CAP_RX_STBC; + + /* Allow user to decrease AMPDU factor */ + if (ht_capa_mask->ampdu_params_info & + IEEE80211_HT_AMPDU_PARM_FACTOR) { + u8 n = ht_capa->ampdu_params_info & + IEEE80211_HT_AMPDU_PARM_FACTOR; + if (n < ht_cap->ampdu_factor) + ht_cap->ampdu_factor = n; + } + + /* Allow the user to increase AMPDU density. */ + if (ht_capa_mask->ampdu_params_info & + IEEE80211_HT_AMPDU_PARM_DENSITY) { + u8 n = (ht_capa->ampdu_params_info & + IEEE80211_HT_AMPDU_PARM_DENSITY) + >> IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT; + if (n > ht_cap->ampdu_density) + ht_cap->ampdu_density = n; + } +} + + +bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const struct ieee80211_ht_cap *ht_cap_ie, + struct link_sta_info *link_sta) +{ + struct ieee80211_bss_conf *link_conf; + struct sta_info *sta = link_sta->sta; + struct ieee80211_sta_ht_cap ht_cap, own_cap; + u8 ampdu_info, tx_mcs_set_cap; + int i, max_tx_streams; + bool changed; + enum ieee80211_sta_rx_bandwidth bw; + enum nl80211_chan_width width; + + memset(&ht_cap, 0, sizeof(ht_cap)); + + if (!ht_cap_ie || !sband->ht_cap.ht_supported) + goto apply; + + ht_cap.ht_supported = true; + + own_cap = sband->ht_cap; + + /* + * If user has specified capability over-rides, take care + * of that if the station we're setting up is the AP or TDLS peer that + * we advertised a restricted capability set to. Override + * our own capabilities and then use those below. + */ + if (sdata->vif.type == NL80211_IFTYPE_STATION || + sdata->vif.type == NL80211_IFTYPE_ADHOC) + ieee80211_apply_htcap_overrides(sdata, &own_cap); + + /* + * The bits listed in this expression should be + * the same for the peer and us, if the station + * advertises more then we can't use those thus + * we mask them out. + */ + ht_cap.cap = le16_to_cpu(ht_cap_ie->cap_info) & + (own_cap.cap | ~(IEEE80211_HT_CAP_LDPC_CODING | + IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_GRN_FLD | + IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_SGI_40 | + IEEE80211_HT_CAP_DSSSCCK40)); + + /* + * The STBC bits are asymmetric -- if we don't have + * TX then mask out the peer's RX and vice versa. 
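+ * A peer's RX STBC capability is only usable when we can transmit STBC, + * and its TX STBC capability only when we can receive it.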
+ */ + if (!(own_cap.cap & IEEE80211_HT_CAP_TX_STBC)) + ht_cap.cap &= ~IEEE80211_HT_CAP_RX_STBC; + if (!(own_cap.cap & IEEE80211_HT_CAP_RX_STBC)) + ht_cap.cap &= ~IEEE80211_HT_CAP_TX_STBC; + + ampdu_info = ht_cap_ie->ampdu_params_info; + ht_cap.ampdu_factor = + ampdu_info & IEEE80211_HT_AMPDU_PARM_FACTOR; + ht_cap.ampdu_density = + (ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2; + + /* own MCS TX capabilities */ + tx_mcs_set_cap = own_cap.mcs.tx_params; + + /* Copy peer MCS TX capabilities, the driver might need them. */ + ht_cap.mcs.tx_params = ht_cap_ie->mcs.tx_params; + + /* can we TX with MCS rates? */ + if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED)) + goto apply; + + /* Counting from 0, therefore +1 */ + if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_RX_DIFF) + max_tx_streams = + ((tx_mcs_set_cap & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) + >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1; + else + max_tx_streams = IEEE80211_HT_MCS_TX_MAX_STREAMS; + + /* + * 802.11n-2009 20.3.5 / 20.6 says: + * - indices 0 to 7 and 32 are single spatial stream + * - 8 to 31 are multiple spatial streams using equal modulation + * [8..15 for two streams, 16..23 for three and 24..31 for four] + * - remainder are multiple spatial streams using unequal modulation + */ + for (i = 0; i < max_tx_streams; i++) + ht_cap.mcs.rx_mask[i] = + own_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i]; + + if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION) + for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE; + i < IEEE80211_HT_MCS_MASK_LEN; i++) + ht_cap.mcs.rx_mask[i] = + own_cap.mcs.rx_mask[i] & + ht_cap_ie->mcs.rx_mask[i]; + + /* handle MCS rate 32 too */ + if (own_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1) + ht_cap.mcs.rx_mask[32/8] |= 1; + + /* set Rx highest rate */ + ht_cap.mcs.rx_highest = ht_cap_ie->mcs.rx_highest; + + if (ht_cap.cap & IEEE80211_HT_CAP_MAX_AMSDU) + link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_7935; + else + link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_3839; + + ieee80211_sta_recalc_aggregates(&sta->sta); + + apply: + changed = memcmp(&link_sta->pub->ht_cap, &ht_cap, sizeof(ht_cap)); + + memcpy(&link_sta->pub->ht_cap, &ht_cap, sizeof(ht_cap)); + + rcu_read_lock(); + link_conf = rcu_dereference(sdata->vif.link_conf[link_sta->link_id]); + if (WARN_ON(!link_conf)) + width = NL80211_CHAN_WIDTH_20_NOHT; + else + width = link_conf->chandef.width; + + switch (width) { + default: + WARN_ON_ONCE(1); + fallthrough; + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + bw = IEEE80211_STA_RX_BW_20; + break; + case NL80211_CHAN_WIDTH_40: + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + case NL80211_CHAN_WIDTH_320: + bw = ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? + IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20; + break; + } + rcu_read_unlock(); + + link_sta->pub->bandwidth = bw; + + link_sta->cur_max_bandwidth = + ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? 
+ IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20; + + if (sta->sdata->vif.type == NL80211_IFTYPE_AP || + sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + enum ieee80211_smps_mode smps_mode; + + switch ((ht_cap.cap & IEEE80211_HT_CAP_SM_PS) + >> IEEE80211_HT_CAP_SM_PS_SHIFT) { + case WLAN_HT_CAP_SM_PS_INVALID: + case WLAN_HT_CAP_SM_PS_STATIC: + smps_mode = IEEE80211_SMPS_STATIC; + break; + case WLAN_HT_CAP_SM_PS_DYNAMIC: + smps_mode = IEEE80211_SMPS_DYNAMIC; + break; + case WLAN_HT_CAP_SM_PS_DISABLED: + smps_mode = IEEE80211_SMPS_OFF; + break; + } + + if (smps_mode != link_sta->pub->smps_mode) + changed = true; + link_sta->pub->smps_mode = smps_mode; + } else { + link_sta->pub->smps_mode = IEEE80211_SMPS_OFF; + } + + return changed; +} + +void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, + enum ieee80211_agg_stop_reason reason) +{ + int i; + + mutex_lock(&sta->ampdu_mlme.mtx); + for (i = 0; i < IEEE80211_NUM_TIDS; i++) + ___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, + WLAN_REASON_QSTA_LEAVE_QBSS, + reason != AGG_STOP_DESTROY_STA && + reason != AGG_STOP_PEER_REQUEST); + + for (i = 0; i < IEEE80211_NUM_TIDS; i++) + ___ieee80211_stop_tx_ba_session(sta, i, reason); + mutex_unlock(&sta->ampdu_mlme.mtx); + + /* + * In case the tear down is part of a reconfigure due to HW restart + * request, it is possible that the low level driver requested to stop + * the BA session, so handle it to properly clean tid_tx data. + */ + if(reason == AGG_STOP_DESTROY_STA) { + cancel_work_sync(&sta->ampdu_mlme.work); + + mutex_lock(&sta->ampdu_mlme.mtx); + for (i = 0; i < IEEE80211_NUM_TIDS; i++) { + struct tid_ampdu_tx *tid_tx = + rcu_dereference_protected_tid_tx(sta, i); + + if (!tid_tx) + continue; + + if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state)) + ieee80211_stop_tx_ba_cb(sta, i, tid_tx); + } + mutex_unlock(&sta->ampdu_mlme.mtx); + } +} + +void ieee80211_ba_session_work(struct work_struct *work) +{ + struct sta_info *sta = + container_of(work, struct sta_info, ampdu_mlme.work); + struct tid_ampdu_tx *tid_tx; + bool blocked; + int tid; + + /* When this flag is set, new sessions should be blocked. */ + blocked = test_sta_flag(sta, WLAN_STA_BLOCK_BA); + + mutex_lock(&sta->ampdu_mlme.mtx); + for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) { + if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired)) + ___ieee80211_stop_rx_ba_session( + sta, tid, WLAN_BACK_RECIPIENT, + WLAN_REASON_QSTA_TIMEOUT, true); + + if (test_and_clear_bit(tid, + sta->ampdu_mlme.tid_rx_stop_requested)) + ___ieee80211_stop_rx_ba_session( + sta, tid, WLAN_BACK_RECIPIENT, + WLAN_REASON_UNSPECIFIED, true); + + if (!blocked && + test_and_clear_bit(tid, + sta->ampdu_mlme.tid_rx_manage_offl)) + ___ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid, + IEEE80211_MAX_AMPDU_BUF_HT, + false, true, NULL); + + if (test_and_clear_bit(tid + IEEE80211_NUM_TIDS, + sta->ampdu_mlme.tid_rx_manage_offl)) + ___ieee80211_stop_rx_ba_session( + sta, tid, WLAN_BACK_RECIPIENT, + 0, false); + + spin_lock_bh(&sta->lock); + + tid_tx = sta->ampdu_mlme.tid_start_tx[tid]; + if (!blocked && tid_tx) { + struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]); + struct ieee80211_sub_if_data *sdata = + vif_to_sdata(txqi->txq.vif); + struct fq *fq = &sdata->local->fq; + + spin_lock_bh(&fq->lock); + + /* Allow only frags to be dequeued */ + set_bit(IEEE80211_TXQ_STOP, &txqi->flags); + + if (!skb_queue_empty(&txqi->frags)) { + /* Fragmented Tx is ongoing, wait for it to + * finish. Reschedule worker to retry later. 
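+ * The worker re-queues itself further down, once synchronize_net() has + * given the TX path a chance to drain txqi->frags.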
+ */ + + spin_unlock_bh(&fq->lock); + spin_unlock_bh(&sta->lock); + + /* Give the task working on the txq a chance + * to send out the queued frags + */ + synchronize_net(); + + mutex_unlock(&sta->ampdu_mlme.mtx); + + ieee80211_queue_work(&sdata->local->hw, work); + return; + } + + spin_unlock_bh(&fq->lock); + + /* + * Assign it over to the normal tid_tx array + * where it "goes live". + */ + + sta->ampdu_mlme.tid_start_tx[tid] = NULL; + /* could there be a race? */ + if (sta->ampdu_mlme.tid_tx[tid]) + kfree(tid_tx); + else + ieee80211_assign_tid_tx(sta, tid, tid_tx); + spin_unlock_bh(&sta->lock); + + ieee80211_tx_ba_session_handle_start(sta, tid); + continue; + } + spin_unlock_bh(&sta->lock); + + tid_tx = rcu_dereference_protected_tid_tx(sta, tid); + if (!tid_tx) + continue; + + if (!blocked && + test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state)) + ieee80211_start_tx_ba_cb(sta, tid, tid_tx); + if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state)) + ___ieee80211_stop_tx_ba_session(sta, tid, + AGG_STOP_LOCAL_REQUEST); + if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state)) + ieee80211_stop_tx_ba_cb(sta, tid, tid_tx); + } + mutex_unlock(&sta->ampdu_mlme.mtx); +} + +void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, + const u8 *da, u16 tid, + u16 initiator, u16 reason_code) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt; + u16 params; + + skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); + if (!skb) + return; + + skb_reserve(skb, local->hw.extra_tx_headroom); + mgmt = skb_put_zero(skb, 24); + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + if (sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + sdata->vif.type == NL80211_IFTYPE_MESH_POINT) + memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); + else if (sdata->vif.type == NL80211_IFTYPE_STATION) + memcpy(mgmt->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN); + else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) + memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN); + + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + + skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba)); + + mgmt->u.action.category = WLAN_CATEGORY_BACK; + mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA; + params = (u16)(initiator << 11); /* bit 11 initiator */ + params |= (u16)(tid << 12); /* bit 15:12 TID number */ + + mgmt->u.action.u.delba.params = cpu_to_le16(params); + mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code); + + ieee80211_tx_skb(sdata, skb); +} + +void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee80211_mgmt *mgmt, size_t len) +{ + u16 tid, params; + u16 initiator; + + params = le16_to_cpu(mgmt->u.action.u.delba.params); + tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; + initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11; + + ht_dbg_ratelimited(sdata, "delba from %pM (%s) tid %d reason code %d\n", + mgmt->sa, initiator ? 
"initiator" : "recipient", + tid, + le16_to_cpu(mgmt->u.action.u.delba.reason_code)); + + if (initiator == WLAN_BACK_INITIATOR) + __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0, + true); + else + __ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_PEER_REQUEST); +} + +enum nl80211_smps_mode +ieee80211_smps_mode_to_smps_mode(enum ieee80211_smps_mode smps) +{ + switch (smps) { + case IEEE80211_SMPS_OFF: + return NL80211_SMPS_OFF; + case IEEE80211_SMPS_STATIC: + return NL80211_SMPS_STATIC; + case IEEE80211_SMPS_DYNAMIC: + return NL80211_SMPS_DYNAMIC; + default: + return NL80211_SMPS_OFF; + } +} + +int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, + enum ieee80211_smps_mode smps, const u8 *da, + const u8 *bssid) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *action_frame; + + /* 27 = header + category + action + smps mode */ + skb = dev_alloc_skb(27 + local->hw.extra_tx_headroom); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, local->hw.extra_tx_headroom); + action_frame = skb_put(skb, 27); + memcpy(action_frame->da, da, ETH_ALEN); + memcpy(action_frame->sa, sdata->dev->dev_addr, ETH_ALEN); + memcpy(action_frame->bssid, bssid, ETH_ALEN); + action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + action_frame->u.action.category = WLAN_CATEGORY_HT; + action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS; + switch (smps) { + case IEEE80211_SMPS_AUTOMATIC: + case IEEE80211_SMPS_NUM_MODES: + WARN_ON(1); + fallthrough; + case IEEE80211_SMPS_OFF: + action_frame->u.action.u.ht_smps.smps_control = + WLAN_HT_SMPS_CONTROL_DISABLED; + break; + case IEEE80211_SMPS_STATIC: + action_frame->u.action.u.ht_smps.smps_control = + WLAN_HT_SMPS_CONTROL_STATIC; + break; + case IEEE80211_SMPS_DYNAMIC: + action_frame->u.action.u.ht_smps.smps_control = + WLAN_HT_SMPS_CONTROL_DYNAMIC; + break; + } + + /* we'll do more on status of this frame */ + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + ieee80211_tx_skb(sdata, skb); + + return 0; +} + +void ieee80211_request_smps(struct ieee80211_vif *vif, unsigned int link_id, + enum ieee80211_smps_mode smps_mode) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_link_data *link; + + if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION)) + return; + + rcu_read_lock(); + link = rcu_dereference(sdata->link[link_id]); + if (WARN_ON(!link)) + goto out; + + if (link->u.mgd.driver_smps_mode == smps_mode) + goto out; + + link->u.mgd.driver_smps_mode = smps_mode; + wiphy_work_queue(sdata->local->hw.wiphy, + &link->u.mgd.request_smps_work); +out: + rcu_read_unlock(); +} +/* this might change ... don't want non-open drivers using it */ +EXPORT_SYMBOL_GPL(ieee80211_request_smps); diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c new file mode 100644 index 0000000000..5542c93edf --- /dev/null +++ b/net/mac80211/ibss.c @@ -0,0 +1,1892 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * IBSS mode implementation + * Copyright 2003-2008, Jouni Malinen <j@w1.fi> + * Copyright 2004, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. 
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2007, Michael Wu <flamingice@sourmilk.net> + * Copyright 2009, Johannes Berg <johannes@sipsolutions.net> + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2018-2023 Intel Corporation + */ + +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/if_ether.h> +#include <linux/skbuff.h> +#include <linux/if_arp.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <net/mac80211.h> + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" + +#define IEEE80211_SCAN_INTERVAL (2 * HZ) +#define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) + +#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) +#define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) +#define IEEE80211_IBSS_RSN_INACTIVITY_LIMIT (10 * HZ) + +#define IEEE80211_IBSS_MAX_STA_ENTRIES 128 + +static struct beacon_data * +ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata, + const int beacon_int, const u32 basic_rates, + const u16 capability, u64 tsf, + struct cfg80211_chan_def *chandef, + bool *have_higher_than_11mbit, + struct cfg80211_csa_settings *csa_settings) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + int rates_n = 0, i, ri; + struct ieee80211_mgmt *mgmt; + u8 *pos; + struct ieee80211_supported_band *sband; + u32 rate_flags, rates = 0, rates_added = 0; + struct beacon_data *presp; + int frame_len; + int shift; + + /* Build IBSS probe response */ + frame_len = sizeof(struct ieee80211_hdr_3addr) + + 12 /* struct ieee80211_mgmt.u.beacon */ + + 2 + IEEE80211_MAX_SSID_LEN /* max SSID */ + + 2 + 8 /* max Supported Rates */ + + 3 /* max DS params */ + + 4 /* IBSS params */ + + 5 /* Channel Switch Announcement */ + + 2 + (IEEE80211_MAX_SUPP_RATES - 8) + + 2 + sizeof(struct ieee80211_ht_cap) + + 2 + sizeof(struct ieee80211_ht_operation) + + 2 + sizeof(struct ieee80211_vht_cap) + + 2 + sizeof(struct ieee80211_vht_operation) + + ifibss->ie_len; + presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL); + if (!presp) + return NULL; + + presp->head = (void *)(presp + 1); + + mgmt = (void *) presp->head; + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_RESP); + eth_broadcast_addr(mgmt->da); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN); + mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int); + mgmt->u.beacon.timestamp = cpu_to_le64(tsf); + mgmt->u.beacon.capab_info = cpu_to_le16(capability); + + pos = (u8 *)mgmt + offsetof(struct ieee80211_mgmt, u.beacon.variable); + + *pos++ = WLAN_EID_SSID; + *pos++ = ifibss->ssid_len; + memcpy(pos, ifibss->ssid, ifibss->ssid_len); + pos += ifibss->ssid_len; + + sband = local->hw.wiphy->bands[chandef->chan->band]; + rate_flags = ieee80211_chandef_rate_flags(chandef); + shift = ieee80211_chandef_get_shift(chandef); + rates_n = 0; + if (have_higher_than_11mbit) + *have_higher_than_11mbit = false; + + for (i = 0; i < sband->n_bitrates; i++) { + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + if (sband->bitrates[i].bitrate > 110 && + have_higher_than_11mbit) + *have_higher_than_11mbit = true; + + rates |= BIT(i); + rates_n++; + } + + *pos++ = WLAN_EID_SUPP_RATES; + *pos++ = min_t(int, 8, rates_n); + for (ri = 0; ri < sband->n_bitrates; ri++) { + int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate, + 5 * (1 << shift)); + u8 basic = 0; + if (!(rates & BIT(ri))) + continue; + + if 
(basic_rates & BIT(ri)) + basic = 0x80; + *pos++ = basic | (u8) rate; + if (++rates_added == 8) { + ri++; /* continue at next rate for EXT_SUPP_RATES */ + break; + } + } + + if (sband->band == NL80211_BAND_2GHZ) { + *pos++ = WLAN_EID_DS_PARAMS; + *pos++ = 1; + *pos++ = ieee80211_frequency_to_channel( + chandef->chan->center_freq); + } + + *pos++ = WLAN_EID_IBSS_PARAMS; + *pos++ = 2; + /* FIX: set ATIM window based on scan results */ + *pos++ = 0; + *pos++ = 0; + + if (csa_settings) { + *pos++ = WLAN_EID_CHANNEL_SWITCH; + *pos++ = 3; + *pos++ = csa_settings->block_tx ? 1 : 0; + *pos++ = ieee80211_frequency_to_channel( + csa_settings->chandef.chan->center_freq); + presp->cntdwn_counter_offsets[0] = (pos - presp->head); + *pos++ = csa_settings->count; + presp->cntdwn_current_counter = csa_settings->count; + } + + /* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */ + if (rates_n > 8) { + *pos++ = WLAN_EID_EXT_SUPP_RATES; + *pos++ = rates_n - 8; + for (; ri < sband->n_bitrates; ri++) { + int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate, + 5 * (1 << shift)); + u8 basic = 0; + if (!(rates & BIT(ri))) + continue; + + if (basic_rates & BIT(ri)) + basic = 0x80; + *pos++ = basic | (u8) rate; + } + } + + if (ifibss->ie_len) { + memcpy(pos, ifibss->ie, ifibss->ie_len); + pos += ifibss->ie_len; + } + + /* add HT capability and information IEs */ + if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT && + chandef->width != NL80211_CHAN_WIDTH_5 && + chandef->width != NL80211_CHAN_WIDTH_10 && + sband->ht_cap.ht_supported) { + struct ieee80211_sta_ht_cap ht_cap; + + memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); + ieee80211_apply_htcap_overrides(sdata, &ht_cap); + + pos = ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap); + /* + * Note: According to 802.11n-2009 9.13.3.1, HT Protection + * field and RIFS Mode are reserved in IBSS mode, therefore + * keep them at 0 + */ + pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap, + chandef, 0, false); + + /* add VHT capability and information IEs */ + if (chandef->width != NL80211_CHAN_WIDTH_20 && + chandef->width != NL80211_CHAN_WIDTH_40 && + sband->vht_cap.vht_supported) { + pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap, + sband->vht_cap.cap); + pos = ieee80211_ie_build_vht_oper(pos, &sband->vht_cap, + chandef); + } + } + + if (local->hw.queues >= IEEE80211_NUM_ACS) + pos = ieee80211_add_wmm_info_ie(pos, 0); /* U-APSD not in use */ + + presp->head_len = pos - presp->head; + if (WARN_ON(presp->head_len > frame_len)) + goto error; + + return presp; +error: + kfree(presp); + return NULL; +} + +static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, + const u8 *bssid, const int beacon_int, + struct cfg80211_chan_def *req_chandef, + const u32 basic_rates, + const u16 capability, u64 tsf, + bool creator) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + struct ieee80211_mgmt *mgmt; + struct cfg80211_bss *bss; + u64 bss_change; + struct cfg80211_chan_def chandef; + struct ieee80211_channel *chan; + struct beacon_data *presp; + struct cfg80211_inform_bss bss_meta = {}; + bool have_higher_than_11mbit; + bool radar_required; + int err; + + sdata_assert_lock(sdata); + + /* Reset own TSF to allow time synchronization work. 
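* In IBSS a station only adopts a received timestamp that is ahead of its + * own, so starting from zero ensures the joined network's TSF is taken over.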
*/ + drv_reset_tsf(local, sdata); + + if (!ether_addr_equal(ifibss->bssid, bssid)) + sta_info_flush(sdata); + + /* if merging, indicate to driver that we leave the old IBSS */ + if (sdata->vif.cfg.ibss_joined) { + sdata->vif.cfg.ibss_joined = false; + sdata->vif.cfg.ibss_creator = false; + sdata->vif.bss_conf.enable_beacon = false; + netif_carrier_off(sdata->dev); + ieee80211_bss_info_change_notify(sdata, + BSS_CHANGED_IBSS | + BSS_CHANGED_BEACON_ENABLED); + drv_leave_ibss(local, sdata); + } + + presp = sdata_dereference(ifibss->presp, sdata); + RCU_INIT_POINTER(ifibss->presp, NULL); + if (presp) + kfree_rcu(presp, rcu_head); + + /* make a copy of the chandef, it could be modified below. */ + chandef = *req_chandef; + chan = chandef.chan; + if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef, + NL80211_IFTYPE_ADHOC)) { + if (chandef.width == NL80211_CHAN_WIDTH_5 || + chandef.width == NL80211_CHAN_WIDTH_10 || + chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + chandef.width == NL80211_CHAN_WIDTH_20) { + sdata_info(sdata, + "Failed to join IBSS, beacons forbidden\n"); + return; + } + chandef.width = NL80211_CHAN_WIDTH_20; + chandef.center_freq1 = chan->center_freq; + /* check again for downgraded chandef */ + if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef, + NL80211_IFTYPE_ADHOC)) { + sdata_info(sdata, + "Failed to join IBSS, beacons forbidden\n"); + return; + } + } + + err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, + &chandef, NL80211_IFTYPE_ADHOC); + if (err < 0) { + sdata_info(sdata, + "Failed to join IBSS, invalid chandef\n"); + return; + } + if (err > 0 && !ifibss->userspace_handles_dfs) { + sdata_info(sdata, + "Failed to join IBSS, DFS channel without control program\n"); + return; + } + + radar_required = err; + + mutex_lock(&local->mtx); + if (ieee80211_link_use_channel(&sdata->deflink, &chandef, + ifibss->fixed_channel ? + IEEE80211_CHANCTX_SHARED : + IEEE80211_CHANCTX_EXCLUSIVE)) { + sdata_info(sdata, "Failed to join IBSS, no channel context\n"); + mutex_unlock(&local->mtx); + return; + } + sdata->deflink.radar_required = radar_required; + mutex_unlock(&local->mtx); + + memcpy(ifibss->bssid, bssid, ETH_ALEN); + + presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates, + capability, tsf, &chandef, + &have_higher_than_11mbit, NULL); + if (!presp) + return; + + rcu_assign_pointer(ifibss->presp, presp); + mgmt = (void *)presp->head; + + sdata->vif.bss_conf.enable_beacon = true; + sdata->vif.bss_conf.beacon_int = beacon_int; + sdata->vif.bss_conf.basic_rates = basic_rates; + sdata->vif.cfg.ssid_len = ifibss->ssid_len; + memcpy(sdata->vif.cfg.ssid, ifibss->ssid, ifibss->ssid_len); + bss_change = BSS_CHANGED_BEACON_INT; + bss_change |= ieee80211_reset_erp_info(sdata); + bss_change |= BSS_CHANGED_BSSID; + bss_change |= BSS_CHANGED_BEACON; + bss_change |= BSS_CHANGED_BEACON_ENABLED; + bss_change |= BSS_CHANGED_BASIC_RATES; + bss_change |= BSS_CHANGED_HT; + bss_change |= BSS_CHANGED_IBSS; + bss_change |= BSS_CHANGED_SSID; + + /* + * In 5 GHz/802.11a, we can always use short slot time. + * (IEEE 802.11-2012 18.3.8.7) + * + * In 2.4GHz, we must always use long slots in IBSS for compatibility + * reasons. + * (IEEE 802.11-2012 19.4.5) + * + * HT follows these specifications (IEEE 802.11-2012 20.3.18) + */ + sdata->vif.bss_conf.use_short_slot = chan->band == NL80211_BAND_5GHZ; + bss_change |= BSS_CHANGED_ERP_SLOT; + + /* cf. 
IEEE 802.11 9.2.12 */ + sdata->deflink.operating_11g_mode = + chan->band == NL80211_BAND_2GHZ && have_higher_than_11mbit; + + ieee80211_set_wmm_default(&sdata->deflink, true, false); + + sdata->vif.cfg.ibss_joined = true; + sdata->vif.cfg.ibss_creator = creator; + + err = drv_join_ibss(local, sdata); + if (err) { + sdata->vif.cfg.ibss_joined = false; + sdata->vif.cfg.ibss_creator = false; + sdata->vif.bss_conf.enable_beacon = false; + sdata->vif.cfg.ssid_len = 0; + RCU_INIT_POINTER(ifibss->presp, NULL); + kfree_rcu(presp, rcu_head); + mutex_lock(&local->mtx); + ieee80211_link_release_channel(&sdata->deflink); + mutex_unlock(&local->mtx); + sdata_info(sdata, "Failed to join IBSS, driver failure: %d\n", + err); + return; + } + + ieee80211_bss_info_change_notify(sdata, bss_change); + + ifibss->state = IEEE80211_IBSS_MLME_JOINED; + mod_timer(&ifibss->timer, + round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); + + bss_meta.chan = chan; + bss_meta.scan_width = cfg80211_chandef_to_scan_width(&chandef); + bss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta, mgmt, + presp->head_len, GFP_KERNEL); + + cfg80211_put_bss(local->hw.wiphy, bss); + netif_carrier_on(sdata->dev); + cfg80211_ibss_joined(sdata->dev, ifibss->bssid, chan, GFP_KERNEL); +} + +static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss *bss) +{ + struct cfg80211_bss *cbss = + container_of((void *)bss, struct cfg80211_bss, priv); + struct ieee80211_supported_band *sband; + struct cfg80211_chan_def chandef; + u32 basic_rates; + int i, j; + u16 beacon_int = cbss->beacon_interval; + const struct cfg80211_bss_ies *ies; + enum nl80211_channel_type chan_type; + u64 tsf; + u32 rate_flags; + int shift; + + sdata_assert_lock(sdata); + + if (beacon_int < 10) + beacon_int = 10; + + switch (sdata->u.ibss.chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_40: + chan_type = cfg80211_get_chandef_type(&sdata->u.ibss.chandef); + cfg80211_chandef_create(&chandef, cbss->channel, chan_type); + break; + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: + cfg80211_chandef_create(&chandef, cbss->channel, + NL80211_CHAN_NO_HT); + chandef.width = sdata->u.ibss.chandef.width; + break; + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + chandef = sdata->u.ibss.chandef; + chandef.chan = cbss->channel; + break; + default: + /* fall back to 20 MHz for unsupported modes */ + cfg80211_chandef_create(&chandef, cbss->channel, + NL80211_CHAN_NO_HT); + break; + } + + sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; + rate_flags = ieee80211_chandef_rate_flags(&sdata->u.ibss.chandef); + shift = ieee80211_vif_get_shift(&sdata->vif); + + basic_rates = 0; + + for (i = 0; i < bss->supp_rates_len; i++) { + int rate = bss->supp_rates[i] & 0x7f; + bool is_basic = !!(bss->supp_rates[i] & 0x80); + + for (j = 0; j < sband->n_bitrates; j++) { + int brate; + if ((rate_flags & sband->bitrates[j].flags) + != rate_flags) + continue; + + brate = DIV_ROUND_UP(sband->bitrates[j].bitrate, + 5 * (1 << shift)); + if (brate == rate) { + if (is_basic) + basic_rates |= BIT(j); + break; + } + } + } + + rcu_read_lock(); + ies = rcu_dereference(cbss->ies); + tsf = ies->tsf; + rcu_read_unlock(); + + __ieee80211_sta_join_ibss(sdata, cbss->bssid, + beacon_int, + &chandef, + basic_rates, + cbss->capability, + tsf, false); +} + +int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata, + struct cfg80211_csa_settings 
*csa_settings, + u64 *changed) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct beacon_data *presp, *old_presp; + struct cfg80211_bss *cbss; + const struct cfg80211_bss_ies *ies; + u16 capability = WLAN_CAPABILITY_IBSS; + u64 tsf; + + sdata_assert_lock(sdata); + + if (ifibss->privacy) + capability |= WLAN_CAPABILITY_PRIVACY; + + cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan, + ifibss->bssid, ifibss->ssid, + ifibss->ssid_len, IEEE80211_BSS_TYPE_IBSS, + IEEE80211_PRIVACY(ifibss->privacy)); + + if (WARN_ON(!cbss)) + return -EINVAL; + + rcu_read_lock(); + ies = rcu_dereference(cbss->ies); + tsf = ies->tsf; + rcu_read_unlock(); + cfg80211_put_bss(sdata->local->hw.wiphy, cbss); + + old_presp = sdata_dereference(ifibss->presp, sdata); + + presp = ieee80211_ibss_build_presp(sdata, + sdata->vif.bss_conf.beacon_int, + sdata->vif.bss_conf.basic_rates, + capability, tsf, &ifibss->chandef, + NULL, csa_settings); + if (!presp) + return -ENOMEM; + + rcu_assign_pointer(ifibss->presp, presp); + if (old_presp) + kfree_rcu(old_presp, rcu_head); + + *changed |= BSS_CHANGED_BEACON; + return 0; +} + +int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct cfg80211_bss *cbss; + + sdata_assert_lock(sdata); + + /* When not connected/joined, sending CSA doesn't make sense. */ + if (ifibss->state != IEEE80211_IBSS_MLME_JOINED) + return -ENOLINK; + + /* update cfg80211 bss information with the new channel */ + if (!is_zero_ether_addr(ifibss->bssid)) { + cbss = cfg80211_get_bss(sdata->local->hw.wiphy, + ifibss->chandef.chan, + ifibss->bssid, ifibss->ssid, + ifibss->ssid_len, + IEEE80211_BSS_TYPE_IBSS, + IEEE80211_PRIVACY(ifibss->privacy)); + /* XXX: should not really modify cfg80211 data */ + if (cbss) { + cbss->channel = sdata->deflink.csa_chandef.chan; + cfg80211_put_bss(sdata->local->hw.wiphy, cbss); + } + } + + ifibss->chandef = sdata->deflink.csa_chandef; + + /* generate the beacon */ + return ieee80211_ibss_csa_beacon(sdata, NULL, changed); +} + +void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + + wiphy_work_cancel(sdata->local->hw.wiphy, + &ifibss->csa_connection_drop_work); +} + +static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta) + __acquires(RCU) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + u8 addr[ETH_ALEN]; + + memcpy(addr, sta->sta.addr, ETH_ALEN); + + ibss_dbg(sdata, "Adding new IBSS station %pM\n", addr); + + sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); + sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); + /* authorize the station only if the network is not RSN protected. If + * not wait for the userspace to authorize it */ + if (!sta->sdata->u.ibss.control_port) + sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED); + + rate_control_rate_init(sta); + + /* If it fails, maybe we raced another insertion? 
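* sta_info_insert_rcu() frees the entry on failure (hence the addr copy + * above), so fall back to whichever station actually made it into the + * hash table.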
*/ + if (sta_info_insert_rcu(sta)) + return sta_info_get(sdata, addr); + return sta; +} + +static struct sta_info * +ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, + const u8 *addr, u32 supp_rates) + __acquires(RCU) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_supported_band *sband; + enum nl80211_bss_scan_width scan_width; + int band; + + /* + * XXX: Consider removing the least recently used entry and + * allow new one to be added. + */ + if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { + net_info_ratelimited("%s: No room for a new IBSS STA entry %pM\n", + sdata->name, addr); + rcu_read_lock(); + return NULL; + } + + if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) { + rcu_read_lock(); + return NULL; + } + + if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) { + rcu_read_lock(); + return NULL; + } + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + if (WARN_ON_ONCE(!chanctx_conf)) + return NULL; + band = chanctx_conf->def.chan->band; + scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def); + rcu_read_unlock(); + + sta = sta_info_alloc(sdata, addr, GFP_KERNEL); + if (!sta) { + rcu_read_lock(); + return NULL; + } + + /* make sure mandatory rates are always added */ + sband = local->hw.wiphy->bands[band]; + sta->sta.deflink.supp_rates[band] = supp_rates | + ieee80211_mandatory_rates(sband, scan_width); + + return ieee80211_ibss_finish_sta(sta); +} + +static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + int active = 0; + struct sta_info *sta; + + sdata_assert_lock(sdata); + + rcu_read_lock(); + + list_for_each_entry_rcu(sta, &local->sta_list, list) { + unsigned long last_active = ieee80211_sta_last_active(sta); + + if (sta->sdata == sdata && + time_is_after_jiffies(last_active + + IEEE80211_IBSS_MERGE_INTERVAL)) { + active++; + break; + } + } + + rcu_read_unlock(); + + return active; +} + +static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + struct cfg80211_bss *cbss; + struct beacon_data *presp; + struct sta_info *sta; + + if (!is_zero_ether_addr(ifibss->bssid)) { + cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->chandef.chan, + ifibss->bssid, ifibss->ssid, + ifibss->ssid_len, + IEEE80211_BSS_TYPE_IBSS, + IEEE80211_PRIVACY(ifibss->privacy)); + + if (cbss) { + cfg80211_unlink_bss(local->hw.wiphy, cbss); + cfg80211_put_bss(sdata->local->hw.wiphy, cbss); + } + } + + ifibss->state = IEEE80211_IBSS_MLME_SEARCH; + + sta_info_flush(sdata); + + spin_lock_bh(&ifibss->incomplete_lock); + while (!list_empty(&ifibss->incomplete_stations)) { + sta = list_first_entry(&ifibss->incomplete_stations, + struct sta_info, list); + list_del(&sta->list); + spin_unlock_bh(&ifibss->incomplete_lock); + + sta_info_free(local, sta); + spin_lock_bh(&ifibss->incomplete_lock); + } + spin_unlock_bh(&ifibss->incomplete_lock); + + netif_carrier_off(sdata->dev); + + sdata->vif.cfg.ibss_joined = false; + sdata->vif.cfg.ibss_creator = false; + sdata->vif.bss_conf.enable_beacon = false; + sdata->vif.cfg.ssid_len = 0; + + /* remove beacon */ + presp = sdata_dereference(ifibss->presp, sdata); + RCU_INIT_POINTER(sdata->u.ibss.presp, NULL); + if (presp) + kfree_rcu(presp, rcu_head); + + 
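+ /* Beacon is gone: clear the offchannel-stop flag, notify the driver that + * beaconing and IBSS are down, leave the IBSS and release the channel + * context acquired at join time. + */ +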
clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | + BSS_CHANGED_IBSS); + drv_leave_ibss(local, sdata); + mutex_lock(&local->mtx); + ieee80211_link_release_channel(&sdata->deflink); + mutex_unlock(&local->mtx); +} + +static void ieee80211_csa_connection_drop_work(struct wiphy *wiphy, + struct wiphy_work *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, + u.ibss.csa_connection_drop_work); + + sdata_lock(sdata); + + ieee80211_ibss_disconnect(sdata); + synchronize_rcu(); + skb_queue_purge(&sdata->skb_queue); + + /* trigger a scan to find another IBSS network to join */ + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); + + sdata_unlock(sdata); +} + +static void ieee80211_ibss_csa_mark_radar(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + int err; + + /* if the current channel is a DFS channel, mark the channel as + * unavailable. + */ + err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, + &ifibss->chandef, + NL80211_IFTYPE_ADHOC); + if (err > 0) + cfg80211_radar_event(sdata->local->hw.wiphy, &ifibss->chandef, + GFP_ATOMIC); +} + +static bool +ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata, + struct ieee802_11_elems *elems, + bool beacon) +{ + struct cfg80211_csa_settings params; + struct ieee80211_csa_ie csa_ie; + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + enum nl80211_channel_type ch_type; + int err; + ieee80211_conn_flags_t conn_flags; + u32 vht_cap_info = 0; + + sdata_assert_lock(sdata); + + conn_flags = IEEE80211_CONN_DISABLE_VHT; + + switch (ifibss->chandef.width) { + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: + case NL80211_CHAN_WIDTH_20_NOHT: + conn_flags |= IEEE80211_CONN_DISABLE_HT; + fallthrough; + case NL80211_CHAN_WIDTH_20: + conn_flags |= IEEE80211_CONN_DISABLE_40MHZ; + break; + default: + break; + } + + if (elems->vht_cap_elem) + vht_cap_info = le32_to_cpu(elems->vht_cap_elem->vht_cap_info); + + memset(&params, 0, sizeof(params)); + err = ieee80211_parse_ch_switch_ie(sdata, elems, + ifibss->chandef.chan->band, + vht_cap_info, + conn_flags, ifibss->bssid, &csa_ie); + /* can't switch to destination channel, fail */ + if (err < 0) + goto disconnect; + + /* did not contain a CSA */ + if (err) + return false; + + /* channel switch is not supported, disconnect */ + if (!(sdata->local->hw.wiphy->flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)) + goto disconnect; + + params.count = csa_ie.count; + params.chandef = csa_ie.chandef; + + switch (ifibss->chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_40: + /* keep our current HT mode (HT20/HT40+/HT40-), even if + * another mode has been announced. The mode is not adopted + * within the beacon while doing CSA and we should therefore + * keep the mode which we announce.
+ */ + ch_type = cfg80211_get_chandef_type(&ifibss->chandef); + cfg80211_chandef_create(¶ms.chandef, params.chandef.chan, + ch_type); + break; + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: + if (params.chandef.width != ifibss->chandef.width) { + sdata_info(sdata, + "IBSS %pM received channel switch from incompatible channel width (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", + ifibss->bssid, + params.chandef.chan->center_freq, + params.chandef.width, + params.chandef.center_freq1, + params.chandef.center_freq2); + goto disconnect; + } + break; + default: + /* should not happen, conn_flags should prevent VHT modes. */ + WARN_ON(1); + goto disconnect; + } + + if (!cfg80211_reg_can_beacon(sdata->local->hw.wiphy, ¶ms.chandef, + NL80211_IFTYPE_ADHOC)) { + sdata_info(sdata, + "IBSS %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", + ifibss->bssid, + params.chandef.chan->center_freq, + params.chandef.width, + params.chandef.center_freq1, + params.chandef.center_freq2); + goto disconnect; + } + + err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, + ¶ms.chandef, + NL80211_IFTYPE_ADHOC); + if (err < 0) + goto disconnect; + if (err > 0 && !ifibss->userspace_handles_dfs) { + /* IBSS-DFS only allowed with a control program */ + goto disconnect; + } + + params.radar_required = err; + + if (cfg80211_chandef_identical(¶ms.chandef, + &sdata->vif.bss_conf.chandef)) { + ibss_dbg(sdata, + "received csa with an identical chandef, ignoring\n"); + return true; + } + + /* all checks done, now perform the channel switch. */ + ibss_dbg(sdata, + "received channel switch announcement to go to channel %d MHz\n", + params.chandef.chan->center_freq); + + params.block_tx = !!csa_ie.mode; + + if (ieee80211_channel_switch(sdata->local->hw.wiphy, sdata->dev, + ¶ms)) + goto disconnect; + + ieee80211_ibss_csa_mark_radar(sdata); + + return true; +disconnect: + ibss_dbg(sdata, "Can't handle channel switch, disconnect\n"); + wiphy_work_queue(sdata->local->hw.wiphy, + &ifibss->csa_connection_drop_work); + + ieee80211_ibss_csa_mark_radar(sdata); + + return true; +} + +static void +ieee80211_rx_mgmt_spectrum_mgmt(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee80211_rx_status *rx_status, + struct ieee802_11_elems *elems) +{ + int required_len; + + if (len < IEEE80211_MIN_ACTION_SIZE + 1) + return; + + /* CSA is the only action we handle for now */ + if (mgmt->u.action.u.measurement.action_code != + WLAN_ACTION_SPCT_CHL_SWITCH) + return; + + required_len = IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.chan_switch); + if (len < required_len) + return; + + if (!sdata->vif.bss_conf.csa_active) + ieee80211_ibss_process_chanswitch(sdata, elems, false); +} + +static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len) +{ + u16 reason = le16_to_cpu(mgmt->u.deauth.reason_code); + + if (len < IEEE80211_DEAUTH_FRAME_LEN) + return; + + ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); + ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason); + sta_info_destroy_addr(sdata, mgmt->sa); +} + +static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len) +{ + u16 auth_alg, auth_transaction; + + sdata_assert_lock(sdata); + + if (len < 24 + 6) + return; + + auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); + auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); + + 
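+	/*
+	 * Descriptive note (editor addition): auth_alg and auth_transaction
+	 * are the first two of the three 2-byte fixed fields that follow the
+	 * 24-byte management header; the len < 24 + 6 check above ensures
+	 * both (and the status code) are actually present in the frame.
+	 */
+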
ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); + ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n", + mgmt->bssid, auth_transaction); + + if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) + return; + + /* + * IEEE 802.11 standard does not require authentication in IBSS + * networks and most implementations do not seem to use it. + * However, try to reply to authentication attempts if someone + * has actually implemented this. + */ + ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, 0, NULL, 0, + mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0, 0); +} + +static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee80211_rx_status *rx_status, + struct ieee802_11_elems *elems, + struct ieee80211_channel *channel) +{ + struct sta_info *sta; + enum nl80211_band band = rx_status->band; + enum nl80211_bss_scan_width scan_width; + struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; + bool rates_updated = false; + u32 supp_rates = 0; + + if (sdata->vif.type != NL80211_IFTYPE_ADHOC) + return; + + if (!ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid)) + return; + + sband = local->hw.wiphy->bands[band]; + if (WARN_ON(!sband)) + return; + + rcu_read_lock(); + sta = sta_info_get(sdata, mgmt->sa); + + if (elems->supp_rates) { + supp_rates = ieee80211_sta_get_rates(sdata, elems, + band, NULL); + if (sta) { + u32 prev_rates; + + prev_rates = sta->sta.deflink.supp_rates[band]; + /* make sure mandatory rates are always added */ + scan_width = NL80211_BSS_CHAN_WIDTH_20; + if (rx_status->bw == RATE_INFO_BW_5) + scan_width = NL80211_BSS_CHAN_WIDTH_5; + else if (rx_status->bw == RATE_INFO_BW_10) + scan_width = NL80211_BSS_CHAN_WIDTH_10; + + sta->sta.deflink.supp_rates[band] = supp_rates | + ieee80211_mandatory_rates(sband, scan_width); + if (sta->sta.deflink.supp_rates[band] != prev_rates) { + ibss_dbg(sdata, + "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n", + sta->sta.addr, prev_rates, + sta->sta.deflink.supp_rates[band]); + rates_updated = true; + } + } else { + rcu_read_unlock(); + sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, + mgmt->sa, supp_rates); + } + } + + if (sta && !sta->sta.wme && + (elems->wmm_info || elems->s1g_capab) && + local->hw.queues >= IEEE80211_NUM_ACS) { + sta->sta.wme = true; + ieee80211_check_fast_xmit(sta); + } + + if (sta && elems->ht_operation && elems->ht_cap_elem && + sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT && + sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_5 && + sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_10) { + /* we both use HT */ + struct ieee80211_ht_cap htcap_ie; + struct cfg80211_chan_def chandef; + enum ieee80211_sta_rx_bandwidth bw = sta->sta.deflink.bandwidth; + + cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); + ieee80211_chandef_ht_oper(elems->ht_operation, &chandef); + + memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie)); + rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, + &htcap_ie, + &sta->deflink); + + if (elems->vht_operation && elems->vht_cap_elem && + sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20 && + sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_40) { + /* we both use VHT */ + struct ieee80211_vht_cap cap_ie; + struct ieee80211_sta_vht_cap cap = sta->sta.deflink.vht_cap; + u32 vht_cap_info = + le32_to_cpu(elems->vht_cap_elem->vht_cap_info); + + ieee80211_chandef_vht_oper(&local->hw, vht_cap_info, + elems->vht_operation, + 
elems->ht_operation, + &chandef); + memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie)); + ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, + &cap_ie, NULL, + &sta->deflink); + if (memcmp(&cap, &sta->sta.deflink.vht_cap, sizeof(cap))) + rates_updated |= true; + } + + if (bw != sta->sta.deflink.bandwidth) + rates_updated |= true; + + if (!cfg80211_chandef_compatible(&sdata->u.ibss.chandef, + &chandef)) + WARN_ON_ONCE(1); + } + + if (sta && rates_updated) { + u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED; + u8 rx_nss = sta->sta.deflink.rx_nss; + + /* Force rx_nss recalculation */ + sta->sta.deflink.rx_nss = 0; + rate_control_rate_init(sta); + if (sta->sta.deflink.rx_nss != rx_nss) + changed |= IEEE80211_RC_NSS_CHANGED; + + drv_sta_rc_update(local, sdata, &sta->sta, changed); + } + + rcu_read_unlock(); +} + +static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee80211_rx_status *rx_status, + struct ieee802_11_elems *elems) +{ + struct ieee80211_local *local = sdata->local; + struct cfg80211_bss *cbss; + struct ieee80211_bss *bss; + struct ieee80211_channel *channel; + u64 beacon_timestamp, rx_timestamp; + u32 supp_rates = 0; + enum nl80211_band band = rx_status->band; + + channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq); + if (!channel) + return; + + ieee80211_update_sta_info(sdata, mgmt, len, rx_status, elems, channel); + + bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, channel); + if (!bss) + return; + + cbss = container_of((void *)bss, struct cfg80211_bss, priv); + + /* same for beacon and probe response */ + beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); + + /* check if we need to merge IBSS */ + + /* not an IBSS */ + if (!(cbss->capability & WLAN_CAPABILITY_IBSS)) + goto put_bss; + + /* different channel */ + if (sdata->u.ibss.fixed_channel && + sdata->u.ibss.chandef.chan != cbss->channel) + goto put_bss; + + /* different SSID */ + if (elems->ssid_len != sdata->u.ibss.ssid_len || + memcmp(elems->ssid, sdata->u.ibss.ssid, + sdata->u.ibss.ssid_len)) + goto put_bss; + + /* process channel switch */ + if (sdata->vif.bss_conf.csa_active || + ieee80211_ibss_process_chanswitch(sdata, elems, true)) + goto put_bss; + + /* same BSSID */ + if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid)) + goto put_bss; + + /* we use a fixed BSSID */ + if (sdata->u.ibss.fixed_bssid) + goto put_bss; + + if (ieee80211_have_rx_timestamp(rx_status)) { + /* time when timestamp field was received */ + rx_timestamp = + ieee80211_calculate_rx_timestamp(local, rx_status, + len + FCS_LEN, 24); + } else { + /* + * second best option: get current TSF + * (will return -1 if not supported) + */ + rx_timestamp = drv_get_tsf(local, sdata); + } + + ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n", + mgmt->sa, mgmt->bssid, + (unsigned long long)rx_timestamp); + ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n", + (unsigned long long)beacon_timestamp, + (unsigned long long)(rx_timestamp - beacon_timestamp), + jiffies); + + if (beacon_timestamp > rx_timestamp) { + ibss_dbg(sdata, + "beacon TSF higher than local TSF - IBSS merge with BSSID %pM\n", + mgmt->bssid); + ieee80211_sta_join_ibss(sdata, bss); + supp_rates = ieee80211_sta_get_rates(sdata, elems, band, NULL); + ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, + supp_rates); + rcu_read_unlock(); + } + + put_bss: + ieee80211_rx_bss_put(local, bss); +} + +void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, + const u8 *bssid, const u8 
*addr, + u32 supp_rates) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_supported_band *sband; + enum nl80211_bss_scan_width scan_width; + int band; + + /* + * XXX: Consider removing the least recently used entry and + * allow new one to be added. + */ + if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { + net_info_ratelimited("%s: No room for a new IBSS STA entry %pM\n", + sdata->name, addr); + return; + } + + if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) + return; + + if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) + return; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + if (WARN_ON_ONCE(!chanctx_conf)) { + rcu_read_unlock(); + return; + } + band = chanctx_conf->def.chan->band; + scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def); + rcu_read_unlock(); + + sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); + if (!sta) + return; + + /* make sure mandatory rates are always added */ + sband = local->hw.wiphy->bands[band]; + sta->sta.deflink.supp_rates[band] = supp_rates | + ieee80211_mandatory_rates(sband, scan_width); + + spin_lock(&ifibss->incomplete_lock); + list_add(&sta->list, &ifibss->incomplete_stations); + spin_unlock(&ifibss->incomplete_lock); + wiphy_work_queue(local->hw.wiphy, &sdata->work); +} + +static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta, *tmp; + unsigned long exp_time = IEEE80211_IBSS_INACTIVITY_LIMIT; + unsigned long exp_rsn = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT; + + mutex_lock(&local->sta_mtx); + + list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { + unsigned long last_active = ieee80211_sta_last_active(sta); + + if (sdata != sta->sdata) + continue; + + if (time_is_before_jiffies(last_active + exp_time) || + (time_is_before_jiffies(last_active + exp_rsn) && + sta->sta_state != IEEE80211_STA_AUTHORIZED)) { + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + + sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n", + sta->sta_state != IEEE80211_STA_AUTHORIZED ? 
+ "not authorized " : "", sta->sta.addr); + + ieee80211_send_deauth_disassoc(sdata, sta->sta.addr, + ifibss->bssid, + IEEE80211_STYPE_DEAUTH, + WLAN_REASON_DEAUTH_LEAVING, + true, frame_buf); + WARN_ON(__sta_info_destroy(sta)); + } + } + + mutex_unlock(&local->sta_mtx); +} + +/* + * This function is called with state == IEEE80211_IBSS_MLME_JOINED + */ + +static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + enum nl80211_bss_scan_width scan_width; + + sdata_assert_lock(sdata); + + mod_timer(&ifibss->timer, + round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); + + ieee80211_ibss_sta_expire(sdata); + + if (time_before(jiffies, ifibss->last_scan_completed + + IEEE80211_IBSS_MERGE_INTERVAL)) + return; + + if (ieee80211_sta_active_ibss(sdata)) + return; + + if (ifibss->fixed_channel) + return; + + sdata_info(sdata, + "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n"); + + scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef); + ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len, + NULL, 0, scan_width); +} + +static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + u8 bssid[ETH_ALEN]; + u16 capability; + int i; + + sdata_assert_lock(sdata); + + if (ifibss->fixed_bssid) { + memcpy(bssid, ifibss->bssid, ETH_ALEN); + } else { + /* Generate random, not broadcast, locally administered BSSID. Mix in + * own MAC address to make sure that devices that do not have proper + * random number generator get different BSSID. */ + get_random_bytes(bssid, ETH_ALEN); + for (i = 0; i < ETH_ALEN; i++) + bssid[i] ^= sdata->vif.addr[i]; + bssid[0] &= ~0x01; + bssid[0] |= 0x02; + } + + sdata_info(sdata, "Creating new IBSS network, BSSID %pM\n", bssid); + + capability = WLAN_CAPABILITY_IBSS; + + if (ifibss->privacy) + capability |= WLAN_CAPABILITY_PRIVACY; + + __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, + &ifibss->chandef, ifibss->basic_rates, + capability, 0, true); +} + +static unsigned int ibss_setup_channels(struct wiphy *wiphy, + struct ieee80211_channel **channels, + unsigned int channels_max, + u32 center_freq, u32 width) +{ + struct ieee80211_channel *chan = NULL; + unsigned int n_chan = 0; + u32 start_freq, end_freq, freq; + + if (width <= 20) { + start_freq = center_freq; + end_freq = center_freq; + } else { + start_freq = center_freq - width / 2 + 10; + end_freq = center_freq + width / 2 - 10; + } + + for (freq = start_freq; freq <= end_freq; freq += 20) { + chan = ieee80211_get_channel(wiphy, freq); + if (!chan) + continue; + if (n_chan >= channels_max) + return n_chan; + + channels[n_chan] = chan; + n_chan++; + } + + return n_chan; +} + +static unsigned int +ieee80211_ibss_setup_scan_channels(struct wiphy *wiphy, + const struct cfg80211_chan_def *chandef, + struct ieee80211_channel **channels, + unsigned int channels_max) +{ + unsigned int n_chan = 0; + u32 width, cf1, cf2 = 0; + + switch (chandef->width) { + case NL80211_CHAN_WIDTH_40: + width = 40; + break; + case NL80211_CHAN_WIDTH_80P80: + cf2 = chandef->center_freq2; + fallthrough; + case NL80211_CHAN_WIDTH_80: + width = 80; + break; + case NL80211_CHAN_WIDTH_160: + width = 160; + break; + default: + width = 20; + break; + } + + cf1 = chandef->center_freq1; + + n_chan = ibss_setup_channels(wiphy, channels, channels_max, cf1, width); + + if (cf2) + n_chan += ibss_setup_channels(wiphy, &channels[n_chan], + 
channels_max - n_chan, cf2, + width); + + return n_chan; +} + +/* + * This function is called with state == IEEE80211_IBSS_MLME_SEARCH + */ + +static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + struct cfg80211_bss *cbss; + struct ieee80211_channel *chan = NULL; + const u8 *bssid = NULL; + enum nl80211_bss_scan_width scan_width; + int active_ibss; + + sdata_assert_lock(sdata); + + active_ibss = ieee80211_sta_active_ibss(sdata); + ibss_dbg(sdata, "sta_find_ibss (active_ibss=%d)\n", active_ibss); + + if (active_ibss) + return; + + if (ifibss->fixed_bssid) + bssid = ifibss->bssid; + if (ifibss->fixed_channel) + chan = ifibss->chandef.chan; + if (!is_zero_ether_addr(ifibss->bssid)) + bssid = ifibss->bssid; + cbss = cfg80211_get_bss(local->hw.wiphy, chan, bssid, + ifibss->ssid, ifibss->ssid_len, + IEEE80211_BSS_TYPE_IBSS, + IEEE80211_PRIVACY(ifibss->privacy)); + + if (cbss) { + struct ieee80211_bss *bss; + + bss = (void *)cbss->priv; + ibss_dbg(sdata, + "sta_find_ibss: selected %pM current %pM\n", + cbss->bssid, ifibss->bssid); + sdata_info(sdata, + "Selected IBSS BSSID %pM based on configured SSID\n", + cbss->bssid); + + ieee80211_sta_join_ibss(sdata, bss); + ieee80211_rx_bss_put(local, bss); + return; + } + + /* if a fixed bssid and a fixed freq have been provided create the IBSS + * directly and do not waste time scanning + */ + if (ifibss->fixed_bssid && ifibss->fixed_channel) { + sdata_info(sdata, "Created IBSS using preconfigured BSSID %pM\n", + bssid); + ieee80211_sta_create_ibss(sdata); + return; + } + + + ibss_dbg(sdata, "sta_find_ibss: did not try to join ibss\n"); + + /* Selected IBSS not found in current scan results - try to scan */ + if (time_after(jiffies, ifibss->last_scan_completed + + IEEE80211_SCAN_INTERVAL)) { + struct ieee80211_channel *channels[8]; + unsigned int num; + + sdata_info(sdata, "Trigger new scan to find an IBSS to join\n"); + + scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef); + + if (ifibss->fixed_channel) { + num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy, + &ifibss->chandef, + channels, + ARRAY_SIZE(channels)); + ieee80211_request_ibss_scan(sdata, ifibss->ssid, + ifibss->ssid_len, channels, + num, scan_width); + } else { + ieee80211_request_ibss_scan(sdata, ifibss->ssid, + ifibss->ssid_len, NULL, + 0, scan_width); + } + } else { + int interval = IEEE80211_SCAN_INTERVAL; + + if (time_after(jiffies, ifibss->ibss_join_req + + IEEE80211_IBSS_JOIN_TIMEOUT)) + ieee80211_sta_create_ibss(sdata); + + mod_timer(&ifibss->timer, + round_jiffies(jiffies + interval)); + } +} + +static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, + struct sk_buff *req) +{ + struct ieee80211_mgmt *mgmt = (void *)req->data; + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + int tx_last_beacon, len = req->len; + struct sk_buff *skb; + struct beacon_data *presp; + u8 *pos, *end; + + sdata_assert_lock(sdata); + + presp = sdata_dereference(ifibss->presp, sdata); + + if (ifibss->state != IEEE80211_IBSS_MLME_JOINED || + len < 24 + 2 || !presp) + return; + + tx_last_beacon = drv_tx_last_beacon(local); + + ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); + ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n", + mgmt->bssid, tx_last_beacon); + + if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) + return; + + if (!ether_addr_equal(mgmt->bssid, 
ifibss->bssid) && + !is_broadcast_ether_addr(mgmt->bssid)) + return; + + end = ((u8 *) mgmt) + len; + pos = mgmt->u.probe_req.variable; + if (pos[0] != WLAN_EID_SSID || + pos + 2 + pos[1] > end) { + ibss_dbg(sdata, "Invalid SSID IE in ProbeReq from %pM\n", + mgmt->sa); + return; + } + if (pos[1] != 0 && + (pos[1] != ifibss->ssid_len || + memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { + /* Ignore ProbeReq for foreign SSID */ + return; + } + + /* Reply with ProbeResp */ + skb = dev_alloc_skb(local->tx_headroom + presp->head_len); + if (!skb) + return; + + skb_reserve(skb, local->tx_headroom); + skb_put_data(skb, presp->head, presp->head_len); + + memcpy(((struct ieee80211_mgmt *) skb->data)->da, mgmt->sa, ETH_ALEN); + ibss_dbg(sdata, "Sending ProbeResp to %pM\n", mgmt->sa); + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + + /* avoid excessive retries for probe request to wildcard SSIDs */ + if (pos[1] == 0) + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_NO_ACK; + + ieee80211_tx_skb(sdata, skb); +} + +static +void ieee80211_rx_mgmt_probe_beacon(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee80211_rx_status *rx_status) +{ + size_t baselen; + struct ieee802_11_elems *elems; + + BUILD_BUG_ON(offsetof(typeof(mgmt->u.probe_resp), variable) != + offsetof(typeof(mgmt->u.beacon), variable)); + + /* + * either beacon or probe_resp but the variable field is at the + * same offset + */ + baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; + if (baselen > len) + return; + + elems = ieee802_11_parse_elems(mgmt->u.probe_resp.variable, + len - baselen, false, NULL); + + if (elems) { + ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, elems); + kfree(elems); + } +} + +void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_rx_status *rx_status; + struct ieee80211_mgmt *mgmt; + u16 fc; + struct ieee802_11_elems *elems; + int ies_len; + + rx_status = IEEE80211_SKB_RXCB(skb); + mgmt = (struct ieee80211_mgmt *) skb->data; + fc = le16_to_cpu(mgmt->frame_control); + + sdata_lock(sdata); + + if (!sdata->u.ibss.ssid_len) + goto mgmt_out; /* not ready to merge yet */ + + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_PROBE_REQ: + ieee80211_rx_mgmt_probe_req(sdata, skb); + break; + case IEEE80211_STYPE_PROBE_RESP: + case IEEE80211_STYPE_BEACON: + ieee80211_rx_mgmt_probe_beacon(sdata, mgmt, skb->len, + rx_status); + break; + case IEEE80211_STYPE_AUTH: + ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len); + break; + case IEEE80211_STYPE_DEAUTH: + ieee80211_rx_mgmt_deauth_ibss(sdata, mgmt, skb->len); + break; + case IEEE80211_STYPE_ACTION: + switch (mgmt->u.action.category) { + case WLAN_CATEGORY_SPECTRUM_MGMT: + ies_len = skb->len - + offsetof(struct ieee80211_mgmt, + u.action.u.chan_switch.variable); + + if (ies_len < 0) + break; + + elems = ieee802_11_parse_elems( + mgmt->u.action.u.chan_switch.variable, + ies_len, true, NULL); + + if (elems && !elems->parse_error) + ieee80211_rx_mgmt_spectrum_mgmt(sdata, mgmt, + skb->len, + rx_status, + elems); + kfree(elems); + break; + } + } + + mgmt_out: + sdata_unlock(sdata); +} + +void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct sta_info *sta; + + sdata_lock(sdata); + + /* + * Work could be scheduled after scan or similar + * when we aren't even joined (or trying) with a + * network. 
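+	 * In that case ifibss->ssid_len is still zero, so the function
+	 * bails out below before touching any state.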
+	 */
+	if (!ifibss->ssid_len)
+		goto out;
+
+	spin_lock_bh(&ifibss->incomplete_lock);
+	while (!list_empty(&ifibss->incomplete_stations)) {
+		sta = list_first_entry(&ifibss->incomplete_stations,
+				       struct sta_info, list);
+		list_del(&sta->list);
+		spin_unlock_bh(&ifibss->incomplete_lock);
+
+		ieee80211_ibss_finish_sta(sta);
+		rcu_read_unlock();
+		spin_lock_bh(&ifibss->incomplete_lock);
+	}
+	spin_unlock_bh(&ifibss->incomplete_lock);
+
+	switch (ifibss->state) {
+	case IEEE80211_IBSS_MLME_SEARCH:
+		ieee80211_sta_find_ibss(sdata);
+		break;
+	case IEEE80211_IBSS_MLME_JOINED:
+		ieee80211_sta_merge_ibss(sdata);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+ out:
+	sdata_unlock(sdata);
+}
+
+static void ieee80211_ibss_timer(struct timer_list *t)
+{
+	struct ieee80211_sub_if_data *sdata =
+		from_timer(sdata, t, u.ibss.timer);
+
+	wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
+}
+
+void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+
+	timer_setup(&ifibss->timer, ieee80211_ibss_timer, 0);
+	INIT_LIST_HEAD(&ifibss->incomplete_stations);
+	spin_lock_init(&ifibss->incomplete_lock);
+	wiphy_work_init(&ifibss->csa_connection_drop_work,
+			ieee80211_csa_connection_drop_work);
+}
+
+/* scan finished notification */
+void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
+{
+	struct ieee80211_sub_if_data *sdata;
+
+	mutex_lock(&local->iflist_mtx);
+	list_for_each_entry(sdata, &local->interfaces, list) {
+		if (!ieee80211_sdata_running(sdata))
+			continue;
+		if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
+			continue;
+		sdata->u.ibss.last_scan_completed = jiffies;
+	}
+	mutex_unlock(&local->iflist_mtx);
+}
+
+int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
+			struct cfg80211_ibss_params *params)
+{
+	u64 changed = 0;
+	u32 rate_flags;
+	struct ieee80211_supported_band *sband;
+	enum ieee80211_chanctx_mode chanmode;
+	struct ieee80211_local *local = sdata->local;
+	int radar_detect_width = 0;
+	int i;
+	int ret;
+
+	if (params->chandef.chan->freq_offset) {
+		/* this may work, but is untested */
+		return -EOPNOTSUPP;
+	}
+
+	ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
+					    &params->chandef,
+					    sdata->wdev.iftype);
+	if (ret < 0)
+		return ret;
+
+	if (ret > 0) {
+		if (!params->userspace_handles_dfs)
+			return -EINVAL;
+		radar_detect_width = BIT(params->chandef.width);
+	}
+
+	chanmode = (params->channel_fixed && !ret) ?
+		IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE;
+
+	mutex_lock(&local->chanctx_mtx);
+	ret = ieee80211_check_combinations(sdata, &params->chandef, chanmode,
+					   radar_detect_width);
+	mutex_unlock(&local->chanctx_mtx);
+	if (ret < 0)
+		return ret;
+
+	if (params->bssid) {
+		memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN);
+		sdata->u.ibss.fixed_bssid = true;
+	} else
+		sdata->u.ibss.fixed_bssid = false;
+
+	sdata->u.ibss.privacy = params->privacy;
+	sdata->u.ibss.control_port = params->control_port;
+	sdata->u.ibss.userspace_handles_dfs = params->userspace_handles_dfs;
+	sdata->u.ibss.basic_rates = params->basic_rates;
+	sdata->u.ibss.last_scan_completed = jiffies;
+
+	/* fix basic_rates if channel does not support these rates */
+	rate_flags = ieee80211_chandef_rate_flags(&params->chandef);
+	sband = local->hw.wiphy->bands[params->chandef.chan->band];
+	for (i = 0; i < sband->n_bitrates; i++) {
+		if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+			sdata->u.ibss.basic_rates &= ~BIT(i);
+	}
+	memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate,
+	       sizeof(params->mcast_rate));
+
+	sdata->vif.bss_conf.beacon_int = params->beacon_interval;
+
+	sdata->u.ibss.chandef = params->chandef;
+	sdata->u.ibss.fixed_channel = params->channel_fixed;
+
+	if (params->ie) {
+		sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len,
+					   GFP_KERNEL);
+		if (sdata->u.ibss.ie)
+			sdata->u.ibss.ie_len = params->ie_len;
+	}
+
+	sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
+	sdata->u.ibss.ibss_join_req = jiffies;
+
+	memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len);
+	sdata->u.ibss.ssid_len = params->ssid_len;
+
+	memcpy(&sdata->u.ibss.ht_capa, &params->ht_capa,
+	       sizeof(sdata->u.ibss.ht_capa));
+	memcpy(&sdata->u.ibss.ht_capa_mask, &params->ht_capa_mask,
+	       sizeof(sdata->u.ibss.ht_capa_mask));
+
+	/*
+	 * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is
+	 * reserved, but an HT STA shall protect HT transmissions as though
+	 * the HT Protection field were set to non-HT mixed mode.
+	 *
+	 * In an IBSS, the RIFS Mode field of the HT Operation element is
+	 * also reserved, but an HT STA shall operate as though this field
+	 * were set to 1.
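+	 *
+	 * Both flags are therefore OR'ed into ht_operation_mode
+	 * unconditionally below when the IBSS is joined.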
+ */ + + sdata->vif.bss_conf.ht_operation_mode |= + IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED + | IEEE80211_HT_PARAM_RIFS_MODE; + + changed |= BSS_CHANGED_HT | BSS_CHANGED_MCAST_RATE; + ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed); + + sdata->deflink.smps_mode = IEEE80211_SMPS_OFF; + sdata->deflink.needed_rx_chains = local->rx_chains; + sdata->control_port_over_nl80211 = params->control_port_over_nl80211; + + wiphy_work_queue(local->hw.wiphy, &sdata->work); + + return 0; +} + +int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + + ieee80211_ibss_disconnect(sdata); + ifibss->ssid_len = 0; + eth_zero_addr(ifibss->bssid); + + /* remove beacon */ + kfree(sdata->u.ibss.ie); + sdata->u.ibss.ie = NULL; + sdata->u.ibss.ie_len = 0; + + /* on the next join, re-program HT parameters */ + memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa)); + memset(&ifibss->ht_capa_mask, 0, sizeof(ifibss->ht_capa_mask)); + + synchronize_rcu(); + + skb_queue_purge(&sdata->skb_queue); + + del_timer_sync(&sdata->u.ibss.timer); + + return 0; +} diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h new file mode 100644 index 0000000000..07beb72ddd --- /dev/null +++ b/net/mac80211/ieee80211_i.h @@ -0,0 +1,2642 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2018-2022 Intel Corporation + */ + +#ifndef IEEE80211_I_H +#define IEEE80211_I_H + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/if_ether.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/workqueue.h> +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/etherdevice.h> +#include <linux/leds.h> +#include <linux/idr.h> +#include <linux/rhashtable.h> +#include <linux/rbtree.h> +#include <net/ieee80211_radiotap.h> +#include <net/cfg80211.h> +#include <net/mac80211.h> +#include <net/fq.h> +#include "key.h" +#include "sta_info.h" +#include "debug.h" +#include "drop.h" + +extern const struct cfg80211_ops mac80211_config_ops; + +struct ieee80211_local; +struct ieee80211_mesh_fast_tx; + +/* Maximum number of broadcast/multicast frames to buffer when some of the + * associated stations are using power saving. */ +#define AP_MAX_BC_BUFFER 128 + +/* Maximum number of frames buffered to all STAs, including multicast frames. + * Note: increasing this limit increases the potential memory requirement. Each + * frame can be up to about 2 kB long. */ +#define TOTAL_MAX_TX_BUFFER 512 + +/* Required encryption head and tailroom */ +#define IEEE80211_ENCRYPT_HEADROOM 8 +#define IEEE80211_ENCRYPT_TAILROOM 18 + +/* power level hasn't been configured (or set to automatic) */ +#define IEEE80211_UNSET_POWER_LEVEL INT_MIN + +/* + * Some APs experience problems when working with U-APSD. Decreasing the + * probability of that happening by using legacy mode for all ACs but VO isn't + * enough. + * + * Cisco 4410N originally forced us to enable VO by default only because it + * treated non-VO ACs as legacy. + * + * However some APs (notably Netgear R7000) silently reclassify packets to + * different ACs. 
Since u-APSD ACs require trigger frames for frame retrieval + * clients would never see some frames (e.g. ARP responses) or would fetch them + * accidentally after a long time. + * + * It makes little sense to enable u-APSD queues by default because it needs + * userspace applications to be aware of it to actually take advantage of the + * possible additional powersavings. Implicitly depending on driver autotrigger + * frame support doesn't make much sense. + */ +#define IEEE80211_DEFAULT_UAPSD_QUEUES 0 + +#define IEEE80211_DEFAULT_MAX_SP_LEN \ + IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL + +extern const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS]; + +#define IEEE80211_DEAUTH_FRAME_LEN (24 /* hdr */ + 2 /* reason */) + +#define IEEE80211_MAX_NAN_INSTANCE_ID 255 + + +/* + * Keep a station's queues on the active list for deficit accounting purposes + * if it was active or queued during the last 100ms + */ +#define AIRTIME_ACTIVE_DURATION (HZ / 10) + +struct ieee80211_bss { + u32 device_ts_beacon, device_ts_presp; + + bool wmm_used; + bool uapsd_supported; + +#define IEEE80211_MAX_SUPP_RATES 32 + u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; + size_t supp_rates_len; + struct ieee80211_rate *beacon_rate; + + u32 vht_cap_info; + + /* + * During association, we save an ERP value from a probe response so + * that we can feed ERP info to the driver when handling the + * association completes. these fields probably won't be up-to-date + * otherwise, you probably don't want to use them. + */ + bool has_erp_value; + u8 erp_value; + + /* Keep track of the corruption of the last beacon/probe response. */ + u8 corrupt_data; + + /* Keep track of what bits of information we have valid info for. */ + u8 valid_data; +}; + +/** + * enum ieee80211_corrupt_data_flags - BSS data corruption flags + * @IEEE80211_BSS_CORRUPT_BEACON: last beacon frame received was corrupted + * @IEEE80211_BSS_CORRUPT_PROBE_RESP: last probe response received was corrupted + * + * These are bss flags that are attached to a bss in the + * @corrupt_data field of &struct ieee80211_bss. + */ +enum ieee80211_bss_corrupt_data_flags { + IEEE80211_BSS_CORRUPT_BEACON = BIT(0), + IEEE80211_BSS_CORRUPT_PROBE_RESP = BIT(1) +}; + +/** + * enum ieee80211_valid_data_flags - BSS valid data flags + * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE + * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE + * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE + * + * These are bss flags that are attached to a bss in the + * @valid_data field of &struct ieee80211_bss. They show which parts + * of the data structure were received as a result of an un-corrupted + * beacon/probe response. 
+ */ +enum ieee80211_bss_valid_data_flags { + IEEE80211_BSS_VALID_WMM = BIT(1), + IEEE80211_BSS_VALID_RATES = BIT(2), + IEEE80211_BSS_VALID_ERP = BIT(3) +}; + +typedef unsigned __bitwise ieee80211_tx_result; +#define TX_CONTINUE ((__force ieee80211_tx_result) 0u) +#define TX_DROP ((__force ieee80211_tx_result) 1u) +#define TX_QUEUED ((__force ieee80211_tx_result) 2u) + +#define IEEE80211_TX_UNICAST BIT(1) +#define IEEE80211_TX_PS_BUFFERED BIT(2) + +struct ieee80211_tx_data { + struct sk_buff *skb; + struct sk_buff_head skbs; + struct ieee80211_local *local; + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + struct ieee80211_key *key; + struct ieee80211_tx_rate rate; + + unsigned int flags; +}; + +/** + * enum ieee80211_packet_rx_flags - packet RX flags + * @IEEE80211_RX_AMSDU: a-MSDU packet + * @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed + * @IEEE80211_RX_DEFERRED_RELEASE: frame was subjected to receive reordering + * + * These are per-frame flags that are attached to a frame in the + * @rx_flags field of &struct ieee80211_rx_status. + */ +enum ieee80211_packet_rx_flags { + IEEE80211_RX_AMSDU = BIT(3), + IEEE80211_RX_MALFORMED_ACTION_FRM = BIT(4), + IEEE80211_RX_DEFERRED_RELEASE = BIT(5), +}; + +/** + * enum ieee80211_rx_flags - RX data flags + * + * @IEEE80211_RX_CMNTR: received on cooked monitor already + * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported + * to cfg80211_report_obss_beacon(). + * + * These flags are used across handling multiple interfaces + * for a single frame. + */ +enum ieee80211_rx_flags { + IEEE80211_RX_CMNTR = BIT(0), + IEEE80211_RX_BEACON_REPORTED = BIT(1), +}; + +struct ieee80211_rx_data { + struct list_head *list; + struct sk_buff *skb; + struct ieee80211_local *local; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_link_data *link; + struct sta_info *sta; + struct link_sta_info *link_sta; + struct ieee80211_key *key; + + unsigned int flags; + + /* + * Index into sequence numbers array, 0..16 + * since the last (16) is used for non-QoS, + * will be 16 on non-QoS frames. + */ + int seqno_idx; + + /* + * Index into the security IV/PN arrays, 0..16 + * since the last (16) is used for CCMP-encrypted + * management frames, will be set to 16 on mgmt + * frames and 0 on non-QoS frames. 
+ */ + int security_idx; + + int link_id; + + union { + struct { + u32 iv32; + u16 iv16; + } tkip; + struct { + u8 pn[IEEE80211_CCMP_PN_LEN]; + } ccm_gcm; + }; +}; + +struct ieee80211_csa_settings { + const u16 *counter_offsets_beacon; + const u16 *counter_offsets_presp; + + int n_counter_offsets_beacon; + int n_counter_offsets_presp; + + u8 count; +}; + +struct ieee80211_color_change_settings { + u16 counter_offset_beacon; + u16 counter_offset_presp; + u8 count; +}; + +struct beacon_data { + u8 *head, *tail; + int head_len, tail_len; + struct ieee80211_meshconf_ie *meshconf; + u16 cntdwn_counter_offsets[IEEE80211_MAX_CNTDWN_COUNTERS_NUM]; + u8 cntdwn_current_counter; + struct cfg80211_mbssid_elems *mbssid_ies; + struct cfg80211_rnr_elems *rnr_ies; + struct rcu_head rcu_head; +}; + +struct probe_resp { + struct rcu_head rcu_head; + int len; + u16 cntdwn_counter_offsets[IEEE80211_MAX_CNTDWN_COUNTERS_NUM]; + u8 data[]; +}; + +struct fils_discovery_data { + struct rcu_head rcu_head; + int len; + u8 data[]; +}; + +struct unsol_bcast_probe_resp_data { + struct rcu_head rcu_head; + int len; + u8 data[]; +}; + +struct ps_data { + /* yes, this looks ugly, but guarantees that we can later use + * bitmap_empty :) + * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */ + u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)] + __aligned(__alignof__(unsigned long)); + struct sk_buff_head bc_buf; + atomic_t num_sta_ps; /* number of stations in PS mode */ + int dtim_count; + bool dtim_bc_mc; +}; + +struct ieee80211_if_ap { + struct list_head vlans; /* write-protected with RTNL and local->mtx */ + + struct ps_data ps; + atomic_t num_mcast_sta; /* number of stations receiving multicast */ + + bool multicast_to_unicast; + bool active; +}; + +struct ieee80211_if_vlan { + struct list_head list; /* write-protected with RTNL and local->mtx */ + + /* used for all tx if the VLAN is configured to 4-addr mode */ + struct sta_info __rcu *sta; + atomic_t num_mcast_sta; /* number of stations receiving multicast */ +}; + +struct mesh_stats { + __u32 fwded_mcast; /* Mesh forwarded multicast frames */ + __u32 fwded_unicast; /* Mesh forwarded unicast frames */ + __u32 fwded_frames; /* Mesh total forwarded frames */ + __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/ + __u32 dropped_frames_no_route; /* Not transmitted, no route found */ +}; + +#define PREQ_Q_F_START 0x1 +#define PREQ_Q_F_REFRESH 0x2 +struct mesh_preq_queue { + struct list_head list; + u8 dst[ETH_ALEN]; + u8 flags; +}; + +struct ieee80211_roc_work { + struct list_head list; + + struct ieee80211_sub_if_data *sdata; + + struct ieee80211_channel *chan; + + bool started, abort, hw_begun, notified; + bool on_channel; + + unsigned long start_time; + + u32 duration, req_duration; + struct sk_buff *frame; + u64 cookie, mgmt_tx_cookie; + enum ieee80211_roc_type type; +}; + +/* flags used in struct ieee80211_if_managed.flags */ +enum ieee80211_sta_flags { + IEEE80211_STA_CONNECTION_POLL = BIT(1), + IEEE80211_STA_CONTROL_PORT = BIT(2), + IEEE80211_STA_MFP_ENABLED = BIT(6), + IEEE80211_STA_UAPSD_ENABLED = BIT(7), + IEEE80211_STA_NULLFUNC_ACKED = BIT(8), + IEEE80211_STA_ENABLE_RRM = BIT(15), +}; + +typedef u32 __bitwise ieee80211_conn_flags_t; + +enum ieee80211_conn_flags { + IEEE80211_CONN_DISABLE_HT = (__force ieee80211_conn_flags_t)BIT(0), + IEEE80211_CONN_DISABLE_40MHZ = (__force ieee80211_conn_flags_t)BIT(1), + IEEE80211_CONN_DISABLE_VHT = (__force ieee80211_conn_flags_t)BIT(2), + IEEE80211_CONN_DISABLE_80P80MHZ = 
(__force ieee80211_conn_flags_t)BIT(3), + IEEE80211_CONN_DISABLE_160MHZ = (__force ieee80211_conn_flags_t)BIT(4), + IEEE80211_CONN_DISABLE_HE = (__force ieee80211_conn_flags_t)BIT(5), + IEEE80211_CONN_DISABLE_EHT = (__force ieee80211_conn_flags_t)BIT(6), + IEEE80211_CONN_DISABLE_320MHZ = (__force ieee80211_conn_flags_t)BIT(7), +}; + +struct ieee80211_mgd_auth_data { + struct cfg80211_bss *bss; + unsigned long timeout; + int tries; + u16 algorithm, expected_transaction; + + u8 key[WLAN_KEY_LEN_WEP104]; + u8 key_len, key_idx; + bool done, waiting; + bool peer_confirmed; + bool timeout_started; + int link_id; + + u8 ap_addr[ETH_ALEN] __aligned(2); + + u16 sae_trans, sae_status; + size_t data_len; + u8 data[]; +}; + +struct ieee80211_mgd_assoc_data { + struct { + struct cfg80211_bss *bss; + + u8 addr[ETH_ALEN] __aligned(2); + + u8 ap_ht_param; + + struct ieee80211_vht_cap ap_vht_cap; + + size_t elems_len; + u8 *elems; /* pointing to inside ie[] below */ + + ieee80211_conn_flags_t conn_flags; + + u16 status; + + bool disabled; + } link[IEEE80211_MLD_MAX_NUM_LINKS]; + + u8 ap_addr[ETH_ALEN] __aligned(2); + + /* this is for a workaround, so we use it only for non-MLO */ + const u8 *supp_rates; + u8 supp_rates_len; + + unsigned long timeout; + int tries; + + u8 prev_ap_addr[ETH_ALEN]; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len; + bool wmm, uapsd; + bool need_beacon; + bool synced; + bool timeout_started; + bool s1g; + + unsigned int assoc_link_id; + + u8 fils_nonces[2 * FILS_NONCE_LEN]; + u8 fils_kek[FILS_MAX_KEK_LEN]; + size_t fils_kek_len; + + size_t ie_len; + u8 *ie_pos; /* used to fill ie[] with link[].elems */ + u8 ie[]; +}; + +struct ieee80211_sta_tx_tspec { + /* timestamp of the first packet in the time slice */ + unsigned long time_slice_start; + + u32 admitted_time; /* in usecs, unlike over the air */ + u8 tsid; + s8 up; /* signed to be able to invalidate with -1 during teardown */ + + /* consumed TX time in microseconds in the time slice */ + u32 consumed_tx_time; + enum { + TX_TSPEC_ACTION_NONE = 0, + TX_TSPEC_ACTION_DOWNGRADE, + TX_TSPEC_ACTION_STOP_DOWNGRADE, + } action; + bool downgraded; +}; + +DECLARE_EWMA(beacon_signal, 4, 4) + +struct ieee80211_if_managed { + struct timer_list timer; + struct timer_list conn_mon_timer; + struct timer_list bcn_mon_timer; + struct work_struct monitor_work; + struct wiphy_work beacon_connection_loss_work; + struct wiphy_work csa_connection_drop_work; + + unsigned long beacon_timeout; + unsigned long probe_timeout; + int probe_send_count; + bool nullfunc_failed; + u8 connection_loss:1, + driver_disconnect:1, + reconnect:1, + associated:1; + + struct ieee80211_mgd_auth_data *auth_data; + struct ieee80211_mgd_assoc_data *assoc_data; + + bool powersave; /* powersave requested for this iface */ + bool broken_ap; /* AP is broken -- turn off powersave */ + + unsigned int flags; + + bool status_acked; + bool status_received; + __le16 status_fc; + + enum { + IEEE80211_MFP_DISABLED, + IEEE80211_MFP_OPTIONAL, + IEEE80211_MFP_REQUIRED + } mfp; /* management frame protection */ + + /* + * Bitmask of enabled u-apsd queues, + * IEEE80211_WMM_IE_STA_QOSINFO_AC_BE & co. Needs a new association + * to take effect. + */ + unsigned int uapsd_queues; + + /* + * Maximum number of buffered frames AP can deliver during a + * service period, IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL or similar. + * Needs a new association to take effect. 
+ */ + unsigned int uapsd_max_sp_len; + + u8 use_4addr; + + /* + * State variables for keeping track of RSSI of the AP currently + * connected to and informing driver when RSSI has gone + * below/above a certain threshold. + */ + int rssi_min_thold, rssi_max_thold; + + struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */ + struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */ + struct ieee80211_vht_cap vht_capa; /* configured VHT overrides */ + struct ieee80211_vht_cap vht_capa_mask; /* Valid parts of vht_capa */ + struct ieee80211_s1g_cap s1g_capa; /* configured S1G overrides */ + struct ieee80211_s1g_cap s1g_capa_mask; /* valid s1g_capa bits */ + + /* TDLS support */ + u8 tdls_peer[ETH_ALEN] __aligned(2); + struct delayed_work tdls_peer_del_work; + struct sk_buff *orig_teardown_skb; /* The original teardown skb */ + struct sk_buff *teardown_skb; /* A copy to send through the AP */ + spinlock_t teardown_lock; /* To lock changing teardown_skb */ + bool tdls_wider_bw_prohibited; + + /* WMM-AC TSPEC support */ + struct ieee80211_sta_tx_tspec tx_tspec[IEEE80211_NUM_ACS]; + /* Use a separate work struct so that we can do something here + * while the sdata->work is flushing the queues, for example. + * otherwise, in scenarios where we hardly get any traffic out + * on the BE queue, but there's a lot of VO traffic, we might + * get stuck in a downgraded situation and flush takes forever. + */ + struct delayed_work tx_tspec_wk; + + /* Information elements from the last transmitted (Re)Association + * Request frame. + */ + u8 *assoc_req_ies; + size_t assoc_req_ies_len; + + struct wiphy_delayed_work ml_reconf_work; + u16 removed_links; +}; + +struct ieee80211_if_ibss { + struct timer_list timer; + struct wiphy_work csa_connection_drop_work; + + unsigned long last_scan_completed; + + u32 basic_rates; + + bool fixed_bssid; + bool fixed_channel; + bool privacy; + + bool control_port; + bool userspace_handles_dfs; + + u8 bssid[ETH_ALEN] __aligned(2); + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len, ie_len; + u8 *ie; + struct cfg80211_chan_def chandef; + + unsigned long ibss_join_req; + /* probe response/beacon for IBSS */ + struct beacon_data __rcu *presp; + + struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */ + struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */ + + spinlock_t incomplete_lock; + struct list_head incomplete_stations; + + enum { + IEEE80211_IBSS_MLME_SEARCH, + IEEE80211_IBSS_MLME_JOINED, + } state; +}; + +/** + * struct ieee80211_if_ocb - OCB mode state + * + * @housekeeping_timer: timer for periodic invocation of a housekeeping task + * @wrkq_flags: OCB deferred task action + * @incomplete_lock: delayed STA insertion lock + * @incomplete_stations: list of STAs waiting for delayed insertion + * @joined: indication if the interface is connected to an OCB network + */ +struct ieee80211_if_ocb { + struct timer_list housekeeping_timer; + unsigned long wrkq_flags; + + spinlock_t incomplete_lock; + struct list_head incomplete_stations; + + bool joined; +}; + +/** + * struct ieee80211_mesh_sync_ops - Extensible synchronization framework interface + * + * these declarations define the interface, which enables + * vendor-specific mesh synchronization + * + */ +struct ieee802_11_elems; +struct ieee80211_mesh_sync_ops { + void (*rx_bcn_presp)(struct ieee80211_sub_if_data *sdata, u16 stype, + struct ieee80211_mgmt *mgmt, unsigned int len, + const struct ieee80211_meshconf_ie *mesh_cfg, + struct ieee80211_rx_status *rx_status); + + /* 
should be called with beacon_data under RCU read lock */ + void (*adjust_tsf)(struct ieee80211_sub_if_data *sdata, + struct beacon_data *beacon); + /* add other framework functions here */ +}; + +struct mesh_csa_settings { + struct rcu_head rcu_head; + struct cfg80211_csa_settings settings; +}; + +/** + * struct mesh_table + * + * @known_gates: list of known mesh gates and their mpaths by the station. The + * gate's mpath may or may not be resolved and active. + * @gates_lock: protects updates to known_gates + * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr + * @walk_head: linked list containing all mesh_path objects + * @walk_lock: lock protecting walk_head + * @entries: number of entries in the table + */ +struct mesh_table { + struct hlist_head known_gates; + spinlock_t gates_lock; + struct rhashtable rhead; + struct hlist_head walk_head; + spinlock_t walk_lock; + atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ +}; + +/** + * struct mesh_tx_cache - mesh fast xmit header cache + * + * @rht: hash table containing struct ieee80211_mesh_fast_tx, using skb DA as key + * @walk_head: linked list containing all ieee80211_mesh_fast_tx objects + * @walk_lock: lock protecting walk_head and rht + */ +struct mesh_tx_cache { + struct rhashtable rht; + struct hlist_head walk_head; + spinlock_t walk_lock; +}; + +struct ieee80211_if_mesh { + struct timer_list housekeeping_timer; + struct timer_list mesh_path_timer; + struct timer_list mesh_path_root_timer; + + unsigned long wrkq_flags; + unsigned long mbss_changed[64 / BITS_PER_LONG]; + + bool userspace_handles_dfs; + + u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; + size_t mesh_id_len; + /* Active Path Selection Protocol Identifier */ + u8 mesh_pp_id; + /* Active Path Selection Metric Identifier */ + u8 mesh_pm_id; + /* Congestion Control Mode Identifier */ + u8 mesh_cc_id; + /* Synchronization Protocol Identifier */ + u8 mesh_sp_id; + /* Authentication Protocol Identifier */ + u8 mesh_auth_id; + /* Local mesh Sequence Number */ + u32 sn; + /* Last used PREQ ID */ + u32 preq_id; + atomic_t mpaths; + /* Timestamp of last SN update */ + unsigned long last_sn_update; + /* Time when it's ok to send next PERR */ + unsigned long next_perr; + /* Timestamp of last PREQ sent */ + unsigned long last_preq; + struct mesh_rmc *rmc; + spinlock_t mesh_preq_queue_lock; + struct mesh_preq_queue preq_queue; + int preq_queue_len; + struct mesh_stats mshstats; + struct mesh_config mshcfg; + atomic_t estab_plinks; + atomic_t mesh_seqnum; + bool accepting_plinks; + int num_gates; + struct beacon_data __rcu *beacon; + const u8 *ie; + u8 ie_len; + enum { + IEEE80211_MESH_SEC_NONE = 0x0, + IEEE80211_MESH_SEC_AUTHED = 0x1, + IEEE80211_MESH_SEC_SECURED = 0x2, + } security; + bool user_mpm; + /* Extensible Synchronization Framework */ + const struct ieee80211_mesh_sync_ops *sync_ops; + s64 sync_offset_clockdrift_max; + spinlock_t sync_offset_lock; + /* mesh power save */ + enum nl80211_mesh_power_mode nonpeer_pm; + int ps_peers_light_sleep; + int ps_peers_deep_sleep; + struct ps_data ps; + /* Channel Switching Support */ + struct mesh_csa_settings __rcu *csa; + enum { + IEEE80211_MESH_CSA_ROLE_NONE, + IEEE80211_MESH_CSA_ROLE_INIT, + IEEE80211_MESH_CSA_ROLE_REPEATER, + } csa_role; + u8 chsw_ttl; + u16 pre_value; + + /* offset from skb->data while building IE */ + int meshconf_offset; + + struct mesh_table mesh_paths; + struct mesh_table mpp_paths; /* Store paths for MPP&MAP */ + int mesh_paths_generation; + int mpp_paths_generation; + struct mesh_tx_cache 
tx_cache; +}; + +#ifdef CONFIG_MAC80211_MESH +#define IEEE80211_IFSTA_MESH_CTR_INC(msh, name) \ + do { (msh)->mshstats.name++; } while (0) +#else +#define IEEE80211_IFSTA_MESH_CTR_INC(msh, name) \ + do { } while (0) +#endif + +/** + * enum ieee80211_sub_if_data_flags - virtual interface flags + * + * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets + * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between + * associated stations and deliver multicast frames both + * back to wireless media and to the local net stack. + * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume. + * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver + * @IEEE80211_SDATA_DISCONNECT_HW_RESTART: Disconnect after hardware restart + * recovery + */ +enum ieee80211_sub_if_data_flags { + IEEE80211_SDATA_ALLMULTI = BIT(0), + IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3), + IEEE80211_SDATA_DISCONNECT_RESUME = BIT(4), + IEEE80211_SDATA_IN_DRIVER = BIT(5), + IEEE80211_SDATA_DISCONNECT_HW_RESTART = BIT(6), +}; + +/** + * enum ieee80211_sdata_state_bits - virtual interface state bits + * @SDATA_STATE_RUNNING: virtual interface is up & running; this + * mirrors netif_running() but is separate for interface type + * change handling while the interface is up + * @SDATA_STATE_OFFCHANNEL: This interface is currently in offchannel + * mode, so queues are stopped + * @SDATA_STATE_OFFCHANNEL_BEACON_STOPPED: Beaconing was stopped due + * to offchannel, reset when offchannel returns + */ +enum ieee80211_sdata_state_bits { + SDATA_STATE_RUNNING, + SDATA_STATE_OFFCHANNEL, + SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, +}; + +/** + * enum ieee80211_chanctx_mode - channel context configuration mode + * + * @IEEE80211_CHANCTX_SHARED: channel context may be used by + * multiple interfaces + * @IEEE80211_CHANCTX_EXCLUSIVE: channel context can be used + * only by a single interface. This can be used for example for + * non-fixed channel IBSS. + */ +enum ieee80211_chanctx_mode { + IEEE80211_CHANCTX_SHARED, + IEEE80211_CHANCTX_EXCLUSIVE +}; + +/** + * enum ieee80211_chanctx_replace_state - channel context replacement state + * + * This is used for channel context in-place reservations that require channel + * context switch/swap. + * + * @IEEE80211_CHANCTX_REPLACE_NONE: no replacement is taking place + * @IEEE80211_CHANCTX_WILL_BE_REPLACED: this channel context will be replaced + * by a (not yet registered) channel context pointed by %replace_ctx. + * @IEEE80211_CHANCTX_REPLACES_OTHER: this (not yet registered) channel context + * replaces an existing channel context pointed to by %replace_ctx. 
+ */ +enum ieee80211_chanctx_replace_state { + IEEE80211_CHANCTX_REPLACE_NONE, + IEEE80211_CHANCTX_WILL_BE_REPLACED, + IEEE80211_CHANCTX_REPLACES_OTHER, +}; + +struct ieee80211_chanctx { + struct list_head list; + struct rcu_head rcu_head; + + struct list_head assigned_links; + struct list_head reserved_links; + + enum ieee80211_chanctx_replace_state replace_state; + struct ieee80211_chanctx *replace_ctx; + + enum ieee80211_chanctx_mode mode; + bool driver_present; + + struct ieee80211_chanctx_conf conf; +}; + +struct mac80211_qos_map { + struct cfg80211_qos_map qos_map; + struct rcu_head rcu_head; +}; + +enum txq_info_flags { + IEEE80211_TXQ_STOP, + IEEE80211_TXQ_AMPDU, + IEEE80211_TXQ_NO_AMSDU, + IEEE80211_TXQ_DIRTY, +}; + +/** + * struct txq_info - per tid queue + * + * @tin: contains packets split into multiple flows + * @def_flow: used as a fallback flow when a packet destined to @tin hashes to + * a fq_flow which is already owned by a different tin + * @def_cvars: codel vars for @def_flow + * @frags: used to keep fragments created after dequeue + * @schedule_order: used with ieee80211_local->active_txqs + * @schedule_round: counter to prevent infinite loops on TXQ scheduling + */ +struct txq_info { + struct fq_tin tin; + struct codel_vars def_cvars; + struct codel_stats cstats; + + u16 schedule_round; + struct list_head schedule_order; + + struct sk_buff_head frags; + + unsigned long flags; + + /* keep last! */ + struct ieee80211_txq txq; +}; + +struct ieee80211_if_mntr { + u32 flags; + u8 mu_follow_addr[ETH_ALEN] __aligned(2); + + struct list_head list; +}; + +/** + * struct ieee80211_if_nan - NAN state + * + * @conf: current NAN configuration + * @func_ids: a bitmap of available instance_id's + */ +struct ieee80211_if_nan { + struct cfg80211_nan_conf conf; + + /* protects function_inst_ids */ + spinlock_t func_lock; + struct idr function_inst_ids; +}; + +struct ieee80211_link_data_managed { + u8 bssid[ETH_ALEN] __aligned(2); + + u8 dtim_period; + enum ieee80211_smps_mode req_smps, /* requested smps mode */ + driver_smps_mode; /* smps mode request */ + + ieee80211_conn_flags_t conn_flags; + + s16 p2p_noa_index; + + bool tdls_chan_switch_prohibited; + + bool have_beacon; + bool tracking_signal_avg; + bool disable_wmm_tracking; + bool operating_11g_mode; + + bool csa_waiting_bcn; + bool csa_ignored_same_chan; + struct wiphy_delayed_work chswitch_work; + + struct wiphy_work request_smps_work; + bool beacon_crc_valid; + u32 beacon_crc; + struct ewma_beacon_signal ave_beacon_signal; + int last_ave_beacon_signal; + + /* + * Number of Beacon frames used in ave_beacon_signal. This can be used + * to avoid generating less reliable cqm events that would be based + * only on couple of received frames. + */ + unsigned int count_beacon_signal; + + /* Number of times beacon loss was invoked. */ + unsigned int beacon_loss_count; + + /* + * Last Beacon frame signal strength average (ave_beacon_signal / 16) + * that triggered a cqm event. 0 indicates that no event has been + * generated for the current association. + */ + int last_cqm_event_signal; + + int wmm_last_param_set; + int mu_edca_last_param_set; + + u8 bss_param_ch_cnt; + + struct cfg80211_bss *bss; +}; + +struct ieee80211_link_data_ap { + struct beacon_data __rcu *beacon; + struct probe_resp __rcu *probe_resp; + struct fils_discovery_data __rcu *fils_discovery; + struct unsol_bcast_probe_resp_data __rcu *unsol_bcast_probe_resp; + + /* to be used after channel switch. 
*/ + struct cfg80211_beacon_data *next_beacon; +}; + +struct ieee80211_link_data { + struct ieee80211_sub_if_data *sdata; + unsigned int link_id; + + struct list_head assigned_chanctx_list; /* protected by chanctx_mtx */ + struct list_head reserved_chanctx_list; /* protected by chanctx_mtx */ + + /* multicast keys only */ + struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + + NUM_DEFAULT_MGMT_KEYS + + NUM_DEFAULT_BEACON_KEYS]; + struct ieee80211_key __rcu *default_multicast_key; + struct ieee80211_key __rcu *default_mgmt_key; + struct ieee80211_key __rcu *default_beacon_key; + + struct work_struct csa_finalize_work; + bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */ + + bool operating_11g_mode; + + struct cfg80211_chan_def csa_chandef; + + struct work_struct color_change_finalize_work; + struct delayed_work color_collision_detect_work; + u64 color_bitmap; + + /* context reservation -- protected with chanctx_mtx */ + struct ieee80211_chanctx *reserved_chanctx; + struct cfg80211_chan_def reserved_chandef; + bool reserved_radar_required; + bool reserved_ready; + + u8 needed_rx_chains; + enum ieee80211_smps_mode smps_mode; + + int user_power_level; /* in dBm */ + int ap_power_level; /* in dBm */ + + bool radar_required; + struct delayed_work dfs_cac_timer_work; + + union { + struct ieee80211_link_data_managed mgd; + struct ieee80211_link_data_ap ap; + } u; + + struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS]; + + struct ieee80211_bss_conf *conf; + +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *debugfs_dir; +#endif +}; + +struct ieee80211_sub_if_data { + struct list_head list; + + struct wireless_dev wdev; + + /* keys */ + struct list_head key_list; + + /* count for keys needing tailroom space allocation */ + int crypto_tx_tailroom_needed_cnt; + int crypto_tx_tailroom_pending_dec; + struct delayed_work dec_tailroom_needed_wk; + + struct net_device *dev; + struct ieee80211_local *local; + + unsigned int flags; + + unsigned long state; + + char name[IFNAMSIZ]; + + struct ieee80211_fragment_cache frags; + + /* TID bitmap for NoAck policy */ + u16 noack_map; + + /* bit field of ACM bits (BIT(802.1D tag)) */ + u8 wmm_acm; + + struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS]; + struct ieee80211_key __rcu *default_unicast_key; + + u16 sequence_number; + u16 mld_mcast_seq; + __be16 control_port_protocol; + bool control_port_no_encrypt; + bool control_port_no_preauth; + bool control_port_over_nl80211; + + atomic_t num_tx_queued; + struct mac80211_qos_map __rcu *qos_map; + + /* used to reconfigure hardware SM PS */ + struct work_struct recalc_smps; + + struct wiphy_work work; + struct sk_buff_head skb_queue; + struct sk_buff_head status_queue; + + /* + * AP this belongs to: self in AP mode and + * corresponding AP in VLAN mode, NULL for + * all others (might be needed later in IBSS) + */ + struct ieee80211_if_ap *bss; + + /* bitmap of allowed (non-MCS) rate indexes for rate control */ + u32 rc_rateidx_mask[NUM_NL80211_BANDS]; + + bool rc_has_mcs_mask[NUM_NL80211_BANDS]; + u8 rc_rateidx_mcs_mask[NUM_NL80211_BANDS][IEEE80211_HT_MCS_MASK_LEN]; + + bool rc_has_vht_mcs_mask[NUM_NL80211_BANDS]; + u16 rc_rateidx_vht_mcs_mask[NUM_NL80211_BANDS][NL80211_VHT_NSS_MAX]; + + /* Beacon frame (non-MCS) rate (as a bitmap) */ + u32 beacon_rateidx_mask[NUM_NL80211_BANDS]; + bool beacon_rate_set; + + union { + struct ieee80211_if_ap ap; + struct ieee80211_if_vlan vlan; + struct ieee80211_if_managed mgd; + struct ieee80211_if_ibss ibss; + struct ieee80211_if_mesh mesh; + struct 
ieee80211_if_ocb ocb; + struct ieee80211_if_mntr mntr; + struct ieee80211_if_nan nan; + } u; + + struct ieee80211_link_data deflink; + struct ieee80211_link_data __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; + + /* for ieee80211_set_active_links_async() */ + struct work_struct activate_links_work; + u16 desired_active_links; + +#ifdef CONFIG_MAC80211_DEBUGFS + struct { + struct dentry *subdir_stations; + struct dentry *default_unicast_key; + struct dentry *default_multicast_key; + struct dentry *default_mgmt_key; + struct dentry *default_beacon_key; + } debugfs; +#endif + + /* must be last, dynamically sized area in this! */ + struct ieee80211_vif vif; +}; + +static inline +struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p) +{ + return container_of(p, struct ieee80211_sub_if_data, vif); +} + +static inline void sdata_lock(struct ieee80211_sub_if_data *sdata) + __acquires(&sdata->wdev.mtx) +{ + mutex_lock(&sdata->wdev.mtx); + __acquire(&sdata->wdev.mtx); +} + +static inline void sdata_unlock(struct ieee80211_sub_if_data *sdata) + __releases(&sdata->wdev.mtx) +{ + mutex_unlock(&sdata->wdev.mtx); + __release(&sdata->wdev.mtx); +} + +#define sdata_dereference(p, sdata) \ + rcu_dereference_protected(p, lockdep_is_held(&sdata->wdev.mtx)) + +static inline void +sdata_assert_lock(struct ieee80211_sub_if_data *sdata) +{ + lockdep_assert_held(&sdata->wdev.mtx); +} + +static inline int +ieee80211_chanwidth_get_shift(enum nl80211_chan_width width) +{ + switch (width) { + case NL80211_CHAN_WIDTH_5: + return 2; + case NL80211_CHAN_WIDTH_10: + return 1; + default: + return 0; + } +} + +static inline int +ieee80211_chandef_get_shift(struct cfg80211_chan_def *chandef) +{ + return ieee80211_chanwidth_get_shift(chandef->width); +} + +static inline int +ieee80211_vif_get_shift(struct ieee80211_vif *vif) +{ + struct ieee80211_chanctx_conf *chanctx_conf; + int shift = 0; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf); + if (chanctx_conf) + shift = ieee80211_chandef_get_shift(&chanctx_conf->def); + rcu_read_unlock(); + + return shift; +} + +static inline int +ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems, + struct cfg80211_rnr_elems *rnr_elems, + u8 i) +{ + int len = 0; + + if (!elems || !elems->cnt || i > elems->cnt) + return 0; + + if (i < elems->cnt) { + len = elems->elem[i].len; + if (rnr_elems) { + len += rnr_elems->elem[i].len; + for (i = elems->cnt; i < rnr_elems->cnt; i++) + len += rnr_elems->elem[i].len; + } + return len; + } + + /* i == elems->cnt, calculate total length of all MBSSID elements */ + for (i = 0; i < elems->cnt; i++) + len += elems->elem[i].len; + + if (rnr_elems) { + for (i = 0; i < rnr_elems->cnt; i++) + len += rnr_elems->elem[i].len; + } + + return len; +} + +enum { + IEEE80211_RX_MSG = 1, + IEEE80211_TX_STATUS_MSG = 2, +}; + +enum queue_stop_reason { + IEEE80211_QUEUE_STOP_REASON_DRIVER, + IEEE80211_QUEUE_STOP_REASON_PS, + IEEE80211_QUEUE_STOP_REASON_CSA, + IEEE80211_QUEUE_STOP_REASON_AGGREGATION, + IEEE80211_QUEUE_STOP_REASON_SUSPEND, + IEEE80211_QUEUE_STOP_REASON_SKB_ADD, + IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL, + IEEE80211_QUEUE_STOP_REASON_FLUSH, + IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN, + IEEE80211_QUEUE_STOP_REASON_RESERVE_TID, + IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE, + + IEEE80211_QUEUE_STOP_REASONS, +}; + +#ifdef CONFIG_MAC80211_LEDS +struct tpt_led_trigger { + char name[32]; + const struct ieee80211_tpt_blink *blink_table; + unsigned int blink_table_len; + struct timer_list timer; + struct 
ieee80211_local *local; + unsigned long prev_traffic; + unsigned long tx_bytes, rx_bytes; + unsigned int active, want; + bool running; +}; +#endif + +/** + * mac80211 scan flags - currently active scan mode + * + * @SCAN_SW_SCANNING: We're currently in the process of scanning but may as + * well be on the operating channel + * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to + * determine if we are on the operating channel or not + * @SCAN_ONCHANNEL_SCANNING: Do a software scan on only the current operating + * channel. This should not interrupt normal traffic. + * @SCAN_COMPLETED: Set for our scan work function when the driver reported + * that the scan completed. + * @SCAN_ABORTED: Set for our scan work function when the driver reported + * a scan complete for an aborted scan. + * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being + * cancelled. + * @SCAN_BEACON_WAIT: Set whenever we're passive scanning because of radar/no-IR + * and could send a probe request after receiving a beacon. + * @SCAN_BEACON_DONE: Beacon received, we can now send a probe request + */ +enum { + SCAN_SW_SCANNING, + SCAN_HW_SCANNING, + SCAN_ONCHANNEL_SCANNING, + SCAN_COMPLETED, + SCAN_ABORTED, + SCAN_HW_CANCELLED, + SCAN_BEACON_WAIT, + SCAN_BEACON_DONE, +}; + +/** + * enum mac80211_scan_state - scan state machine states + * + * @SCAN_DECISION: Main entry point to the scan state machine, this state + * determines if we should keep on scanning or switch back to the + * operating channel + * @SCAN_SET_CHANNEL: Set the next channel to be scanned + * @SCAN_SEND_PROBE: Send probe requests and wait for probe responses + * @SCAN_SUSPEND: Suspend the scan and go back to operating channel to + * send out data + * @SCAN_RESUME: Resume the scan and scan the next channel + * @SCAN_ABORT: Abort the scan and go back to operating channel + */ +enum mac80211_scan_state { + SCAN_DECISION, + SCAN_SET_CHANNEL, + SCAN_SEND_PROBE, + SCAN_SUSPEND, + SCAN_RESUME, + SCAN_ABORT, +}; + +DECLARE_STATIC_KEY_FALSE(aql_disable); + +struct ieee80211_local { + /* embed the driver visible part. + * don't cast (use the static inlines below), but we keep + * it first anyway so they become a no-op */ + struct ieee80211_hw hw; + + struct fq fq; + struct codel_vars *cvars; + struct codel_params cparams; + + /* protects active_txqs and txqi->schedule_order */ + spinlock_t active_txq_lock[IEEE80211_NUM_ACS]; + struct list_head active_txqs[IEEE80211_NUM_ACS]; + u16 schedule_round[IEEE80211_NUM_ACS]; + + /* serializes ieee80211_handle_wake_tx_queue */ + spinlock_t handle_wake_tx_queue_lock; + + u16 airtime_flags; + u32 aql_txq_limit_low[IEEE80211_NUM_ACS]; + u32 aql_txq_limit_high[IEEE80211_NUM_ACS]; + u32 aql_threshold; + atomic_t aql_total_pending_airtime; + atomic_t aql_ac_pending_airtime[IEEE80211_NUM_ACS]; + + const struct ieee80211_ops *ops; + + /* + * private workqueue to mac80211. 
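As an aside, the scan state machine documented above is driven from the scan
work in scan.c (not part of this hunk); a condensed, illustrative dispatch
skeleton looks roughly like this:

        switch (local->next_scan_state) {
        case SCAN_DECISION:
                /* keep scanning, suspend, or complete */
                break;
        case SCAN_SET_CHANNEL:
                /* tune to the next channel in the request */
                break;
        case SCAN_SEND_PROBE:
                /* send probe requests, wait for responses */
                break;
        case SCAN_SUSPEND:
                /* go back to the operating channel for data traffic */
                break;
        case SCAN_RESUME:
                /* leave the operating channel and continue scanning */
                break;
        case SCAN_ABORT:
                /* abort and return to the operating channel */
                break;
        }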
mac80211 makes this accessible + * via ieee80211_queue_work() + */ + struct workqueue_struct *workqueue; + + unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES]; + int q_stop_reasons[IEEE80211_MAX_QUEUES][IEEE80211_QUEUE_STOP_REASONS]; + /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */ + spinlock_t queue_stop_reason_lock; + + int open_count; + int monitors, cooked_mntrs; + /* number of interfaces with corresponding FIF_ flags */ + int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll, + fif_probe_req; + bool probe_req_reg; + bool rx_mcast_action_reg; + unsigned int filter_flags; /* FIF_* */ + + bool wiphy_ciphers_allocated; + + bool use_chanctx; + + /* protects the aggregated multicast list and filter calls */ + spinlock_t filter_lock; + + /* used for uploading changed mc list */ + struct work_struct reconfig_filter; + + /* aggregated multicast list */ + struct netdev_hw_addr_list mc_list; + + bool tim_in_locked_section; /* see ieee80211_beacon_get() */ + + /* + * suspended is true if we finished all the suspend _and_ we have + * not yet come up from resume. This is to be used by mac80211 + * to ensure driver sanity during suspend and mac80211's own + * sanity. It can eventually be used for WoW as well. + */ + bool suspended; + + /* suspending is true during the whole suspend process */ + bool suspending; + + /* + * Resuming is true while suspended, but when we're reprogramming the + * hardware -- at that time it's allowed to use ieee80211_queue_work() + * again even though some other parts of the stack are still suspended + * and we still drop received frames to avoid waking the stack. + */ + bool resuming; + + /* + * quiescing is true during the suspend process _only_ to + * ease timer cancelling etc. + */ + bool quiescing; + + /* device is started */ + bool started; + + /* device is during a HW reconfig */ + bool in_reconfig; + + /* reconfiguration failed ... suppress some warnings etc. */ + bool reconfig_failure; + + /* wowlan is enabled -- don't reconfig on resume */ + bool wowlan; + + struct wiphy_work radar_detected_work; + + /* number of RX chains the hardware has */ + u8 rx_chains; + + /* bitmap of which sbands were copied */ + u8 sband_allocated; + + int tx_headroom; /* required headroom for hardware/radiotap */ + + /* Tasklet and skb queue to process calls from IRQ mode. All frames + * added to skb_queue will be processed, but frames in + * skb_queue_unreliable may be dropped if the total length of these + * queues increases over the limit. */ +#define IEEE80211_IRQSAFE_QUEUE_LIMIT 128 + struct tasklet_struct tasklet; + struct sk_buff_head skb_queue; + struct sk_buff_head skb_queue_unreliable; + + spinlock_t rx_path_lock; + + /* Station data */ + /* + * The mutex only protects the list, hash table and + * counter, reads are done with RCU. 
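For example (an illustrative sketch, not part of this patch), a read-side walk
of the station list below only needs an RCU critical section, while any
addition or removal has to hold sta_mtx; this assumes the list linkage member
of struct sta_info is named "list", as in sta_info.h:

        struct sta_info *sta;

        rcu_read_lock();
        list_for_each_entry_rcu(sta, &local->sta_list, list) {
                /* read-only access to *sta */
        }
        rcu_read_unlock();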
+ */ + struct mutex sta_mtx; + spinlock_t tim_lock; + unsigned long num_sta; + struct list_head sta_list; + struct rhltable sta_hash; + struct rhltable link_sta_hash; + struct timer_list sta_cleanup; + int sta_generation; + + struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; + struct tasklet_struct tx_pending_tasklet; + struct tasklet_struct wake_txqs_tasklet; + + atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES]; + + /* number of interfaces with allmulti RX */ + atomic_t iff_allmultis; + + struct rate_control_ref *rate_ctrl; + + struct arc4_ctx wep_tx_ctx; + struct arc4_ctx wep_rx_ctx; + u32 wep_iv; + + /* see iface.c */ + struct list_head interfaces; + struct list_head mon_list; /* only that are IFF_UP && !cooked */ + struct mutex iflist_mtx; + + /* + * Key mutex, protects sdata's key_list and sta_info's + * key pointers and ptk_idx (write access, they're RCU.) + */ + struct mutex key_mtx; + + /* mutex for scan and work locking */ + struct mutex mtx; + + /* Scanning and BSS list */ + unsigned long scanning; + struct cfg80211_ssid scan_ssid; + struct cfg80211_scan_request *int_scan_req; + struct cfg80211_scan_request __rcu *scan_req; + struct ieee80211_scan_request *hw_scan_req; + struct cfg80211_chan_def scan_chandef; + enum nl80211_band hw_scan_band; + int scan_channel_idx; + int scan_ies_len; + int hw_scan_ies_bufsize; + struct cfg80211_scan_info scan_info; + + struct wiphy_work sched_scan_stopped_work; + struct ieee80211_sub_if_data __rcu *sched_scan_sdata; + struct cfg80211_sched_scan_request __rcu *sched_scan_req; + u8 scan_addr[ETH_ALEN]; + + unsigned long leave_oper_channel_time; + enum mac80211_scan_state next_scan_state; + struct wiphy_delayed_work scan_work; + struct ieee80211_sub_if_data __rcu *scan_sdata; + /* For backward compatibility only -- do not use */ + struct cfg80211_chan_def _oper_chandef; + + /* Temporary remain-on-channel for off-channel operations */ + struct ieee80211_channel *tmp_channel; + + /* channel contexts */ + struct list_head chanctx_list; + struct mutex chanctx_mtx; + +#ifdef CONFIG_MAC80211_LEDS + struct led_trigger tx_led, rx_led, assoc_led, radio_led; + struct led_trigger tpt_led; + atomic_t tx_led_active, rx_led_active, assoc_led_active; + atomic_t radio_led_active, tpt_led_active; + struct tpt_led_trigger *tpt_led_trigger; +#endif + +#ifdef CONFIG_MAC80211_DEBUG_COUNTERS + /* SNMP counters */ + /* dot11CountersTable */ + u32 dot11TransmittedFragmentCount; + u32 dot11MulticastTransmittedFrameCount; + u32 dot11FailedCount; + u32 dot11RetryCount; + u32 dot11MultipleRetryCount; + u32 dot11FrameDuplicateCount; + u32 dot11ReceivedFragmentCount; + u32 dot11MulticastReceivedFrameCount; + u32 dot11TransmittedFrameCount; + + /* TX/RX handler statistics */ + unsigned int tx_handlers_drop; + unsigned int tx_handlers_queued; + unsigned int tx_handlers_drop_wep; + unsigned int tx_handlers_drop_not_assoc; + unsigned int tx_handlers_drop_unauth_port; + unsigned int rx_handlers_drop; + unsigned int rx_handlers_queued; + unsigned int rx_handlers_drop_nullfunc; + unsigned int rx_handlers_drop_defrag; + unsigned int tx_expand_skb_head; + unsigned int tx_expand_skb_head_cloned; + unsigned int rx_expand_skb_head_defrag; + unsigned int rx_handlers_fragments; + unsigned int tx_status_drop; +#define I802_DEBUG_INC(c) (c)++ +#else /* CONFIG_MAC80211_DEBUG_COUNTERS */ +#define I802_DEBUG_INC(c) do { } while (0) +#endif /* CONFIG_MAC80211_DEBUG_COUNTERS */ + + + int total_ps_buffered; /* total number of all buffered unicast and + * multicast packets for power saving stations + 
*/ + + bool pspolling; + /* + * PS can only be enabled when we have exactly one managed + * interface (and monitors) in PS, this then points there. + */ + struct ieee80211_sub_if_data *ps_sdata; + struct work_struct dynamic_ps_enable_work; + struct work_struct dynamic_ps_disable_work; + struct timer_list dynamic_ps_timer; + struct notifier_block ifa_notifier; + struct notifier_block ifa6_notifier; + + /* + * The dynamic ps timeout configured from user space via WEXT - + * this will override whatever chosen by mac80211 internally. + */ + int dynamic_ps_forced_timeout; + + int user_power_level; /* in dBm, for all interfaces */ + + enum ieee80211_smps_mode smps_mode; + + struct work_struct restart_work; + +#ifdef CONFIG_MAC80211_DEBUGFS + struct local_debugfsdentries { + struct dentry *rcdir; + struct dentry *keys; + } debugfs; + bool force_tx_status; +#endif + + /* + * Remain-on-channel support + */ + struct wiphy_delayed_work roc_work; + struct list_head roc_list; + struct wiphy_work hw_roc_start, hw_roc_done; + unsigned long hw_roc_start_time; + u64 roc_cookie_counter; + + struct idr ack_status_frames; + spinlock_t ack_status_lock; + + struct ieee80211_sub_if_data __rcu *p2p_sdata; + + /* virtual monitor interface */ + struct ieee80211_sub_if_data __rcu *monitor_sdata; + struct cfg80211_chan_def monitor_chandef; + + /* extended capabilities provided by mac80211 */ + u8 ext_capa[8]; +}; + +static inline struct ieee80211_sub_if_data * +IEEE80211_DEV_TO_SUB_IF(const struct net_device *dev) +{ + return netdev_priv(dev); +} + +static inline struct ieee80211_sub_if_data * +IEEE80211_WDEV_TO_SUB_IF(struct wireless_dev *wdev) +{ + return container_of(wdev, struct ieee80211_sub_if_data, wdev); +} + +static inline struct ieee80211_supported_band * +ieee80211_get_sband(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *chanctx_conf; + enum nl80211_band band; + + WARN_ON(ieee80211_vif_is_mld(&sdata->vif)); + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + + if (!chanctx_conf) { + rcu_read_unlock(); + return NULL; + } + + band = chanctx_conf->def.chan->band; + rcu_read_unlock(); + + return local->hw.wiphy->bands[band]; +} + +static inline struct ieee80211_supported_band * +ieee80211_get_link_sband(struct ieee80211_link_data *link) +{ + struct ieee80211_local *local = link->sdata->local; + struct ieee80211_chanctx_conf *chanctx_conf; + enum nl80211_band band; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(link->conf->chanctx_conf); + if (!chanctx_conf) { + rcu_read_unlock(); + return NULL; + } + + band = chanctx_conf->def.chan->band; + rcu_read_unlock(); + + return local->hw.wiphy->bands[band]; +} + +/* this struct holds the value parsing from channel switch IE */ +struct ieee80211_csa_ie { + struct cfg80211_chan_def chandef; + u8 mode; + u8 count; + u8 ttl; + u16 pre_value; + u16 reason_code; + u32 max_switch_time; +}; + +/* Parsed Information Elements */ +struct ieee802_11_elems { + const u8 *ie_start; + size_t total_len; + u32 crc; + + /* pointers to IEs */ + const struct ieee80211_tdls_lnkie *lnk_id; + const struct ieee80211_ch_switch_timing *ch_sw_timing; + const u8 *ext_capab; + const u8 *ssid; + const u8 *supp_rates; + const u8 *ds_params; + const struct ieee80211_tim_ie *tim; + const u8 *rsn; + const u8 *rsnx; + const u8 *erp_info; + const u8 *ext_supp_rates; + const u8 *wmm_info; + const u8 *wmm_param; + const struct ieee80211_ht_cap *ht_cap_elem; + const struct 
ieee80211_ht_operation *ht_operation; + const struct ieee80211_vht_cap *vht_cap_elem; + const struct ieee80211_vht_operation *vht_operation; + const struct ieee80211_meshconf_ie *mesh_config; + const u8 *he_cap; + const struct ieee80211_he_operation *he_operation; + const struct ieee80211_he_spr *he_spr; + const struct ieee80211_mu_edca_param_set *mu_edca_param_set; + const struct ieee80211_he_6ghz_capa *he_6ghz_capa; + const struct ieee80211_tx_pwr_env *tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT]; + const u8 *uora_element; + const u8 *mesh_id; + const u8 *peering; + const __le16 *awake_window; + const u8 *preq; + const u8 *prep; + const u8 *perr; + const struct ieee80211_rann_ie *rann; + const struct ieee80211_channel_sw_ie *ch_switch_ie; + const struct ieee80211_ext_chansw_ie *ext_chansw_ie; + const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie; + const u8 *max_channel_switch_time; + const u8 *country_elem; + const u8 *pwr_constr_elem; + const u8 *cisco_dtpc_elem; + const struct ieee80211_timeout_interval_ie *timeout_int; + const u8 *opmode_notif; + const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; + struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie; + const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie; + const struct ieee80211_multiple_bssid_configuration *mbssid_config_ie; + const struct ieee80211_bssid_index *bssid_index; + u8 max_bssid_indicator; + u8 dtim_count; + u8 dtim_period; + const struct ieee80211_addba_ext_ie *addba_ext_ie; + const struct ieee80211_s1g_cap *s1g_capab; + const struct ieee80211_s1g_oper_ie *s1g_oper; + const struct ieee80211_s1g_bcn_compat_ie *s1g_bcn_compat; + const struct ieee80211_aid_response_ie *aid_resp; + const struct ieee80211_eht_cap_elem *eht_cap; + const struct ieee80211_eht_operation *eht_operation; + const struct ieee80211_multi_link_elem *ml_basic; + const struct ieee80211_multi_link_elem *ml_reconf; + + /* length of them, respectively */ + u8 ext_capab_len; + u8 ssid_len; + u8 supp_rates_len; + u8 tim_len; + u8 rsn_len; + u8 rsnx_len; + u8 ext_supp_rates_len; + u8 wmm_info_len; + u8 wmm_param_len; + u8 he_cap_len; + u8 mesh_id_len; + u8 peering_len; + u8 preq_len; + u8 prep_len; + u8 perr_len; + u8 country_elem_len; + u8 bssid_index_len; + u8 tx_pwr_env_len[IEEE80211_TPE_MAX_IE_COUNT]; + u8 tx_pwr_env_num; + u8 eht_cap_len; + + /* mult-link element can be de-fragmented and thus u8 is not sufficient */ + size_t ml_basic_len; + size_t ml_reconf_len; + + /* The basic Multi-Link element in the original IEs */ + const struct element *ml_basic_elem; + + /* The reconfiguration Multi-Link element in the original IEs */ + const struct element *ml_reconf_elem; + + /* + * store the per station profile pointer and length in case that the + * parsing also handled Multi-Link element parsing for a specific link + * ID. + */ + struct ieee80211_mle_per_sta_profile *prof; + size_t sta_prof_len; + + /* whether a parse error occurred while retrieving these elements */ + bool parse_error; + + /* + * scratch buffer that can be used for various element parsing related + * tasks, e.g., element de-fragmentation etc. 
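As an illustrative aside (not part of this patch), the usual way to obtain and
consume one of these parsed-element structures is through the
ieee802_11_parse_elems() helpers declared further down; the result is heap
allocated and must be freed by the caller. Here mgmt, len and baselen stand for
the management frame, its length and the fixed-part offset:

        struct ieee802_11_elems *elems;

        elems = ieee802_11_parse_elems(mgmt->u.probe_resp.variable,
                                       len - baselen, false, NULL);
        if (!elems || elems->parse_error) {
                kfree(elems);
                return;
        }
        /* ... look at elems->ssid, elems->ht_cap_elem, ... */
        kfree(elems);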
+ */ + size_t scratch_len; + u8 *scratch_pos; + u8 scratch[]; +}; + +static inline struct ieee80211_local *hw_to_local( + struct ieee80211_hw *hw) +{ + return container_of(hw, struct ieee80211_local, hw); +} + +static inline struct txq_info *to_txq_info(struct ieee80211_txq *txq) +{ + return container_of(txq, struct txq_info, txq); +} + +static inline bool txq_has_queue(struct ieee80211_txq *txq) +{ + struct txq_info *txqi = to_txq_info(txq); + + return !(skb_queue_empty(&txqi->frags) && !txqi->tin.backlog_packets); +} + +static inline bool +ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status) +{ + WARN_ON_ONCE(status->flag & RX_FLAG_MACTIME_START && + status->flag & RX_FLAG_MACTIME_END); + return !!(status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END | + RX_FLAG_MACTIME_PLCP_START)); +} + +void ieee80211_vif_inc_num_mcast(struct ieee80211_sub_if_data *sdata); +void ieee80211_vif_dec_num_mcast(struct ieee80211_sub_if_data *sdata); + +/* This function returns the number of multicast stations connected to this + * interface. It returns -1 if that number is not tracked, that is for netdevs + * not in AP or AP_VLAN mode or when using 4addr. + */ +static inline int +ieee80211_vif_get_num_mcast_if(struct ieee80211_sub_if_data *sdata) +{ + if (sdata->vif.type == NL80211_IFTYPE_AP) + return atomic_read(&sdata->u.ap.num_mcast_sta); + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) + return atomic_read(&sdata->u.vlan.num_mcast_sta); + return -1; +} + +u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, + struct ieee80211_rx_status *status, + unsigned int mpdu_len, + unsigned int mpdu_offset); +int ieee80211_hw_config(struct ieee80211_local *local, u32 changed); +void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); +void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, + u64 changed); +void ieee80211_vif_cfg_change_notify(struct ieee80211_sub_if_data *sdata, + u64 changed); +void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link, + u64 changed); +void ieee80211_configure_filter(struct ieee80211_local *local); +u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata); + +u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local); +int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb, + u64 *cookie, gfp_t gfp); + +void ieee80211_check_fast_rx(struct sta_info *sta); +void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata); +void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata); +void ieee80211_clear_fast_rx(struct sta_info *sta); + +bool ieee80211_is_our_addr(struct ieee80211_sub_if_data *sdata, + const u8 *addr, int *out_link_id); + +/* STA code */ +void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata); +int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, + struct cfg80211_auth_request *req); +int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, + struct cfg80211_assoc_request *req); +int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, + struct cfg80211_deauth_request *req); +int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, + struct cfg80211_disassoc_request *req); +void ieee80211_send_pspoll(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); +void ieee80211_recalc_ps(struct ieee80211_local *local); +void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata); +void ieee80211_sta_work(struct 
ieee80211_sub_if_data *sdata); +void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +void ieee80211_sta_rx_queued_ext(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata); +void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata); +void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata); +void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata, + __le16 fc, bool acked); +void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata); +void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); +void ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata); +void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, + u8 reason, bool tx); +void ieee80211_mgd_setup_link(struct ieee80211_link_data *link); +void ieee80211_mgd_stop_link(struct ieee80211_link_data *link); +void ieee80211_mgd_set_link_qos_params(struct ieee80211_link_data *link); + +/* IBSS code */ +void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); +void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata); +void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, + const u8 *bssid, const u8 *addr, u32 supp_rates); +int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, + struct cfg80211_ibss_params *params); +int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); +void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata); +void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata, + struct cfg80211_csa_settings *csa_settings, + u64 *changed); +int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata, + u64 *changed); +void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata); + +/* OCB code */ +void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata); +void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata, + const u8 *bssid, const u8 *addr, u32 supp_rates); +void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata); +int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata, + struct ocb_setup *setup); +int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata); + +/* mesh code */ +void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata); +void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata, + struct cfg80211_csa_settings *csa_settings, + u64 *changed); +int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata, + u64 *changed); + +/* scan/BSS handling */ +void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work); +int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata, + const u8 *ssid, u8 ssid_len, + struct ieee80211_channel **channels, + unsigned int n_channels, + enum nl80211_bss_scan_width scan_width); +int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, + struct cfg80211_scan_request *req); +void ieee80211_scan_cancel(struct ieee80211_local *local); +void ieee80211_run_deferred_scan(struct ieee80211_local *local); +void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb); + +void ieee80211_inform_bss(struct wiphy *wiphy, struct cfg80211_bss *bss, + const struct cfg80211_bss_ies *ies, void *data); + +void 
ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local); +struct ieee80211_bss * +ieee80211_bss_info_update(struct ieee80211_local *local, + struct ieee80211_rx_status *rx_status, + struct ieee80211_mgmt *mgmt, + size_t len, + struct ieee80211_channel *channel); +void ieee80211_rx_bss_put(struct ieee80211_local *local, + struct ieee80211_bss *bss); + +/* scheduled scan handling */ +int +__ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, + struct cfg80211_sched_scan_request *req); +int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, + struct cfg80211_sched_scan_request *req); +int ieee80211_request_sched_scan_stop(struct ieee80211_local *local); +void ieee80211_sched_scan_end(struct ieee80211_local *local); +void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy, + struct wiphy_work *work); + +/* off-channel/mgmt-tx */ +void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local); +void ieee80211_offchannel_return(struct ieee80211_local *local); +void ieee80211_roc_setup(struct ieee80211_local *local); +void ieee80211_start_next_roc(struct ieee80211_local *local); +void ieee80211_roc_purge(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); +int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, + struct ieee80211_channel *chan, + unsigned int duration, u64 *cookie); +int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy, + struct wireless_dev *wdev, u64 cookie); +int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_mgmt_tx_params *params, u64 *cookie); +int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, + struct wireless_dev *wdev, u64 cookie); + +/* channel switch handling */ +void ieee80211_csa_finalize_work(struct work_struct *work); +int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_csa_settings *params); + +/* color change handling */ +void ieee80211_color_change_finalize_work(struct work_struct *work); +void ieee80211_color_collision_detection_work(struct work_struct *work); + +/* interface handling */ +#define MAC80211_SUPPORTED_FEATURES_TX (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ + NETIF_F_HW_CSUM | NETIF_F_SG | \ + NETIF_F_HIGHDMA | NETIF_F_GSO_SOFTWARE | \ + NETIF_F_HW_TC) +#define MAC80211_SUPPORTED_FEATURES_RX (NETIF_F_RXCSUM) +#define MAC80211_SUPPORTED_FEATURES (MAC80211_SUPPORTED_FEATURES_TX | \ + MAC80211_SUPPORTED_FEATURES_RX) + +int ieee80211_iface_init(void); +void ieee80211_iface_exit(void); +int ieee80211_if_add(struct ieee80211_local *local, const char *name, + unsigned char name_assign_type, + struct wireless_dev **new_wdev, enum nl80211_iftype type, + struct vif_params *params); +int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, + enum nl80211_iftype type); +void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); +void ieee80211_remove_interfaces(struct ieee80211_local *local); +u32 ieee80211_idle_off(struct ieee80211_local *local); +void ieee80211_recalc_idle(struct ieee80211_local *local); +void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, + const int offset); +int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up); +void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata); +int ieee80211_add_virtual_monitor(struct ieee80211_local *local); +void ieee80211_del_virtual_monitor(struct ieee80211_local *local); + +bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata); +void 
ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata, + bool update_bss); +void ieee80211_recalc_offload(struct ieee80211_local *local); + +static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata) +{ + return test_bit(SDATA_STATE_RUNNING, &sdata->state); +} + +/* link handling */ +void ieee80211_link_setup(struct ieee80211_link_data *link); +void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, + int link_id, + struct ieee80211_link_data *link, + struct ieee80211_bss_conf *link_conf); +void ieee80211_link_stop(struct ieee80211_link_data *link); +int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata, + u16 new_links, u16 dormant_links); +void ieee80211_vif_clear_links(struct ieee80211_sub_if_data *sdata); +int __ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links); + +/* tx handling */ +void ieee80211_clear_tx_pending(struct ieee80211_local *local); +void ieee80211_tx_pending(struct tasklet_struct *t); +netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, + struct net_device *dev); +netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, + struct net_device *dev); +netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb, + struct net_device *dev); +void __ieee80211_subif_start_xmit(struct sk_buff *skb, + struct net_device *dev, + u32 info_flags, + u32 ctrl_flags, + u64 *cookie); +void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, + struct sk_buff_head *skbs); +struct sk_buff * +ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u32 info_flags); +void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, + int retry_count, int shift, bool send_to_cooked, + struct ieee80211_tx_status *status); + +void ieee80211_check_fast_xmit(struct sta_info *sta); +void ieee80211_check_fast_xmit_all(struct ieee80211_local *local); +void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata); +void ieee80211_clear_fast_xmit(struct sta_info *sta); +int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, + const u8 *buf, size_t len, + const u8 *dest, __be16 proto, bool unencrypted, + int link_id, u64 *cookie); +int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev, + const u8 *buf, size_t len); +void __ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee80211_fast_tx *fast_tx, + struct sk_buff *skb, bool ampdu, + const u8 *da, const u8 *sa); +void ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, struct sk_buff *skb); + +/* HT */ +void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta_ht_cap *ht_cap); +bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const struct ieee80211_ht_cap *ht_cap_ie, + struct link_sta_info *link_sta); +void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, + const u8 *da, u16 tid, + u16 initiator, u16 reason_code); +int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, + enum ieee80211_smps_mode smps, const u8 *da, + const u8 *bssid); +bool ieee80211_smps_is_restrictive(enum ieee80211_smps_mode smps_mode_old, + enum ieee80211_smps_mode smps_mode_new); + +void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, + u16 initiator, u16 reason, bool stop); +void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, + u16 initiator, u16 reason, bool stop); +void 
___ieee80211_start_rx_ba_session(struct sta_info *sta, + u8 dialog_token, u16 timeout, + u16 start_seq_num, u16 ba_policy, u16 tid, + u16 buf_size, bool tx, bool auto_seq, + const struct ieee80211_addba_ext_ie *addbaext); +void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, + enum ieee80211_agg_stop_reason reason); +void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee80211_mgmt *mgmt, size_t len); +void ieee80211_process_addba_resp(struct ieee80211_local *local, + struct sta_info *sta, + struct ieee80211_mgmt *mgmt, + size_t len); +void ieee80211_process_addba_request(struct ieee80211_local *local, + struct sta_info *sta, + struct ieee80211_mgmt *mgmt, + size_t len); + +int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, + enum ieee80211_agg_stop_reason reason); +int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, + enum ieee80211_agg_stop_reason reason); +void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid, + struct tid_ampdu_tx *tid_tx); +void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid, + struct tid_ampdu_tx *tid_tx); +void ieee80211_ba_session_work(struct work_struct *work); +void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid); +void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid); + +u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs); +enum nl80211_smps_mode +ieee80211_smps_mode_to_smps_mode(enum ieee80211_smps_mode smps); + +/* VHT */ +void +ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const struct ieee80211_vht_cap *vht_cap_ie, + const struct ieee80211_vht_cap *vht_cap_ie2, + struct link_sta_info *link_sta); +enum ieee80211_sta_rx_bandwidth +ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta); +enum ieee80211_sta_rx_bandwidth +ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta); +void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta); +enum ieee80211_sta_rx_bandwidth +ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width); +enum nl80211_chan_width +ieee80211_sta_cap_chan_bw(struct link_sta_info *link_sta); +void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link, + struct ieee80211_mgmt *mgmt); +u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, + struct link_sta_info *sta, + u8 opmode, enum nl80211_band band); +void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, + struct link_sta_info *sta, + u8 opmode, enum nl80211_band band); +void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta_vht_cap *vht_cap); +void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, + u16 vht_mask[NL80211_VHT_NSS_MAX]); +enum nl80211_chan_width +ieee80211_sta_rx_bw_to_chan_width(struct link_sta_info *sta); + +/* HE */ +void +ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const u8 *he_cap_ie, u8 he_cap_len, + const struct ieee80211_he_6ghz_capa *he_6ghz_capa, + struct link_sta_info *link_sta); +void +ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif, + const struct ieee80211_he_spr *he_spr_ie_elem); + +void +ieee80211_he_op_ie_to_bss_conf(struct ieee80211_vif *vif, + const struct ieee80211_he_operation *he_op_ie_elem); + +/* S1G */ +void ieee80211_s1g_sta_rate_init(struct sta_info *sta); +bool ieee80211_s1g_is_twt_setup(struct sk_buff *skb); +void 
ieee80211_s1g_rx_twt_action(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +void ieee80211_s1g_status_twt_action(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); + +/* Spectrum management */ +void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len); +/** + * ieee80211_parse_ch_switch_ie - parses channel switch IEs + * @sdata: the sdata of the interface which has received the frame + * @elems: parsed 802.11 elements received with the frame + * @current_band: indicates the current band + * @vht_cap_info: VHT capabilities of the transmitter + * @conn_flags: contains information about own capabilities and restrictions + * to decide which channel switch announcements can be accepted, using + * flags from &enum ieee80211_conn_flags. + * @bssid: the currently connected bssid (for reporting) + * @csa_ie: parsed 802.11 csa elements on count, mode, chandef and mesh ttl. + All of them will be filled with if success only. + * Return: 0 on success, <0 on error and >0 if there is nothing to parse. + */ +int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, + struct ieee802_11_elems *elems, + enum nl80211_band current_band, + u32 vht_cap_info, + ieee80211_conn_flags_t conn_flags, u8 *bssid, + struct ieee80211_csa_ie *csa_ie); + +/* Suspend/resume and hw reconfiguration */ +int ieee80211_reconfig(struct ieee80211_local *local); +void ieee80211_stop_device(struct ieee80211_local *local); + +int __ieee80211_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan); + +static inline int __ieee80211_resume(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + WARN(test_bit(SCAN_HW_SCANNING, &local->scanning) && + !test_bit(SCAN_COMPLETED, &local->scanning), + "%s: resume with hardware scan still in progress\n", + wiphy_name(hw->wiphy)); + + return ieee80211_reconfig(hw_to_local(hw)); +} + +/* utility functions/constants */ +extern const void *const mac80211_wiphy_privid; /* for wiphy privid */ +int ieee80211_frame_duration(enum nl80211_band band, size_t len, + int rate, int erp, int short_preamble, + int shift); +void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, + struct ieee80211_tx_queue_params *qparam, + int ac); +void ieee80211_set_wmm_default(struct ieee80211_link_data *link, + bool bss_notify, bool enable_qos); +void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, struct sk_buff *skb); + +void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, int tid, int link_id, + enum nl80211_band band); + +/* sta_out needs to be checked for ERR_PTR() before using */ +int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, + struct sta_info **sta_out); + +static inline void +ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, int tid, + enum nl80211_band band) +{ + rcu_read_lock(); + __ieee80211_tx_skb_tid_band(sdata, skb, tid, -1, band); + rcu_read_unlock(); +} + +void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, int tid, int link_id); + +static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + /* Send all internal mgmt frames on VO. Accordingly set TID to 7. 
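As an illustrative aside (not part of this patch): the 802.1D mapping table
declared below puts TID 7 into the voice access category, so these frames end
up on the VO queue:

        int ac = ieee80211_ac_from_tid(7);      /* == IEEE80211_AC_VO */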
*/ + ieee80211_tx_skb_tid(sdata, skb, 7, -1); +} + +/** + * struct ieee80211_elems_parse_params - element parsing parameters + * @start: pointer to the elements + * @len: length of the elements + * @action: %true if the elements came from an action frame + * @filter: bitmap of element IDs to filter out while calculating + * the element CRC + * @crc: CRC starting value + * @bss: the BSS to parse this as, for multi-BSSID cases this can + * represent a non-transmitting BSS in which case the data + * for that non-transmitting BSS is returned + * @link_id: the link ID to parse elements for, if a STA profile + * is present in the multi-link element, or -1 to ignore; + * note that the code currently assumes parsing an association + * (or re-association) response frame if this is given + * @from_ap: frame is received from an AP (currently used only + * for EHT capabilities parsing) + */ +struct ieee80211_elems_parse_params { + const u8 *start; + size_t len; + bool action; + u64 filter; + u32 crc; + struct cfg80211_bss *bss; + int link_id; + bool from_ap; +}; + +struct ieee802_11_elems * +ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params); + +static inline struct ieee802_11_elems * +ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, + u64 filter, u32 crc, + struct cfg80211_bss *bss) +{ + struct ieee80211_elems_parse_params params = { + .start = start, + .len = len, + .action = action, + .filter = filter, + .crc = crc, + .bss = bss, + .link_id = -1, + }; + + return ieee802_11_parse_elems_full(¶ms); +} + +static inline struct ieee802_11_elems * +ieee802_11_parse_elems(const u8 *start, size_t len, bool action, + struct cfg80211_bss *bss) +{ + return ieee802_11_parse_elems_crc(start, len, action, 0, 0, bss); +} + +void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos, u8 frag_id); + +extern const int ieee802_1d_to_ac[8]; + +static inline int ieee80211_ac_from_tid(int tid) +{ + return ieee802_1d_to_ac[tid & 7]; +} + +void ieee80211_dynamic_ps_enable_work(struct work_struct *work); +void ieee80211_dynamic_ps_disable_work(struct work_struct *work); +void ieee80211_dynamic_ps_timer(struct timer_list *t); +void ieee80211_send_nullfunc(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + bool powersave); +void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); +void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, + struct ieee80211_hdr *hdr, bool ack, u16 tx_time); + +void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, + unsigned long queues, + enum queue_stop_reason reason, + bool refcounted); +void ieee80211_stop_vif_queues(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum queue_stop_reason reason); +void ieee80211_wake_vif_queues(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum queue_stop_reason reason); +void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, + unsigned long queues, + enum queue_stop_reason reason, + bool refcounted); +void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, + enum queue_stop_reason reason, + bool refcounted); +void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, + enum queue_stop_reason reason, + bool refcounted); +void ieee80211_add_pending_skb(struct ieee80211_local *local, + struct sk_buff *skb); +void ieee80211_add_pending_skbs(struct ieee80211_local *local, + struct sk_buff_head *skbs); +void ieee80211_flush_queues(struct 
ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, bool drop); +void __ieee80211_flush_queues(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + unsigned int queues, bool drop); + +static inline bool ieee80211_can_run_worker(struct ieee80211_local *local) +{ + /* + * It's unsafe to try to do any work during reconfigure flow. + * When the flow ends the work will be requeued. + */ + if (local->in_reconfig) + return false; + + /* + * If quiescing is set, we are racing with __ieee80211_suspend. + * __ieee80211_suspend flushes the workers after setting quiescing, + * and we check quiescing / suspended before enqueing new workers. + * We should abort the worker to avoid the races below. + */ + if (local->quiescing) + return false; + + /* + * We might already be suspended if the following scenario occurs: + * __ieee80211_suspend Control path + * + * if (local->quiescing) + * return; + * local->quiescing = true; + * flush_workqueue(); + * queue_work(...); + * local->suspended = true; + * local->quiescing = false; + * worker starts running... + */ + if (local->suspended) + return false; + + return true; +} + +int ieee80211_txq_setup_flows(struct ieee80211_local *local); +void ieee80211_txq_set_params(struct ieee80211_local *local); +void ieee80211_txq_teardown_flows(struct ieee80211_local *local); +void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct txq_info *txq, int tid); +void ieee80211_txq_purge(struct ieee80211_local *local, + struct txq_info *txqi); +void ieee80211_txq_remove_vlan(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); +void ieee80211_fill_txq_stats(struct cfg80211_txq_stats *txqstats, + struct txq_info *txqi); +void ieee80211_wake_txqs(struct tasklet_struct *t); +void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, + u16 transaction, u16 auth_alg, u16 status, + const u8 *extra, size_t extra_len, const u8 *bssid, + const u8 *da, const u8 *key, u8 key_len, u8 key_idx, + u32 tx_flags); +void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, + const u8 *da, const u8 *bssid, + u16 stype, u16 reason, + bool send_frame, u8 *frame_buf); +u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end); + +enum { + IEEE80211_PROBE_FLAG_DIRECTED = BIT(0), + IEEE80211_PROBE_FLAG_MIN_CONTENT = BIT(1), + IEEE80211_PROBE_FLAG_RANDOM_SN = BIT(2), +}; + +int ieee80211_build_preq_ies(struct ieee80211_sub_if_data *sdata, u8 *buffer, + size_t buffer_len, + struct ieee80211_scan_ies *ie_desc, + const u8 *ie, size_t ie_len, + u8 bands_used, u32 *rate_masks, + struct cfg80211_chan_def *chandef, + u32 flags); +struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, + const u8 *src, const u8 *dst, + u32 ratemask, + struct ieee80211_channel *chan, + const u8 *ssid, size_t ssid_len, + const u8 *ie, size_t ie_len, + u32 flags); +u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, + struct ieee802_11_elems *elems, + enum nl80211_band band, u32 *basic_rates); +int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link, + enum ieee80211_smps_mode smps_mode); +void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link); +void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata, + int link_id); + +size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset); +u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, + u16 cap); 
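/*
 * Illustrative sketch, not part of this patch: deferred work that can race
 * with suspend or HW reconfig is expected to bail out early through
 * ieee80211_can_run_worker() above; "example_work" below is a hypothetical
 * member used only for the container_of() in this sketch.
 */
static void example_work_fn(struct work_struct *work)
{
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local, example_work);

        if (!ieee80211_can_run_worker(local))
                return; /* requeued once reconfig/resume completes */

        /* ... safe to touch hardware and internal state here ... */
}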
+u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, + const struct cfg80211_chan_def *chandef, + u16 prot_mode, bool rifs_mode); +void ieee80211_ie_build_wide_bw_cs(u8 *pos, + const struct cfg80211_chan_def *chandef); +u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, + u32 cap); +u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, + const struct cfg80211_chan_def *chandef); +u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype); +u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos, + const struct ieee80211_sta_he_cap *he_cap, + u8 *end); +void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata, + enum ieee80211_smps_mode smps_mode, + struct sk_buff *skb); +u8 *ieee80211_ie_build_he_oper(u8 *pos, struct cfg80211_chan_def *chandef); +u8 *ieee80211_ie_build_eht_oper(u8 *pos, struct cfg80211_chan_def *chandef, + const struct ieee80211_sta_eht_cap *eht_cap); +int ieee80211_parse_bitrates(enum nl80211_chan_width width, + const struct ieee80211_supported_band *sband, + const u8 *srates, int srates_len, u32 *rates); +int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, bool need_basic, + enum nl80211_band band); +int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, bool need_basic, + enum nl80211_band band); +u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo); +void ieee80211_add_s1g_capab_ie(struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta_s1g_cap *caps, + struct sk_buff *skb); +void ieee80211_add_aid_request_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +u8 *ieee80211_ie_build_s1g_cap(u8 *pos, struct ieee80211_sta_s1g_cap *s1g_cap); + +/* channel management */ +bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper, + struct cfg80211_chan_def *chandef); +bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info, + const struct ieee80211_vht_operation *oper, + const struct ieee80211_ht_operation *htop, + struct cfg80211_chan_def *chandef); +void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation *eht_oper, + bool support_160, bool support_320, + struct cfg80211_chan_def *chandef); +bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata, + const struct ieee80211_he_operation *he_oper, + const struct ieee80211_eht_operation *eht_oper, + struct cfg80211_chan_def *chandef); +bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper, + struct cfg80211_chan_def *chandef); +ieee80211_conn_flags_t ieee80211_chandef_downgrade(struct cfg80211_chan_def *c); + +int __must_check +ieee80211_link_use_channel(struct ieee80211_link_data *link, + const struct cfg80211_chan_def *chandef, + enum ieee80211_chanctx_mode mode); +int __must_check +ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link, + const struct cfg80211_chan_def *chandef, + enum ieee80211_chanctx_mode mode, + bool radar_required); +int __must_check +ieee80211_link_use_reserved_context(struct ieee80211_link_data *link); +int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link); + +int __must_check +ieee80211_link_change_bandwidth(struct ieee80211_link_data *link, + const struct cfg80211_chan_def *chandef, + u64 *changed); +void ieee80211_link_release_channel(struct ieee80211_link_data *link); +void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link); +void 
ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link, + bool clear); +int ieee80211_chanctx_refcount(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx); + +void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, + struct ieee80211_chanctx *chanctx); +void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for); +bool ieee80211_is_radar_required(struct ieee80211_local *local); + +void ieee80211_dfs_cac_timer_work(struct work_struct *work); +void ieee80211_dfs_cac_cancel(struct ieee80211_local *local); +void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy, + struct wiphy_work *work); +int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata, + struct cfg80211_csa_settings *csa_settings); + +void ieee80211_recalc_dtim(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); +int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, + const struct cfg80211_chan_def *chandef, + enum ieee80211_chanctx_mode chanmode, + u8 radar_detect); +int ieee80211_max_num_channels(struct ieee80211_local *local); +void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx); + +/* TDLS */ +int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, int link_id, + u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, bool initiator, + const u8 *extra_ies, size_t extra_ies_len); +int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, enum nl80211_tdls_operation oper); +void ieee80211_tdls_peer_del_work(struct work_struct *wk); +int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev, + const u8 *addr, u8 oper_class, + struct cfg80211_chan_def *chandef); +void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy, + struct net_device *dev, + const u8 *addr); +void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata); +void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata, + const u8 *peer, u16 reason); +void +ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); + + +const char *ieee80211_get_reason_code_string(u16 reason_code); +u16 ieee80211_encode_usf(int val); +u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, + enum nl80211_iftype type); + +extern const struct ethtool_ops ieee80211_ethtool_ops; + +u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *pubsta, + int len, bool ampdu); +#ifdef CONFIG_MAC80211_NOINLINE +#define debug_noinline noinline +#else +#define debug_noinline +#endif + +void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache); +void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache); + +u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype); +u8 *ieee80211_ie_build_eht_cap(u8 *pos, + const struct ieee80211_sta_he_cap *he_cap, + const struct ieee80211_sta_eht_cap *eht_cap, + u8 *end, + bool for_ap); + +void +ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const u8 *he_cap_ie, u8 he_cap_len, + const struct ieee80211_eht_cap_elem *eht_cap_ie_elem, + u8 eht_cap_len, + struct link_sta_info *link_sta); +#endif /* IEEE80211_I_H */ diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c new file mode 100644 index 
0000000000..6e3bfb46af --- /dev/null +++ b/net/mac80211/iface.c @@ -0,0 +1,2356 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Interface handling + * + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> + * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (c) 2016 Intel Deutschland GmbH + * Copyright (C) 2018-2023 Intel Corporation + */ +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/if_arp.h> +#include <linux/netdevice.h> +#include <linux/rtnetlink.h> +#include <linux/kcov.h> +#include <net/mac80211.h> +#include <net/ieee80211_radiotap.h> +#include "ieee80211_i.h" +#include "sta_info.h" +#include "debugfs_netdev.h" +#include "mesh.h" +#include "led.h" +#include "driver-ops.h" +#include "wme.h" +#include "rate.h" + +/** + * DOC: Interface list locking + * + * The interface list in each struct ieee80211_local is protected + * three-fold: + * + * (1) modifications may only be done under the RTNL + * (2) modifications and readers are protected against each other by + * the iflist_mtx. + * (3) modifications are done in an RCU manner so atomic readers + * can traverse the list in RCU-safe blocks. + * + * As a consequence, reads (traversals) of the list can be protected + * by either the RTNL, the iflist_mtx or RCU. + */ + +static void ieee80211_iface_work(struct wiphy *wiphy, struct wiphy_work *work); + +bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_chanctx_conf *chanctx_conf; + int power; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + if (!chanctx_conf) { + rcu_read_unlock(); + return false; + } + + power = ieee80211_chandef_max_power(&chanctx_conf->def); + rcu_read_unlock(); + + if (sdata->deflink.user_power_level != IEEE80211_UNSET_POWER_LEVEL) + power = min(power, sdata->deflink.user_power_level); + + if (sdata->deflink.ap_power_level != IEEE80211_UNSET_POWER_LEVEL) + power = min(power, sdata->deflink.ap_power_level); + + if (power != sdata->vif.bss_conf.txpower) { + sdata->vif.bss_conf.txpower = power; + ieee80211_hw_config(sdata->local, 0); + return true; + } + + return false; +} + +void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata, + bool update_bss) +{ + if (__ieee80211_recalc_txpower(sdata) || + (update_bss && ieee80211_sdata_running(sdata))) + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_TXPOWER); +} + +static u32 __ieee80211_idle_off(struct ieee80211_local *local) +{ + if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) + return 0; + + local->hw.conf.flags &= ~IEEE80211_CONF_IDLE; + return IEEE80211_CONF_CHANGE_IDLE; +} + +static u32 __ieee80211_idle_on(struct ieee80211_local *local) +{ + if (local->hw.conf.flags & IEEE80211_CONF_IDLE) + return 0; + + ieee80211_flush_queues(local, NULL, false); + + local->hw.conf.flags |= IEEE80211_CONF_IDLE; + return IEEE80211_CONF_CHANGE_IDLE; +} + +static u32 __ieee80211_recalc_idle(struct ieee80211_local *local, + bool force_active) +{ + bool working, scanning, active; + unsigned int led_trig_start = 0, led_trig_stop = 0; + + lockdep_assert_held(&local->mtx); + + active = force_active || + !list_empty(&local->chanctx_list) || + local->monitors; + + working = !local->ops->remain_on_channel && + !list_empty(&local->roc_list); + + scanning = test_bit(SCAN_SW_SCANNING, &local->scanning) || + 
test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning); + + if (working || scanning) + led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK; + else + led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK; + + if (active) + led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; + else + led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; + + ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop); + + if (working || scanning || active) + return __ieee80211_idle_off(local); + return __ieee80211_idle_on(local); +} + +u32 ieee80211_idle_off(struct ieee80211_local *local) +{ + return __ieee80211_recalc_idle(local, true); +} + +void ieee80211_recalc_idle(struct ieee80211_local *local) +{ + u32 change = __ieee80211_recalc_idle(local, false); + if (change) + ieee80211_hw_config(local, change); +} + +static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr, + bool check_dup) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_sub_if_data *iter; + u64 new, mask, tmp; + u8 *m; + int ret = 0; + + if (is_zero_ether_addr(local->hw.wiphy->addr_mask)) + return 0; + + m = addr; + new = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | + ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | + ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); + + m = local->hw.wiphy->addr_mask; + mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | + ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | + ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); + + if (!check_dup) + return ret; + + mutex_lock(&local->iflist_mtx); + list_for_each_entry(iter, &local->interfaces, list) { + if (iter == sdata) + continue; + + if (iter->vif.type == NL80211_IFTYPE_MONITOR && + !(iter->u.mntr.flags & MONITOR_FLAG_ACTIVE)) + continue; + + m = iter->vif.addr; + tmp = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | + ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | + ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); + + if ((new & ~mask) != (tmp & ~mask)) { + ret = -EINVAL; + break; + } + } + mutex_unlock(&local->iflist_mtx); + + return ret; +} + +static int ieee80211_can_powered_addr_change(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_roc_work *roc; + struct ieee80211_local *local = sdata->local; + struct ieee80211_sub_if_data *scan_sdata; + int ret = 0; + + /* To be the most flexible here we want to only limit changing the + * address if the specific interface is doing offchannel work or + * scanning. + */ + if (netif_carrier_ok(sdata->dev)) + return -EBUSY; + + mutex_lock(&local->mtx); + + /* First check no ROC work is happening on this iface */ + list_for_each_entry(roc, &local->roc_list, list) { + if (roc->sdata != sdata) + continue; + + if (roc->started) { + ret = -EBUSY; + goto unlock; + } + } + + /* And if this iface is scanning */ + if (local->scanning) { + scan_sdata = rcu_dereference_protected(local->scan_sdata, + lockdep_is_held(&local->mtx)); + if (sdata == scan_sdata) + ret = -EBUSY; + } + + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + /* More interface types could be added here but changing the + * address while powered makes the most sense in client modes. 
+ */ + break; + default: + ret = -EOPNOTSUPP; + } + +unlock: + mutex_unlock(&local->mtx); + return ret; +} + +static int ieee80211_change_mac(struct net_device *dev, void *addr) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sockaddr *sa = addr; + bool check_dup = true; + bool live = false; + int ret; + + if (ieee80211_sdata_running(sdata)) { + ret = ieee80211_can_powered_addr_change(sdata); + if (ret) + return ret; + + live = true; + } + + if (sdata->vif.type == NL80211_IFTYPE_MONITOR && + !(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE)) + check_dup = false; + + ret = ieee80211_verify_mac(sdata, sa->sa_data, check_dup); + if (ret) + return ret; + + if (live) + drv_remove_interface(local, sdata); + ret = eth_mac_addr(dev, sa); + + if (ret == 0) { + memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN); + ether_addr_copy(sdata->vif.bss_conf.addr, sdata->vif.addr); + } + + /* Regardless of eth_mac_addr() return we still want to add the + * interface back. This should not fail... + */ + if (live) + WARN_ON(drv_add_interface(local, sdata)); + + return ret; +} + +static inline int identical_mac_addr_allowed(int type1, int type2) +{ + return type1 == NL80211_IFTYPE_MONITOR || + type2 == NL80211_IFTYPE_MONITOR || + type1 == NL80211_IFTYPE_P2P_DEVICE || + type2 == NL80211_IFTYPE_P2P_DEVICE || + (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_AP_VLAN) || + (type1 == NL80211_IFTYPE_AP_VLAN && + (type2 == NL80211_IFTYPE_AP || + type2 == NL80211_IFTYPE_AP_VLAN)); +} + +static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata, + enum nl80211_iftype iftype) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_sub_if_data *nsdata; + int ret; + + ASSERT_RTNL(); + + /* we hold the RTNL here so can safely walk the list */ + list_for_each_entry(nsdata, &local->interfaces, list) { + if (nsdata != sdata && ieee80211_sdata_running(nsdata)) { + /* + * Only OCB and monitor mode may coexist + */ + if ((sdata->vif.type == NL80211_IFTYPE_OCB && + nsdata->vif.type != NL80211_IFTYPE_MONITOR) || + (sdata->vif.type != NL80211_IFTYPE_MONITOR && + nsdata->vif.type == NL80211_IFTYPE_OCB)) + return -EBUSY; + + /* + * Allow only a single IBSS interface to be up at any + * time. This is restricted because beacon distribution + * cannot work properly if both are in the same IBSS. + * + * To remove this restriction we'd have to disallow them + * from setting the same SSID on different IBSS interfaces + * belonging to the same hardware. Then, however, we're + * faced with having to adopt two different TSF timers... + */ + if (iftype == NL80211_IFTYPE_ADHOC && + nsdata->vif.type == NL80211_IFTYPE_ADHOC) + return -EBUSY; + /* + * will not add another interface while any channel + * switch is active. + */ + if (nsdata->vif.bss_conf.csa_active) + return -EBUSY; + + /* + * The remaining checks are only performed for interfaces + * with the same MAC address. 
+ */ + if (!ether_addr_equal(sdata->vif.addr, + nsdata->vif.addr)) + continue; + + /* + * check whether it may have the same address + */ + if (!identical_mac_addr_allowed(iftype, + nsdata->vif.type)) + return -ENOTUNIQ; + + /* No support for VLAN with MLO yet */ + if (iftype == NL80211_IFTYPE_AP_VLAN && + sdata->wdev.use_4addr && + nsdata->vif.type == NL80211_IFTYPE_AP && + nsdata->vif.valid_links) + return -EOPNOTSUPP; + + /* + * can only add VLANs to enabled APs + */ + if (iftype == NL80211_IFTYPE_AP_VLAN && + nsdata->vif.type == NL80211_IFTYPE_AP) + sdata->bss = &nsdata->u.ap; + } + } + + mutex_lock(&local->chanctx_mtx); + ret = ieee80211_check_combinations(sdata, NULL, 0, 0); + mutex_unlock(&local->chanctx_mtx); + return ret; +} + +static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata, + enum nl80211_iftype iftype) +{ + int n_queues = sdata->local->hw.queues; + int i; + + if (iftype == NL80211_IFTYPE_NAN) + return 0; + + if (iftype != NL80211_IFTYPE_P2P_DEVICE) { + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + if (WARN_ON_ONCE(sdata->vif.hw_queue[i] == + IEEE80211_INVAL_HW_QUEUE)) + return -EINVAL; + if (WARN_ON_ONCE(sdata->vif.hw_queue[i] >= + n_queues)) + return -EINVAL; + } + } + + if ((iftype != NL80211_IFTYPE_AP && + iftype != NL80211_IFTYPE_P2P_GO && + iftype != NL80211_IFTYPE_MESH_POINT) || + !ieee80211_hw_check(&sdata->local->hw, QUEUE_CONTROL)) { + sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE; + return 0; + } + + if (WARN_ON_ONCE(sdata->vif.cab_queue == IEEE80211_INVAL_HW_QUEUE)) + return -EINVAL; + + if (WARN_ON_ONCE(sdata->vif.cab_queue >= n_queues)) + return -EINVAL; + + return 0; +} + +static int ieee80211_open(struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + int err; + + /* fail early if user set an invalid address */ + if (!is_valid_ether_addr(dev->dev_addr)) + return -EADDRNOTAVAIL; + + err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type); + if (err) + return err; + + wiphy_lock(sdata->local->hw.wiphy); + err = ieee80211_do_open(&sdata->wdev, true); + wiphy_unlock(sdata->local->hw.wiphy); + + return err; +} + +static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_down) +{ + struct ieee80211_local *local = sdata->local; + unsigned long flags; + struct sk_buff *skb, *tmp; + u32 hw_reconf_flags = 0; + int i, flushed; + struct ps_data *ps; + struct cfg80211_chan_def chandef; + bool cancel_scan; + struct cfg80211_nan_func *func; + + clear_bit(SDATA_STATE_RUNNING, &sdata->state); + synchronize_rcu(); /* flush _ieee80211_wake_txqs() */ + + cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata; + if (cancel_scan) + ieee80211_scan_cancel(local); + + ieee80211_roc_purge(local, sdata); + + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + ieee80211_mgd_stop(sdata); + break; + case NL80211_IFTYPE_ADHOC: + ieee80211_ibss_stop(sdata); + break; + case NL80211_IFTYPE_MONITOR: + if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) + break; + list_del_rcu(&sdata->u.mntr.list); + break; + default: + break; + } + + /* + * Remove all stations associated with this interface. + * + * This must be done before calling ops->remove_interface() + * because otherwise we can later invoke ops->sta_notify() + * whenever the STAs are removed, and that invalidates driver + * assumptions about always getting a vif pointer that is valid + * (because if we remove a STA after ops->remove_interface() + * the driver will have removed the vif info already!) 
+ * + * For AP_VLANs stations may exist since there's nothing else that + * would have removed them, but in other modes there shouldn't + * be any stations. + */ + flushed = sta_info_flush(sdata); + WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP_VLAN && flushed > 0); + + /* don't count this interface for allmulti while it is down */ + if (sdata->flags & IEEE80211_SDATA_ALLMULTI) + atomic_dec(&local->iff_allmultis); + + if (sdata->vif.type == NL80211_IFTYPE_AP) { + local->fif_pspoll--; + local->fif_probe_req--; + } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { + local->fif_probe_req--; + } + + if (sdata->dev) { + netif_addr_lock_bh(sdata->dev); + spin_lock_bh(&local->filter_lock); + __hw_addr_unsync(&local->mc_list, &sdata->dev->mc, + sdata->dev->addr_len); + spin_unlock_bh(&local->filter_lock); + netif_addr_unlock_bh(sdata->dev); + } + + del_timer_sync(&local->dynamic_ps_timer); + cancel_work_sync(&local->dynamic_ps_enable_work); + + cancel_work_sync(&sdata->recalc_smps); + + sdata_lock(sdata); + WARN(ieee80211_vif_is_mld(&sdata->vif), + "destroying interface with valid links 0x%04x\n", + sdata->vif.valid_links); + + mutex_lock(&local->mtx); + sdata->vif.bss_conf.csa_active = false; + if (sdata->vif.type == NL80211_IFTYPE_STATION) + sdata->deflink.u.mgd.csa_waiting_bcn = false; + if (sdata->deflink.csa_block_tx) { + ieee80211_wake_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_CSA); + sdata->deflink.csa_block_tx = false; + } + mutex_unlock(&local->mtx); + sdata_unlock(sdata); + + cancel_work_sync(&sdata->deflink.csa_finalize_work); + cancel_work_sync(&sdata->deflink.color_change_finalize_work); + + cancel_delayed_work_sync(&sdata->deflink.dfs_cac_timer_work); + + if (sdata->wdev.cac_started) { + chandef = sdata->vif.bss_conf.chandef; + WARN_ON(local->suspended); + mutex_lock(&local->mtx); + ieee80211_link_release_channel(&sdata->deflink); + mutex_unlock(&local->mtx); + cfg80211_cac_event(sdata->dev, &chandef, + NL80211_RADAR_CAC_ABORTED, + GFP_KERNEL); + } + + if (sdata->vif.type == NL80211_IFTYPE_AP) { + WARN_ON(!list_empty(&sdata->u.ap.vlans)); + } else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + /* remove all packets in parent bc_buf pointing to this dev */ + ps = &sdata->bss->ps; + + spin_lock_irqsave(&ps->bc_buf.lock, flags); + skb_queue_walk_safe(&ps->bc_buf, skb, tmp) { + if (skb->dev == sdata->dev) { + __skb_unlink(skb, &ps->bc_buf); + local->total_ps_buffered--; + ieee80211_free_txskb(&local->hw, skb); + } + } + spin_unlock_irqrestore(&ps->bc_buf.lock, flags); + } + + if (going_down) + local->open_count--; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + mutex_lock(&local->mtx); + list_del(&sdata->u.vlan.list); + mutex_unlock(&local->mtx); + RCU_INIT_POINTER(sdata->vif.bss_conf.chanctx_conf, NULL); + /* see comment in the default case below */ + ieee80211_free_keys(sdata, true); + /* no need to tell driver */ + break; + case NL80211_IFTYPE_MONITOR: + if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) { + local->cooked_mntrs--; + break; + } + + local->monitors--; + if (local->monitors == 0) { + local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR; + hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; + } + + ieee80211_adjust_monitor_flags(sdata, -1); + break; + case NL80211_IFTYPE_NAN: + /* clean all the functions */ + spin_lock_bh(&sdata->u.nan.func_lock); + + idr_for_each_entry(&sdata->u.nan.function_inst_ids, func, i) { + idr_remove(&sdata->u.nan.function_inst_ids, i); + cfg80211_free_nan_func(func); + } + 
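/* all function instances were removed and freed above; release the IDR */ +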
idr_destroy(&sdata->u.nan.function_inst_ids); + + spin_unlock_bh(&sdata->u.nan.func_lock); + break; + case NL80211_IFTYPE_P2P_DEVICE: + /* relies on synchronize_rcu() below */ + RCU_INIT_POINTER(local->p2p_sdata, NULL); + fallthrough; + default: + wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->work); + /* + * When we get here, the interface is marked down. + * Free the remaining keys, if there are any + * (which can happen in AP mode if userspace sets + * keys before the interface is operating) + * + * Force the key freeing to always synchronize_net() + * to wait for the RX path in case it is using this + * interface enqueuing frames at this very time on + * another CPU. + */ + ieee80211_free_keys(sdata, true); + skb_queue_purge(&sdata->skb_queue); + skb_queue_purge(&sdata->status_queue); + } + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { + skb_queue_walk_safe(&local->pending[i], skb, tmp) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + if (info->control.vif == &sdata->vif) { + __skb_unlink(skb, &local->pending[i]); + ieee80211_free_txskb(&local->hw, skb); + } + } + } + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + ieee80211_txq_remove_vlan(local, sdata); + + sdata->bss = NULL; + + if (local->open_count == 0) + ieee80211_clear_tx_pending(local); + + sdata->vif.bss_conf.beacon_int = 0; + + /* + * If the interface goes down while suspended, presumably because + * the device was unplugged and that happens before our resume, + * then the driver is already unconfigured and the remainder of + * this function isn't needed. + * XXX: what about WoWLAN? If the device has software state, e.g. + * memory allocated, it might expect teardown commands from + * mac80211 here? + */ + if (local->suspended) { + WARN_ON(local->wowlan); + WARN_ON(rcu_access_pointer(local->monitor_sdata)); + return; + } + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + break; + case NL80211_IFTYPE_MONITOR: + if (local->monitors == 0) + ieee80211_del_virtual_monitor(local); + + mutex_lock(&local->mtx); + ieee80211_recalc_idle(local); + mutex_unlock(&local->mtx); + + if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE)) + break; + + fallthrough; + default: + if (going_down) + drv_remove_interface(local, sdata); + } + + ieee80211_recalc_ps(local); + + if (cancel_scan) + wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work); + + if (local->open_count == 0) { + ieee80211_stop_device(local); + + /* no reconfiguring after stop! 
*/ + return; + } + + /* do after stop to avoid reconfiguring when we stop anyway */ + ieee80211_configure_filter(local); + ieee80211_hw_config(local, hw_reconf_flags); + + if (local->monitors == local->open_count) + ieee80211_add_virtual_monitor(local); +} + +static void ieee80211_stop_mbssid(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_sub_if_data *tx_sdata, *non_tx_sdata, *tmp_sdata; + struct ieee80211_vif *tx_vif = sdata->vif.mbssid_tx_vif; + + if (!tx_vif) + return; + + tx_sdata = vif_to_sdata(tx_vif); + sdata->vif.mbssid_tx_vif = NULL; + + list_for_each_entry_safe(non_tx_sdata, tmp_sdata, + &tx_sdata->local->interfaces, list) { + if (non_tx_sdata != sdata && non_tx_sdata != tx_sdata && + non_tx_sdata->vif.mbssid_tx_vif == tx_vif && + ieee80211_sdata_running(non_tx_sdata)) { + non_tx_sdata->vif.mbssid_tx_vif = NULL; + dev_close(non_tx_sdata->wdev.netdev); + } + } + + if (sdata != tx_sdata && ieee80211_sdata_running(tx_sdata)) { + tx_sdata->vif.mbssid_tx_vif = NULL; + dev_close(tx_sdata->wdev.netdev); + } +} + +static int ieee80211_stop(struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + /* close dependent VLAN and MBSSID interfaces before locking wiphy */ + if (sdata->vif.type == NL80211_IFTYPE_AP) { + struct ieee80211_sub_if_data *vlan, *tmpsdata; + + list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, + u.vlan.list) + dev_close(vlan->dev); + + ieee80211_stop_mbssid(sdata); + } + + cancel_work_sync(&sdata->activate_links_work); + + wiphy_lock(sdata->local->hw.wiphy); + ieee80211_do_stop(sdata, true); + wiphy_unlock(sdata->local->hw.wiphy); + + return 0; +} + +static void ieee80211_set_multicast_list(struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + int allmulti, sdata_allmulti; + + allmulti = !!(dev->flags & IFF_ALLMULTI); + sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI); + + if (allmulti != sdata_allmulti) { + if (dev->flags & IFF_ALLMULTI) + atomic_inc(&local->iff_allmultis); + else + atomic_dec(&local->iff_allmultis); + sdata->flags ^= IEEE80211_SDATA_ALLMULTI; + } + + spin_lock_bh(&local->filter_lock); + __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len); + spin_unlock_bh(&local->filter_lock); + ieee80211_queue_work(&local->hw, &local->reconfig_filter); +} + +/* + * Called when the netdev is removed or, by the code below, before + * the interface type changes. 
+ */ +static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata) +{ + /* free extra data */ + ieee80211_free_keys(sdata, false); + + ieee80211_debugfs_remove_netdev(sdata); + + ieee80211_destroy_frag_cache(&sdata->frags); + + if (ieee80211_vif_is_mesh(&sdata->vif)) + ieee80211_mesh_teardown_sdata(sdata); + + ieee80211_vif_clear_links(sdata); + ieee80211_link_stop(&sdata->deflink); +} + +static void ieee80211_uninit(struct net_device *dev) +{ + ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev)); +} + +static void +ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + dev_fetch_sw_netstats(stats, dev->tstats); +} + +static int ieee80211_netdev_setup_tc(struct net_device *dev, + enum tc_setup_type type, void *type_data) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + + return drv_net_setup_tc(local, sdata, dev, type, type_data); +} + +static const struct net_device_ops ieee80211_dataif_ops = { + .ndo_open = ieee80211_open, + .ndo_stop = ieee80211_stop, + .ndo_uninit = ieee80211_uninit, + .ndo_start_xmit = ieee80211_subif_start_xmit, + .ndo_set_rx_mode = ieee80211_set_multicast_list, + .ndo_set_mac_address = ieee80211_change_mac, + .ndo_get_stats64 = ieee80211_get_stats64, + .ndo_setup_tc = ieee80211_netdev_setup_tc, +}; + +static u16 ieee80211_monitor_select_queue(struct net_device *dev, + struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr; + int len_rthdr; + + if (local->hw.queues < IEEE80211_NUM_ACS) + return 0; + + /* reset flags and info before parsing radiotap header */ + memset(info, 0, sizeof(*info)); + + if (!ieee80211_parse_tx_radiotap(skb, dev)) + return 0; /* doesn't matter, frame will be dropped */ + + len_rthdr = ieee80211_get_radiotap_len(skb->data); + hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); + if (skb->len < len_rthdr + 2 || + skb->len < len_rthdr + ieee80211_hdrlen(hdr->frame_control)) + return 0; /* doesn't matter, frame will be dropped */ + + return ieee80211_select_queue_80211(sdata, skb, hdr); +} + +static const struct net_device_ops ieee80211_monitorif_ops = { + .ndo_open = ieee80211_open, + .ndo_stop = ieee80211_stop, + .ndo_uninit = ieee80211_uninit, + .ndo_start_xmit = ieee80211_monitor_start_xmit, + .ndo_set_rx_mode = ieee80211_set_multicast_list, + .ndo_set_mac_address = ieee80211_change_mac, + .ndo_select_queue = ieee80211_monitor_select_queue, + .ndo_get_stats64 = ieee80211_get_stats64, +}; + +static int ieee80211_netdev_fill_forward_path(struct net_device_path_ctx *ctx, + struct net_device_path *path) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_local *local; + struct sta_info *sta; + int ret = -ENOENT; + + sdata = IEEE80211_DEV_TO_SUB_IF(ctx->dev); + local = sdata->local; + + if (!local->ops->net_fill_forward_path) + return -EOPNOTSUPP; + + rcu_read_lock(); + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + sta = rcu_dereference(sdata->u.vlan.sta); + if (sta) + break; + if (sdata->wdev.use_4addr) + goto out; + if (is_multicast_ether_addr(ctx->daddr)) + goto out; + sta = sta_info_get_bss(sdata, ctx->daddr); + break; + case NL80211_IFTYPE_AP: + if (is_multicast_ether_addr(ctx->daddr)) + goto out; + sta = sta_info_get(sdata, ctx->daddr); + break; + case NL80211_IFTYPE_STATION: + if 
(sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) { + sta = sta_info_get(sdata, ctx->daddr); + if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { + if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) + goto out; + + break; + } + } + + sta = sta_info_get(sdata, sdata->deflink.u.mgd.bssid); + break; + default: + goto out; + } + + if (!sta) + goto out; + + ret = drv_net_fill_forward_path(local, sdata, &sta->sta, ctx, path); +out: + rcu_read_unlock(); + + return ret; +} + +static const struct net_device_ops ieee80211_dataif_8023_ops = { + .ndo_open = ieee80211_open, + .ndo_stop = ieee80211_stop, + .ndo_uninit = ieee80211_uninit, + .ndo_start_xmit = ieee80211_subif_start_xmit_8023, + .ndo_set_rx_mode = ieee80211_set_multicast_list, + .ndo_set_mac_address = ieee80211_change_mac, + .ndo_get_stats64 = ieee80211_get_stats64, + .ndo_fill_forward_path = ieee80211_netdev_fill_forward_path, + .ndo_setup_tc = ieee80211_netdev_setup_tc, +}; + +static bool ieee80211_iftype_supports_hdr_offload(enum nl80211_iftype iftype) +{ + switch (iftype) { + /* P2P GO and client are mapped to AP/STATION types */ + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_STATION: + return true; + default: + return false; + } +} + +static bool ieee80211_set_sdata_offload_flags(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + u32 flags; + + flags = sdata->vif.offload_flags; + + if (ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD) && + ieee80211_iftype_supports_hdr_offload(sdata->vif.type)) { + flags |= IEEE80211_OFFLOAD_ENCAP_ENABLED; + + if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG) && + local->hw.wiphy->frag_threshold != (u32)-1) + flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED; + + if (local->monitors) + flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED; + } else { + flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED; + } + + if (ieee80211_hw_check(&local->hw, SUPPORTS_RX_DECAP_OFFLOAD) && + ieee80211_iftype_supports_hdr_offload(sdata->vif.type)) { + flags |= IEEE80211_OFFLOAD_DECAP_ENABLED; + + if (local->monitors && + !ieee80211_hw_check(&local->hw, SUPPORTS_CONC_MON_RX_DECAP)) + flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED; + } else { + flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED; + } + + if (sdata->vif.offload_flags == flags) + return false; + + sdata->vif.offload_flags = flags; + ieee80211_check_fast_rx_iface(sdata); + return true; +} + +static void ieee80211_set_vif_encap_ops(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_sub_if_data *bss = sdata; + bool enabled; + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + if (!sdata->bss) + return; + + bss = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); + } + + if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD) || + !ieee80211_iftype_supports_hdr_offload(bss->vif.type)) + return; + + enabled = bss->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED; + if (sdata->wdev.use_4addr && + !(bss->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_4ADDR)) + enabled = false; + + sdata->dev->netdev_ops = enabled ? 
&ieee80211_dataif_8023_ops : + &ieee80211_dataif_ops; +} + +static void ieee80211_recalc_sdata_offload(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_sub_if_data *vsdata; + + if (ieee80211_set_sdata_offload_flags(sdata)) { + drv_update_vif_offload(local, sdata); + ieee80211_set_vif_encap_ops(sdata); + } + + list_for_each_entry(vsdata, &local->interfaces, list) { + if (vsdata->vif.type != NL80211_IFTYPE_AP_VLAN || + vsdata->bss != &sdata->u.ap) + continue; + + ieee80211_set_vif_encap_ops(vsdata); + } +} + +void ieee80211_recalc_offload(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + + if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD)) + return; + + mutex_lock(&local->iflist_mtx); + + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + + ieee80211_recalc_sdata_offload(sdata); + } + + mutex_unlock(&local->iflist_mtx); +} + +void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, + const int offset) +{ + struct ieee80211_local *local = sdata->local; + u32 flags = sdata->u.mntr.flags; + +#define ADJUST(_f, _s) do { \ + if (flags & MONITOR_FLAG_##_f) \ + local->fif_##_s += offset; \ + } while (0) + + ADJUST(FCSFAIL, fcsfail); + ADJUST(PLCPFAIL, plcpfail); + ADJUST(CONTROL, control); + ADJUST(CONTROL, pspoll); + ADJUST(OTHER_BSS, other_bss); + +#undef ADJUST +} + +static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + int i; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) + sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE; + else if (local->hw.queues >= IEEE80211_NUM_ACS) + sdata->vif.hw_queue[i] = i; + else + sdata->vif.hw_queue[i] = 0; + } + sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE; +} + +static void ieee80211_sdata_init(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + sdata->local = local; + + /* + * Initialize the default link, so we can use link_id 0 for non-MLD, + * and that continues to work for non-MLD-aware drivers that use just + * vif.bss_conf instead of vif.link_conf. + * + * Note that we never change this, so if link ID 0 isn't used in an + * MLD connection, we get a separate allocation for it. + */ + ieee80211_link_init(sdata, -1, &sdata->deflink, &sdata->vif.bss_conf); +} + +int ieee80211_add_virtual_monitor(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + int ret; + + if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) + return 0; + + ASSERT_RTNL(); + lockdep_assert_wiphy(local->hw.wiphy); + + if (local->monitor_sdata) + return 0; + + sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); + if (!sdata) + return -ENOMEM; + + /* set up data */ + sdata->vif.type = NL80211_IFTYPE_MONITOR; + snprintf(sdata->name, IFNAMSIZ, "%s-monitor", + wiphy_name(local->hw.wiphy)); + sdata->wdev.iftype = NL80211_IFTYPE_MONITOR; + mutex_init(&sdata->wdev.mtx); + + ieee80211_sdata_init(local, sdata); + + ieee80211_set_default_queues(sdata); + + ret = drv_add_interface(local, sdata); + if (WARN_ON(ret)) { + /* ok .. stupid driver, it asked for this! 
*/ + kfree(sdata); + return ret; + } + + set_bit(SDATA_STATE_RUNNING, &sdata->state); + + ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR); + if (ret) { + kfree(sdata); + return ret; + } + + mutex_lock(&local->iflist_mtx); + rcu_assign_pointer(local->monitor_sdata, sdata); + mutex_unlock(&local->iflist_mtx); + + sdata_lock(sdata); + mutex_lock(&local->mtx); + ret = ieee80211_link_use_channel(&sdata->deflink, &local->monitor_chandef, + IEEE80211_CHANCTX_EXCLUSIVE); + mutex_unlock(&local->mtx); + sdata_unlock(sdata); + if (ret) { + mutex_lock(&local->iflist_mtx); + RCU_INIT_POINTER(local->monitor_sdata, NULL); + mutex_unlock(&local->iflist_mtx); + synchronize_net(); + drv_remove_interface(local, sdata); + mutex_destroy(&sdata->wdev.mtx); + kfree(sdata); + return ret; + } + + skb_queue_head_init(&sdata->skb_queue); + skb_queue_head_init(&sdata->status_queue); + wiphy_work_init(&sdata->work, ieee80211_iface_work); + + return 0; +} + +void ieee80211_del_virtual_monitor(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + + if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) + return; + + ASSERT_RTNL(); + lockdep_assert_wiphy(local->hw.wiphy); + + mutex_lock(&local->iflist_mtx); + + sdata = rcu_dereference_protected(local->monitor_sdata, + lockdep_is_held(&local->iflist_mtx)); + if (!sdata) { + mutex_unlock(&local->iflist_mtx); + return; + } + + RCU_INIT_POINTER(local->monitor_sdata, NULL); + mutex_unlock(&local->iflist_mtx); + + synchronize_net(); + + sdata_lock(sdata); + mutex_lock(&local->mtx); + ieee80211_link_release_channel(&sdata->deflink); + mutex_unlock(&local->mtx); + sdata_unlock(sdata); + + drv_remove_interface(local, sdata); + + mutex_destroy(&sdata->wdev.mtx); + kfree(sdata); +} + +/* + * NOTE: Be very careful when changing this function, it must NOT return + * an error on interface type changes that have been pre-checked, so most + * checks should be in ieee80211_check_concurrent_iface. 
+ */ +int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + struct net_device *dev = wdev->netdev; + struct ieee80211_local *local = sdata->local; + u64 changed = 0; + int res; + u32 hw_reconf_flags = 0; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: { + struct ieee80211_sub_if_data *master; + + if (!sdata->bss) + return -ENOLINK; + + mutex_lock(&local->mtx); + list_add(&sdata->u.vlan.list, &sdata->bss->vlans); + mutex_unlock(&local->mtx); + + master = container_of(sdata->bss, + struct ieee80211_sub_if_data, u.ap); + sdata->control_port_protocol = + master->control_port_protocol; + sdata->control_port_no_encrypt = + master->control_port_no_encrypt; + sdata->control_port_over_nl80211 = + master->control_port_over_nl80211; + sdata->control_port_no_preauth = + master->control_port_no_preauth; + sdata->vif.cab_queue = master->vif.cab_queue; + memcpy(sdata->vif.hw_queue, master->vif.hw_queue, + sizeof(sdata->vif.hw_queue)); + sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef; + + mutex_lock(&local->key_mtx); + sdata->crypto_tx_tailroom_needed_cnt += + master->crypto_tx_tailroom_needed_cnt; + mutex_unlock(&local->key_mtx); + + break; + } + case NL80211_IFTYPE_AP: + sdata->bss = &sdata->u.ap; + break; + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_P2P_DEVICE: + case NL80211_IFTYPE_OCB: + case NL80211_IFTYPE_NAN: + /* no special treatment */ + break; + case NL80211_IFTYPE_UNSPECIFIED: + case NUM_NL80211_IFTYPES: + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_WDS: + /* cannot happen */ + WARN_ON(1); + break; + } + + if (local->open_count == 0) { + /* here we can consider everything in good order (again) */ + local->reconfig_failure = false; + + res = drv_start(local); + if (res) + goto err_del_bss; + /* we're brought up, everything changes */ + hw_reconf_flags = ~0; + ieee80211_led_radio(local, true); + ieee80211_mod_tpt_led_trig(local, + IEEE80211_TPT_LEDTRIG_FL_RADIO, 0); + } + + /* + * Copy the hopefully now-present MAC address to + * this interface, if it has the special null one. 
+ */ + if (dev && is_zero_ether_addr(dev->dev_addr)) { + eth_hw_addr_set(dev, local->hw.wiphy->perm_addr); + memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); + + if (!is_valid_ether_addr(dev->dev_addr)) { + res = -EADDRNOTAVAIL; + goto err_stop; + } + } + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + /* no need to tell driver, but set carrier and chanctx */ + if (sdata->bss->active) { + ieee80211_link_vlan_copy_chanctx(&sdata->deflink); + netif_carrier_on(dev); + ieee80211_set_vif_encap_ops(sdata); + } else { + netif_carrier_off(dev); + } + break; + case NL80211_IFTYPE_MONITOR: + if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) { + local->cooked_mntrs++; + break; + } + + if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) { + res = drv_add_interface(local, sdata); + if (res) + goto err_stop; + } else if (local->monitors == 0 && local->open_count == 0) { + res = ieee80211_add_virtual_monitor(local); + if (res) + goto err_stop; + } + + /* must be before the call to ieee80211_configure_filter */ + local->monitors++; + if (local->monitors == 1) { + local->hw.conf.flags |= IEEE80211_CONF_MONITOR; + hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; + } + + ieee80211_adjust_monitor_flags(sdata, 1); + ieee80211_configure_filter(local); + ieee80211_recalc_offload(local); + mutex_lock(&local->mtx); + ieee80211_recalc_idle(local); + mutex_unlock(&local->mtx); + + netif_carrier_on(dev); + break; + default: + if (coming_up) { + ieee80211_del_virtual_monitor(local); + ieee80211_set_sdata_offload_flags(sdata); + + res = drv_add_interface(local, sdata); + if (res) + goto err_stop; + + ieee80211_set_vif_encap_ops(sdata); + res = ieee80211_check_queues(sdata, + ieee80211_vif_type_p2p(&sdata->vif)); + if (res) + goto err_del_interface; + } + + if (sdata->vif.type == NL80211_IFTYPE_AP) { + local->fif_pspoll++; + local->fif_probe_req++; + + ieee80211_configure_filter(local); + } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { + local->fif_probe_req++; + } + + if (sdata->vif.probe_req_reg) + drv_config_iface_filter(local, sdata, + FIF_PROBE_REQ, + FIF_PROBE_REQ); + + if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE && + sdata->vif.type != NL80211_IFTYPE_NAN) + changed |= ieee80211_reset_erp_info(sdata); + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + changed); + + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_OCB: + netif_carrier_off(dev); + break; + case NL80211_IFTYPE_P2P_DEVICE: + case NL80211_IFTYPE_NAN: + break; + default: + /* not reached */ + WARN_ON(1); + } + + /* + * Set default queue parameters so drivers don't + * need to initialise the hardware if the hardware + * doesn't start up with sane defaults. + * Enable QoS for anything but station interfaces. + */ + ieee80211_set_wmm_default(&sdata->deflink, true, + sdata->vif.type != NL80211_IFTYPE_STATION); + } + + switch (sdata->vif.type) { + case NL80211_IFTYPE_P2P_DEVICE: + rcu_assign_pointer(local->p2p_sdata, sdata); + break; + case NL80211_IFTYPE_MONITOR: + if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) + break; + list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list); + break; + default: + break; + } + + /* + * set_multicast_list will be invoked by the networking core + * which will check whether any increments here were done in + * error and sync them down to the hardware as filter flags. 
+ */ + if (sdata->flags & IEEE80211_SDATA_ALLMULTI) + atomic_inc(&local->iff_allmultis); + + if (coming_up) + local->open_count++; + + if (hw_reconf_flags) + ieee80211_hw_config(local, hw_reconf_flags); + + ieee80211_recalc_ps(local); + + set_bit(SDATA_STATE_RUNNING, &sdata->state); + + return 0; + err_del_interface: + drv_remove_interface(local, sdata); + err_stop: + if (!local->open_count) + drv_stop(local); + err_del_bss: + sdata->bss = NULL; + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + mutex_lock(&local->mtx); + list_del(&sdata->u.vlan.list); + mutex_unlock(&local->mtx); + } + /* might already be clear but that doesn't matter */ + clear_bit(SDATA_STATE_RUNNING, &sdata->state); + return res; +} + +static void ieee80211_if_free(struct net_device *dev) +{ + free_percpu(dev->tstats); +} + +static void ieee80211_if_setup(struct net_device *dev) +{ + ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + dev->priv_flags |= IFF_NO_QUEUE; + dev->netdev_ops = &ieee80211_dataif_ops; + dev->needs_free_netdev = true; + dev->priv_destructor = ieee80211_if_free; +} + +static void ieee80211_iface_process_skb(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + if (ieee80211_is_action(mgmt->frame_control) && + mgmt->u.action.category == WLAN_CATEGORY_BACK) { + struct sta_info *sta; + int len = skb->len; + + mutex_lock(&local->sta_mtx); + sta = sta_info_get_bss(sdata, mgmt->sa); + if (sta) { + switch (mgmt->u.action.u.addba_req.action_code) { + case WLAN_ACTION_ADDBA_REQ: + ieee80211_process_addba_request(local, sta, + mgmt, len); + break; + case WLAN_ACTION_ADDBA_RESP: + ieee80211_process_addba_resp(local, sta, + mgmt, len); + break; + case WLAN_ACTION_DELBA: + ieee80211_process_delba(sdata, sta, + mgmt, len); + break; + default: + WARN_ON(1); + break; + } + } + mutex_unlock(&local->sta_mtx); + } else if (ieee80211_is_action(mgmt->frame_control) && + mgmt->u.action.category == WLAN_CATEGORY_VHT) { + switch (mgmt->u.action.u.vht_group_notif.action_code) { + case WLAN_VHT_ACTION_OPMODE_NOTIF: { + struct ieee80211_rx_status *status; + enum nl80211_band band; + struct sta_info *sta; + u8 opmode; + + status = IEEE80211_SKB_RXCB(skb); + band = status->band; + opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; + + mutex_lock(&local->sta_mtx); + sta = sta_info_get_bss(sdata, mgmt->sa); + + if (sta) + ieee80211_vht_handle_opmode(sdata, + &sta->deflink, + opmode, band); + + mutex_unlock(&local->sta_mtx); + break; + } + case WLAN_VHT_ACTION_GROUPID_MGMT: + ieee80211_process_mu_groups(sdata, &sdata->deflink, + mgmt); + break; + default: + WARN_ON(1); + break; + } + } else if (ieee80211_is_action(mgmt->frame_control) && + mgmt->u.action.category == WLAN_CATEGORY_S1G) { + switch (mgmt->u.action.u.s1g.action_code) { + case WLAN_S1G_TWT_TEARDOWN: + case WLAN_S1G_TWT_SETUP: + ieee80211_s1g_rx_twt_action(sdata, skb); + break; + default: + break; + } + } else if (ieee80211_is_ext(mgmt->frame_control)) { + if (sdata->vif.type == NL80211_IFTYPE_STATION) + ieee80211_sta_rx_queued_ext(sdata, skb); + else + WARN_ON(1); + } else if (ieee80211_is_data_qos(mgmt->frame_control)) { + struct ieee80211_hdr *hdr = (void *)mgmt; + struct sta_info *sta; + + /* + * So the frame isn't mgmt, but frame_control + * is at the right place anyway, of course, so + * the if statement is correct. + * + * Warn if we have other data frame types here, + * they must not get here. 
+ */ + WARN_ON(hdr->frame_control & + cpu_to_le16(IEEE80211_STYPE_NULLFUNC)); + WARN_ON(!(hdr->seq_ctrl & + cpu_to_le16(IEEE80211_SCTL_FRAG))); + /* + * This was a fragment of a frame, received while + * a block-ack session was active. That cannot be + * right, so terminate the session. + */ + mutex_lock(&local->sta_mtx); + sta = sta_info_get_bss(sdata, mgmt->sa); + if (sta) { + u16 tid = ieee80211_get_tid(hdr); + + __ieee80211_stop_rx_ba_session( + sta, tid, WLAN_BACK_RECIPIENT, + WLAN_REASON_QSTA_REQUIRE_SETUP, + true); + } + mutex_unlock(&local->sta_mtx); + } else switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + ieee80211_sta_rx_queued_mgmt(sdata, skb); + break; + case NL80211_IFTYPE_ADHOC: + ieee80211_ibss_rx_queued_mgmt(sdata, skb); + break; + case NL80211_IFTYPE_MESH_POINT: + if (!ieee80211_vif_is_mesh(&sdata->vif)) + break; + ieee80211_mesh_rx_queued_mgmt(sdata, skb); + break; + default: + WARN(1, "frame for unexpected interface type"); + break; + } +} + +static void ieee80211_iface_process_status(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + if (ieee80211_is_action(mgmt->frame_control) && + mgmt->u.action.category == WLAN_CATEGORY_S1G) { + switch (mgmt->u.action.u.s1g.action_code) { + case WLAN_S1G_TWT_TEARDOWN: + case WLAN_S1G_TWT_SETUP: + ieee80211_s1g_status_twt_action(sdata, skb); + break; + default: + break; + } + } +} + +static void ieee80211_iface_work(struct wiphy *wiphy, struct wiphy_work *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, work); + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + + if (!ieee80211_sdata_running(sdata)) + return; + + if (test_bit(SCAN_SW_SCANNING, &local->scanning)) + return; + + if (!ieee80211_can_run_worker(local)) + return; + + /* first process frames */ + while ((skb = skb_dequeue(&sdata->skb_queue))) { + kcov_remote_start_common(skb_get_kcov_handle(skb)); + + if (skb->protocol == cpu_to_be16(ETH_P_TDLS)) + ieee80211_process_tdls_channel_switch(sdata, skb); + else + ieee80211_iface_process_skb(local, sdata, skb); + + kfree_skb(skb); + kcov_remote_stop(); + } + + /* process status queue */ + while ((skb = skb_dequeue(&sdata->status_queue))) { + kcov_remote_start_common(skb_get_kcov_handle(skb)); + + ieee80211_iface_process_status(sdata, skb); + kfree_skb(skb); + + kcov_remote_stop(); + } + + /* then other type-dependent work */ + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + ieee80211_sta_work(sdata); + break; + case NL80211_IFTYPE_ADHOC: + ieee80211_ibss_work(sdata); + break; + case NL80211_IFTYPE_MESH_POINT: + if (!ieee80211_vif_is_mesh(&sdata->vif)) + break; + ieee80211_mesh_work(sdata); + break; + case NL80211_IFTYPE_OCB: + ieee80211_ocb_work(sdata); + break; + default: + break; + } +} + +static void ieee80211_recalc_smps_work(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, recalc_smps); + + ieee80211_recalc_smps(sdata, &sdata->deflink); +} + +static void ieee80211_activate_links_work(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, + activate_links_work); + + ieee80211_set_active_links(&sdata->vif, sdata->desired_active_links); +} + +/* + * Helper function to initialise an interface to a specific type. 
+ */ +static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, + enum nl80211_iftype type) +{ + static const u8 bssid_wildcard[ETH_ALEN] = {0xff, 0xff, 0xff, + 0xff, 0xff, 0xff}; + + /* clear type-dependent unions */ + memset(&sdata->u, 0, sizeof(sdata->u)); + memset(&sdata->deflink.u, 0, sizeof(sdata->deflink.u)); + + /* and set some type-dependent values */ + sdata->vif.type = type; + sdata->vif.p2p = false; + sdata->wdev.iftype = type; + + sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE); + sdata->control_port_no_encrypt = false; + sdata->control_port_over_nl80211 = false; + sdata->control_port_no_preauth = false; + sdata->vif.cfg.idle = true; + sdata->vif.bss_conf.txpower = INT_MIN; /* unset */ + + sdata->noack_map = 0; + + /* only monitor/p2p-device differ */ + if (sdata->dev) { + sdata->dev->netdev_ops = &ieee80211_dataif_ops; + sdata->dev->type = ARPHRD_ETHER; + } + + skb_queue_head_init(&sdata->skb_queue); + skb_queue_head_init(&sdata->status_queue); + wiphy_work_init(&sdata->work, ieee80211_iface_work); + INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work); + INIT_WORK(&sdata->activate_links_work, ieee80211_activate_links_work); + + switch (type) { + case NL80211_IFTYPE_P2P_GO: + type = NL80211_IFTYPE_AP; + sdata->vif.type = type; + sdata->vif.p2p = true; + fallthrough; + case NL80211_IFTYPE_AP: + skb_queue_head_init(&sdata->u.ap.ps.bc_buf); + INIT_LIST_HEAD(&sdata->u.ap.vlans); + sdata->vif.bss_conf.bssid = sdata->vif.addr; + break; + case NL80211_IFTYPE_P2P_CLIENT: + type = NL80211_IFTYPE_STATION; + sdata->vif.type = type; + sdata->vif.p2p = true; + fallthrough; + case NL80211_IFTYPE_STATION: + sdata->vif.bss_conf.bssid = sdata->deflink.u.mgd.bssid; + ieee80211_sta_setup_sdata(sdata); + break; + case NL80211_IFTYPE_OCB: + sdata->vif.bss_conf.bssid = bssid_wildcard; + ieee80211_ocb_setup_sdata(sdata); + break; + case NL80211_IFTYPE_ADHOC: + sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid; + ieee80211_ibss_setup_sdata(sdata); + break; + case NL80211_IFTYPE_MESH_POINT: + if (ieee80211_vif_is_mesh(&sdata->vif)) + ieee80211_mesh_init_sdata(sdata); + break; + case NL80211_IFTYPE_MONITOR: + sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP; + sdata->dev->netdev_ops = &ieee80211_monitorif_ops; + sdata->u.mntr.flags = MONITOR_FLAG_CONTROL | + MONITOR_FLAG_OTHER_BSS; + break; + case NL80211_IFTYPE_NAN: + idr_init(&sdata->u.nan.function_inst_ids); + spin_lock_init(&sdata->u.nan.func_lock); + sdata->vif.bss_conf.bssid = sdata->vif.addr; + break; + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_P2P_DEVICE: + sdata->vif.bss_conf.bssid = sdata->vif.addr; + break; + case NL80211_IFTYPE_UNSPECIFIED: + case NL80211_IFTYPE_WDS: + case NUM_NL80211_IFTYPES: + WARN_ON(1); + break; + } + + /* need to do this after the switch so vif.type is correct */ + ieee80211_link_setup(&sdata->deflink); + + ieee80211_debugfs_add_netdev(sdata); +} + +static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, + enum nl80211_iftype type) +{ + struct ieee80211_local *local = sdata->local; + int ret, err; + enum nl80211_iftype internal_type = type; + bool p2p = false; + + ASSERT_RTNL(); + + if (!local->ops->change_interface) + return -EBUSY; + + /* for now, don't support changing while links exist */ + if (ieee80211_vif_is_mld(&sdata->vif)) + return -EBUSY; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP: + if (!list_empty(&sdata->u.ap.vlans)) + return -EBUSY; + break; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_OCB: + /* + * 
Could maybe also all others here? + * Just not sure how that interacts + * with the RX/config path e.g. for + * mesh. + */ + break; + default: + return -EBUSY; + } + + switch (type) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_OCB: + /* + * Could probably support everything + * but here. + */ + break; + case NL80211_IFTYPE_P2P_CLIENT: + p2p = true; + internal_type = NL80211_IFTYPE_STATION; + break; + case NL80211_IFTYPE_P2P_GO: + p2p = true; + internal_type = NL80211_IFTYPE_AP; + break; + default: + return -EBUSY; + } + + ret = ieee80211_check_concurrent_iface(sdata, internal_type); + if (ret) + return ret; + + ieee80211_stop_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); + /* do_stop will synchronize_rcu() first thing */ + ieee80211_do_stop(sdata, false); + + ieee80211_teardown_sdata(sdata); + + ieee80211_set_sdata_offload_flags(sdata); + ret = drv_change_interface(local, sdata, internal_type, p2p); + if (ret) + type = ieee80211_vif_type_p2p(&sdata->vif); + + /* + * Ignore return value here, there's not much we can do since + * the driver changed the interface type internally already. + * The warnings will hopefully make driver authors fix it :-) + */ + ieee80211_check_queues(sdata, type); + + ieee80211_setup_sdata(sdata, type); + ieee80211_set_vif_encap_ops(sdata); + + err = ieee80211_do_open(&sdata->wdev, false); + WARN(err, "type change: do_open returned %d", err); + + ieee80211_wake_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); + return ret; +} + +int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, + enum nl80211_iftype type) +{ + int ret; + + ASSERT_RTNL(); + + if (type == ieee80211_vif_type_p2p(&sdata->vif)) + return 0; + + if (ieee80211_sdata_running(sdata)) { + ret = ieee80211_runtime_change_iftype(sdata, type); + if (ret) + return ret; + } else { + /* Purge and reset type-dependent state. */ + ieee80211_teardown_sdata(sdata); + ieee80211_setup_sdata(sdata, type); + } + + /* reset some values that shouldn't be kept across type changes */ + if (type == NL80211_IFTYPE_STATION) + sdata->u.mgd.use_4addr = false; + + return 0; +} + +static void ieee80211_assign_perm_addr(struct ieee80211_local *local, + u8 *perm_addr, enum nl80211_iftype type) +{ + struct ieee80211_sub_if_data *sdata; + u64 mask, start, addr, val, inc; + u8 *m; + u8 tmp_addr[ETH_ALEN]; + int i; + + /* default ... 
something at least */ + memcpy(perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN); + + if (is_zero_ether_addr(local->hw.wiphy->addr_mask) && + local->hw.wiphy->n_addresses <= 1) + return; + + mutex_lock(&local->iflist_mtx); + + switch (type) { + case NL80211_IFTYPE_MONITOR: + /* doesn't matter */ + break; + case NL80211_IFTYPE_AP_VLAN: + /* match up with an AP interface */ + list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata->vif.type != NL80211_IFTYPE_AP) + continue; + memcpy(perm_addr, sdata->vif.addr, ETH_ALEN); + break; + } + /* keep default if no AP interface present */ + break; + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_GO: + if (ieee80211_hw_check(&local->hw, P2P_DEV_ADDR_FOR_INTF)) { + list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) + continue; + if (!ieee80211_sdata_running(sdata)) + continue; + memcpy(perm_addr, sdata->vif.addr, ETH_ALEN); + goto out_unlock; + } + } + fallthrough; + default: + /* assign a new address if possible -- try n_addresses first */ + for (i = 0; i < local->hw.wiphy->n_addresses; i++) { + bool used = false; + + list_for_each_entry(sdata, &local->interfaces, list) { + if (ether_addr_equal(local->hw.wiphy->addresses[i].addr, + sdata->vif.addr)) { + used = true; + break; + } + } + + if (!used) { + memcpy(perm_addr, + local->hw.wiphy->addresses[i].addr, + ETH_ALEN); + break; + } + } + + /* try mask if available */ + if (is_zero_ether_addr(local->hw.wiphy->addr_mask)) + break; + + m = local->hw.wiphy->addr_mask; + mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | + ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | + ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); + + if (__ffs64(mask) + hweight64(mask) != fls64(mask)) { + /* not a contiguous mask ... not handled now! */ + pr_info("not contiguous\n"); + break; + } + + /* + * Pick address of existing interface in case user changed + * MAC address manually, default to perm_addr. 
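+ * For example, with an addr_mask of 00:00:00:00:00:03 the loop below
+ * keeps every bit outside the mask fixed, steps the masked bits by the
+ * lowest set bit of the mask and probes each candidate against the
+ * existing interfaces until an unused address is found or the search
+ * wraps back to its starting point.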
+ */ + m = local->hw.wiphy->perm_addr; + list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) + continue; + m = sdata->vif.addr; + break; + } + start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | + ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | + ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); + + inc = 1ULL<<__ffs64(mask); + val = (start & mask); + addr = (start & ~mask) | (val & mask); + do { + bool used = false; + + tmp_addr[5] = addr >> 0*8; + tmp_addr[4] = addr >> 1*8; + tmp_addr[3] = addr >> 2*8; + tmp_addr[2] = addr >> 3*8; + tmp_addr[1] = addr >> 4*8; + tmp_addr[0] = addr >> 5*8; + + val += inc; + + list_for_each_entry(sdata, &local->interfaces, list) { + if (ether_addr_equal(tmp_addr, sdata->vif.addr)) { + used = true; + break; + } + } + + if (!used) { + memcpy(perm_addr, tmp_addr, ETH_ALEN); + break; + } + addr = (start & ~mask) | (val & mask); + } while (addr != start); + + break; + } + + out_unlock: + mutex_unlock(&local->iflist_mtx); +} + +int ieee80211_if_add(struct ieee80211_local *local, const char *name, + unsigned char name_assign_type, + struct wireless_dev **new_wdev, enum nl80211_iftype type, + struct vif_params *params) +{ + struct net_device *ndev = NULL; + struct ieee80211_sub_if_data *sdata = NULL; + struct txq_info *txqi; + int ret, i; + + ASSERT_RTNL(); + + if (type == NL80211_IFTYPE_P2P_DEVICE || type == NL80211_IFTYPE_NAN) { + struct wireless_dev *wdev; + + sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, + GFP_KERNEL); + if (!sdata) + return -ENOMEM; + wdev = &sdata->wdev; + + sdata->dev = NULL; + strscpy(sdata->name, name, IFNAMSIZ); + ieee80211_assign_perm_addr(local, wdev->address, type); + memcpy(sdata->vif.addr, wdev->address, ETH_ALEN); + ether_addr_copy(sdata->vif.bss_conf.addr, sdata->vif.addr); + } else { + int size = ALIGN(sizeof(*sdata) + local->hw.vif_data_size, + sizeof(void *)); + int txq_size = 0; + + if (type != NL80211_IFTYPE_AP_VLAN && + (type != NL80211_IFTYPE_MONITOR || + (params->flags & MONITOR_FLAG_ACTIVE))) + txq_size += sizeof(struct txq_info) + + local->hw.txq_data_size; + + ndev = alloc_netdev_mqs(size + txq_size, + name, name_assign_type, + ieee80211_if_setup, 1, 1); + if (!ndev) + return -ENOMEM; + + dev_net_set(ndev, wiphy_net(local->hw.wiphy)); + + ndev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!ndev->tstats) { + free_netdev(ndev); + return -ENOMEM; + } + + ndev->needed_headroom = local->tx_headroom + + 4*6 /* four MAC addresses */ + + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */ + + 6 /* mesh */ + + 8 /* rfc1042/bridge tunnel */ + - ETH_HLEN /* ethernet hard_header_len */ + + IEEE80211_ENCRYPT_HEADROOM; + ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM; + + ret = dev_alloc_name(ndev, ndev->name); + if (ret < 0) { + ieee80211_if_free(ndev); + free_netdev(ndev); + return ret; + } + + ieee80211_assign_perm_addr(local, ndev->perm_addr, type); + if (is_valid_ether_addr(params->macaddr)) + eth_hw_addr_set(ndev, params->macaddr); + else + eth_hw_addr_set(ndev, ndev->perm_addr); + SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); + + /* don't use IEEE80211_DEV_TO_SUB_IF -- it checks too much */ + sdata = netdev_priv(ndev); + ndev->ieee80211_ptr = &sdata->wdev; + memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN); + ether_addr_copy(sdata->vif.bss_conf.addr, sdata->vif.addr); + memcpy(sdata->name, ndev->name, IFNAMSIZ); + + if (txq_size) { + txqi = netdev_priv(ndev) + size; + ieee80211_txq_init(sdata, NULL, txqi, 0); + } + + sdata->dev = ndev; + } + + /* initialise 
type-independent data */ + sdata->wdev.wiphy = local->hw.wiphy; + + ieee80211_sdata_init(local, sdata); + + ieee80211_init_frag_cache(&sdata->frags); + + INIT_LIST_HEAD(&sdata->key_list); + + INIT_DELAYED_WORK(&sdata->dec_tailroom_needed_wk, + ieee80211_delayed_tailroom_dec); + + for (i = 0; i < NUM_NL80211_BANDS; i++) { + struct ieee80211_supported_band *sband; + sband = local->hw.wiphy->bands[i]; + sdata->rc_rateidx_mask[i] = + sband ? (1 << sband->n_bitrates) - 1 : 0; + if (sband) { + __le16 cap; + u16 *vht_rate_mask; + + memcpy(sdata->rc_rateidx_mcs_mask[i], + sband->ht_cap.mcs.rx_mask, + sizeof(sdata->rc_rateidx_mcs_mask[i])); + + cap = sband->vht_cap.vht_mcs.rx_mcs_map; + vht_rate_mask = sdata->rc_rateidx_vht_mcs_mask[i]; + ieee80211_get_vht_mask_from_cap(cap, vht_rate_mask); + } else { + memset(sdata->rc_rateidx_mcs_mask[i], 0, + sizeof(sdata->rc_rateidx_mcs_mask[i])); + memset(sdata->rc_rateidx_vht_mcs_mask[i], 0, + sizeof(sdata->rc_rateidx_vht_mcs_mask[i])); + } + } + + ieee80211_set_default_queues(sdata); + + sdata->deflink.ap_power_level = IEEE80211_UNSET_POWER_LEVEL; + sdata->deflink.user_power_level = local->user_power_level; + + /* setup type-dependent data */ + ieee80211_setup_sdata(sdata, type); + + if (ndev) { + ndev->ieee80211_ptr->use_4addr = params->use_4addr; + if (type == NL80211_IFTYPE_STATION) + sdata->u.mgd.use_4addr = params->use_4addr; + + ndev->features |= local->hw.netdev_features; + ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + ndev->hw_features |= ndev->features & + MAC80211_SUPPORTED_FEATURES_TX; + sdata->vif.netdev_features = local->hw.netdev_features; + + netdev_set_default_ethtool_ops(ndev, &ieee80211_ethtool_ops); + + /* MTU range is normally 256 - 2304, where the upper limit is + * the maximum MSDU size. Monitor interfaces send and receive + * MPDU and A-MSDU frames which may be much larger so we do + * not impose an upper limit in that case. + */ + ndev->min_mtu = 256; + if (type == NL80211_IFTYPE_MONITOR) + ndev->max_mtu = 0; + else + ndev->max_mtu = local->hw.max_mtu; + + ret = cfg80211_register_netdevice(ndev); + if (ret) { + free_netdev(ndev); + return ret; + } + } + + mutex_lock(&local->iflist_mtx); + list_add_tail_rcu(&sdata->list, &local->interfaces); + mutex_unlock(&local->iflist_mtx); + + if (new_wdev) + *new_wdev = &sdata->wdev; + + return 0; +} + +void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) +{ + ASSERT_RTNL(); + + mutex_lock(&sdata->local->iflist_mtx); + list_del_rcu(&sdata->list); + mutex_unlock(&sdata->local->iflist_mtx); + + if (sdata->vif.txq) + ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq)); + + synchronize_rcu(); + + cfg80211_unregister_wdev(&sdata->wdev); + + if (!sdata->dev) { + ieee80211_teardown_sdata(sdata); + kfree(sdata); + } +} + +void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata) +{ + if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state))) + return; + ieee80211_do_stop(sdata, true); +} + +void ieee80211_remove_interfaces(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata, *tmp; + LIST_HEAD(unreg_list); + + ASSERT_RTNL(); + + /* Before destroying the interfaces, make sure they're all stopped so + * that the hardware is stopped. Otherwise, the driver might still be + * iterating the interfaces during the shutdown, e.g. from a worker + * or from RX processing or similar, and if it does so (using atomic + * iteration) while we're manipulating the list, the iteration will + * crash. 
+ * + * After this, the hardware should be stopped and the driver should + * have stopped all of its activities, so that we can do RCU-unaware + * manipulations of the interface list below. + */ + cfg80211_shutdown_all_interfaces(local->hw.wiphy); + + WARN(local->open_count, "%s: open count remains %d\n", + wiphy_name(local->hw.wiphy), local->open_count); + + ieee80211_txq_teardown_flows(local); + + mutex_lock(&local->iflist_mtx); + list_splice_init(&local->interfaces, &unreg_list); + mutex_unlock(&local->iflist_mtx); + + wiphy_lock(local->hw.wiphy); + list_for_each_entry_safe(sdata, tmp, &unreg_list, list) { + bool netdev = sdata->dev; + + list_del(&sdata->list); + cfg80211_unregister_wdev(&sdata->wdev); + + if (!netdev) + kfree(sdata); + } + wiphy_unlock(local->hw.wiphy); +} + +static int netdev_notify(struct notifier_block *nb, + unsigned long state, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct ieee80211_sub_if_data *sdata; + + if (state != NETDEV_CHANGENAME) + return NOTIFY_DONE; + + if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy) + return NOTIFY_DONE; + + if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid) + return NOTIFY_DONE; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + memcpy(sdata->name, dev->name, IFNAMSIZ); + ieee80211_debugfs_rename_netdev(sdata); + + return NOTIFY_OK; +} + +static struct notifier_block mac80211_netdev_notifier = { + .notifier_call = netdev_notify, +}; + +int ieee80211_iface_init(void) +{ + return register_netdevice_notifier(&mac80211_netdev_notifier); +} + +void ieee80211_iface_exit(void) +{ + unregister_netdevice_notifier(&mac80211_netdev_notifier); +} + +void ieee80211_vif_inc_num_mcast(struct ieee80211_sub_if_data *sdata) +{ + if (sdata->vif.type == NL80211_IFTYPE_AP) + atomic_inc(&sdata->u.ap.num_mcast_sta); + else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + atomic_inc(&sdata->u.vlan.num_mcast_sta); +} + +void ieee80211_vif_dec_num_mcast(struct ieee80211_sub_if_data *sdata) +{ + if (sdata->vif.type == NL80211_IFTYPE_AP) + atomic_dec(&sdata->u.ap.num_mcast_sta); + else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + atomic_dec(&sdata->u.vlan.num_mcast_sta); +} diff --git a/net/mac80211/key.c b/net/mac80211/key.c new file mode 100644 index 0000000000..a2db0585dc --- /dev/null +++ b/net/mac80211/key.c @@ -0,0 +1,1494 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright 2015-2017 Intel Deutschland GmbH + * Copyright 2018-2020, 2022-2023 Intel Corporation + */ + +#include <crypto/utils.h> +#include <linux/if_ether.h> +#include <linux/etherdevice.h> +#include <linux/list.h> +#include <linux/rcupdate.h> +#include <linux/rtnetlink.h> +#include <linux/slab.h> +#include <linux/export.h> +#include <net/mac80211.h> +#include <asm/unaligned.h> +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "debugfs_key.h" +#include "aes_ccm.h" +#include "aes_cmac.h" +#include "aes_gmac.h" +#include "aes_gcm.h" + + +/** + * DOC: Key handling basics + * + * Key handling in mac80211 is done based on per-interface (sub_if_data) + * keys and per-station keys. Since each station belongs to an interface, + * each station key also belongs to that interface. 
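+ * Interface-wide default and group keys are stored in sdata->keys[] and
+ * link->gtk[], per-station keys in sta->ptk[] and link_sta->gtk[].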
+ * + * Hardware acceleration is done on a best-effort basis for algorithms + * that are implemented in software, for each key the hardware is asked + * to enable that key for offloading but if it cannot do that the key is + * simply kept for software encryption (unless it is for an algorithm + * that isn't implemented in software). + * There is currently no way of knowing whether a key is handled in SW + * or HW except by looking into debugfs. + * + * All key management is internally protected by a mutex. Within all + * other parts of mac80211, key references are, just as STA structure + * references, protected by RCU. Note, however, that some things are + * unprotected, namely the key->sta dereferences within the hardware + * acceleration functions. This means that sta_info_destroy() must + * remove the key which waits for an RCU grace period. + */ + +static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + +static void assert_key_lock(struct ieee80211_local *local) +{ + lockdep_assert_held(&local->key_mtx); +} + +static void +update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta) +{ + struct ieee80211_sub_if_data *vlan; + + if (sdata->vif.type != NL80211_IFTYPE_AP) + return; + + /* crypto_tx_tailroom_needed_cnt is protected by this */ + assert_key_lock(sdata->local); + + rcu_read_lock(); + + list_for_each_entry_rcu(vlan, &sdata->u.ap.vlans, u.vlan.list) + vlan->crypto_tx_tailroom_needed_cnt += delta; + + rcu_read_unlock(); +} + +static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata) +{ + /* + * When this count is zero, SKB resizing for allocating tailroom + * for IV or MMIC is skipped. But, this check has created two race + * cases in xmit path while transiting from zero count to one: + * + * 1. SKB resize was skipped because no key was added but just before + * the xmit key is added and SW encryption kicks off. + * + * 2. SKB resize was skipped because all the keys were hw planted but + * just before xmit one of the key is deleted and SW encryption kicks + * off. + * + * In both the above case SW encryption will find not enough space for + * tailroom and exits with WARN_ON. (See WARN_ONs at wpa.c) + * + * Solution has been explained at + * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net + */ + + assert_key_lock(sdata->local); + + update_vlan_tailroom_need_count(sdata, 1); + + if (!sdata->crypto_tx_tailroom_needed_cnt++) { + /* + * Flush all XMIT packets currently using HW encryption or no + * encryption at all if the count transition is from 0 -> 1. + */ + synchronize_net(); + } +} + +static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata, + int delta) +{ + assert_key_lock(sdata->local); + + WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta); + + update_vlan_tailroom_need_count(sdata, -delta); + sdata->crypto_tx_tailroom_needed_cnt -= delta; +} + +static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) +{ + struct ieee80211_sub_if_data *sdata = key->sdata; + struct sta_info *sta; + int ret = -EOPNOTSUPP; + + might_sleep(); + + if (key->flags & KEY_FLAG_TAINTED) { + /* If we get here, it's during resume and the key is + * tainted so shouldn't be used/programmed any more. + * However, its flags may still indicate that it was + * programmed into the device (since we're in resume) + * so clear that flag now to avoid trying to remove + * it again later. 
+ */ + if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && + !(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | + IEEE80211_KEY_FLAG_PUT_MIC_SPACE | + IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) + increment_tailroom_need_count(sdata); + + key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; + return -EINVAL; + } + + if (!key->local->ops->set_key) + goto out_unsupported; + + assert_key_lock(key->local); + + sta = key->sta; + + /* + * If this is a per-STA GTK, check if it + * is supported; if not, return. + */ + if (sta && !(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE) && + !ieee80211_hw_check(&key->local->hw, SUPPORTS_PER_STA_GTK)) + goto out_unsupported; + + if (sta && !sta->uploaded) + goto out_unsupported; + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + /* + * The driver doesn't know anything about VLAN interfaces. + * Hence, don't send GTKs for VLAN interfaces to the driver. + */ + if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) { + ret = 1; + goto out_unsupported; + } + } + + if (key->conf.link_id >= 0 && sdata->vif.active_links && + !(sdata->vif.active_links & BIT(key->conf.link_id))) + return 0; + + ret = drv_set_key(key->local, SET_KEY, sdata, + sta ? &sta->sta : NULL, &key->conf); + + if (!ret) { + key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; + + if (!(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | + IEEE80211_KEY_FLAG_PUT_MIC_SPACE | + IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) + decrease_tailroom_need_count(sdata, 1); + + WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)); + + WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) && + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)); + + return 0; + } + + if (ret != -ENOSPC && ret != -EOPNOTSUPP && ret != 1) + sdata_err(sdata, + "failed to set key (%d, %pM) to hardware (%d)\n", + key->conf.keyidx, + sta ? sta->sta.addr : bcast_addr, ret); + + out_unsupported: + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + /* all of these we can do in software - if driver can */ + if (ret == 1) + return 0; + if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) + return -EINVAL; + return 0; + default: + return -EINVAL; + } +} + +static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) +{ + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + int ret; + + might_sleep(); + + if (!key || !key->local->ops->set_key) + return; + + assert_key_lock(key->local); + + if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) + return; + + sta = key->sta; + sdata = key->sdata; + + if (key->conf.link_id >= 0 && sdata->vif.active_links && + !(sdata->vif.active_links & BIT(key->conf.link_id))) + return; + + if (!(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | + IEEE80211_KEY_FLAG_PUT_MIC_SPACE | + IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) + increment_tailroom_need_count(sdata); + + key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; + ret = drv_set_key(key->local, DISABLE_KEY, sdata, + sta ? &sta->sta : NULL, &key->conf); + + if (ret) + sdata_err(sdata, + "failed to remove key (%d, %pM) from hardware (%d)\n", + key->conf.keyidx, + sta ? 
sta->sta.addr : bcast_addr, ret); +} + +static int _ieee80211_set_tx_key(struct ieee80211_key *key, bool force) +{ + struct sta_info *sta = key->sta; + struct ieee80211_local *local = key->local; + + assert_key_lock(local); + + set_sta_flag(sta, WLAN_STA_USES_ENCRYPTION); + + sta->ptk_idx = key->conf.keyidx; + + if (force || !ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT)) + clear_sta_flag(sta, WLAN_STA_BLOCK_BA); + ieee80211_check_fast_xmit(sta); + + return 0; +} + +int ieee80211_set_tx_key(struct ieee80211_key *key) +{ + return _ieee80211_set_tx_key(key, false); +} + +static void ieee80211_pairwise_rekey(struct ieee80211_key *old, + struct ieee80211_key *new) +{ + struct ieee80211_local *local = new->local; + struct sta_info *sta = new->sta; + int i; + + assert_key_lock(local); + + if (new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX) { + /* Extended Key ID key install, initial one or rekey */ + + if (sta->ptk_idx != INVALID_PTK_KEYIDX && + !ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT)) { + /* Aggregation Sessions with Extended Key ID must not + * mix MPDUs with different keyIDs within one A-MPDU. + * Tear down running Tx aggregation sessions and block + * new Rx/Tx aggregation requests during rekey to + * ensure there are no A-MPDUs when the driver is not + * supporting A-MPDU key borders. (Blocking Tx only + * would be sufficient but WLAN_STA_BLOCK_BA gets the + * job done for the few ms we need it.) + */ + set_sta_flag(sta, WLAN_STA_BLOCK_BA); + mutex_lock(&sta->ampdu_mlme.mtx); + for (i = 0; i < IEEE80211_NUM_TIDS; i++) + ___ieee80211_stop_tx_ba_session(sta, i, + AGG_STOP_LOCAL_REQUEST); + mutex_unlock(&sta->ampdu_mlme.mtx); + } + } else if (old) { + /* Rekey without Extended Key ID. + * Aggregation sessions are OK when running on SW crypto. + * A broken remote STA may cause issues not observed with HW + * crypto, though. + */ + if (!(old->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) + return; + + /* Stop Tx till we are on the new key */ + old->flags |= KEY_FLAG_TAINTED; + ieee80211_clear_fast_xmit(sta); + if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) { + set_sta_flag(sta, WLAN_STA_BLOCK_BA); + ieee80211_sta_tear_down_BA_sessions(sta, + AGG_STOP_LOCAL_REQUEST); + } + if (!wiphy_ext_feature_isset(local->hw.wiphy, + NL80211_EXT_FEATURE_CAN_REPLACE_PTK0)) { + pr_warn_ratelimited("Rekeying PTK for STA %pM but driver can't safely do that.", + sta->sta.addr); + /* Flushing the driver queues *may* help prevent + * the clear text leaks and freezes. 
+ */ + ieee80211_flush_queues(local, old->sdata, false); + } + } +} + +static void __ieee80211_set_default_key(struct ieee80211_link_data *link, + int idx, bool uni, bool multi) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_key *key = NULL; + + assert_key_lock(sdata->local); + + if (idx >= 0 && idx < NUM_DEFAULT_KEYS) { + key = key_mtx_dereference(sdata->local, sdata->keys[idx]); + if (!key) + key = key_mtx_dereference(sdata->local, link->gtk[idx]); + } + + if (uni) { + rcu_assign_pointer(sdata->default_unicast_key, key); + ieee80211_check_fast_xmit_iface(sdata); + if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN) + drv_set_default_unicast_key(sdata->local, sdata, idx); + } + + if (multi) + rcu_assign_pointer(link->default_multicast_key, key); + + ieee80211_debugfs_key_update_default(sdata); +} + +void ieee80211_set_default_key(struct ieee80211_link_data *link, int idx, + bool uni, bool multi) +{ + mutex_lock(&link->sdata->local->key_mtx); + __ieee80211_set_default_key(link, idx, uni, multi); + mutex_unlock(&link->sdata->local->key_mtx); +} + +static void +__ieee80211_set_default_mgmt_key(struct ieee80211_link_data *link, int idx) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_key *key = NULL; + + assert_key_lock(sdata->local); + + if (idx >= NUM_DEFAULT_KEYS && + idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) + key = key_mtx_dereference(sdata->local, link->gtk[idx]); + + rcu_assign_pointer(link->default_mgmt_key, key); + + ieee80211_debugfs_key_update_default(sdata); +} + +void ieee80211_set_default_mgmt_key(struct ieee80211_link_data *link, + int idx) +{ + mutex_lock(&link->sdata->local->key_mtx); + __ieee80211_set_default_mgmt_key(link, idx); + mutex_unlock(&link->sdata->local->key_mtx); +} + +static void +__ieee80211_set_default_beacon_key(struct ieee80211_link_data *link, int idx) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_key *key = NULL; + + assert_key_lock(sdata->local); + + if (idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS && + idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + + NUM_DEFAULT_BEACON_KEYS) + key = key_mtx_dereference(sdata->local, link->gtk[idx]); + + rcu_assign_pointer(link->default_beacon_key, key); + + ieee80211_debugfs_key_update_default(sdata); +} + +void ieee80211_set_default_beacon_key(struct ieee80211_link_data *link, + int idx) +{ + mutex_lock(&link->sdata->local->key_mtx); + __ieee80211_set_default_beacon_key(link, idx); + mutex_unlock(&link->sdata->local->key_mtx); +} + +static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link, + struct sta_info *sta, + bool pairwise, + struct ieee80211_key *old, + struct ieee80211_key *new) +{ + struct link_sta_info *link_sta = sta ? 
&sta->deflink : NULL; + int link_id; + int idx; + int ret = 0; + bool defunikey, defmultikey, defmgmtkey, defbeaconkey; + bool is_wep; + + /* caller must provide at least one old/new */ + if (WARN_ON(!new && !old)) + return 0; + + if (new) { + idx = new->conf.keyidx; + is_wep = new->conf.cipher == WLAN_CIPHER_SUITE_WEP40 || + new->conf.cipher == WLAN_CIPHER_SUITE_WEP104; + link_id = new->conf.link_id; + } else { + idx = old->conf.keyidx; + is_wep = old->conf.cipher == WLAN_CIPHER_SUITE_WEP40 || + old->conf.cipher == WLAN_CIPHER_SUITE_WEP104; + link_id = old->conf.link_id; + } + + if (WARN(old && old->conf.link_id != link_id, + "old link ID %d doesn't match new link ID %d\n", + old->conf.link_id, link_id)) + return -EINVAL; + + if (link_id >= 0) { + if (!link) { + link = sdata_dereference(sdata->link[link_id], sdata); + if (!link) + return -ENOLINK; + } + + if (sta) { + link_sta = rcu_dereference_protected(sta->link[link_id], + lockdep_is_held(&sta->local->sta_mtx)); + if (!link_sta) + return -ENOLINK; + } + } else { + link = &sdata->deflink; + } + + if ((is_wep || pairwise) && idx >= NUM_DEFAULT_KEYS) + return -EINVAL; + + WARN_ON(new && old && new->conf.keyidx != old->conf.keyidx); + + if (new && sta && pairwise) { + /* Unicast rekey needs special handling. With Extended Key ID + * old is still NULL for the first rekey. + */ + ieee80211_pairwise_rekey(old, new); + } + + if (old) { + if (old->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { + ieee80211_key_disable_hw_accel(old); + + if (new) + ret = ieee80211_key_enable_hw_accel(new); + } + } else { + if (!new->local->wowlan) { + ret = ieee80211_key_enable_hw_accel(new); + } else { + assert_key_lock(new->local); + new->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; + } + } + + if (ret) + return ret; + + if (new) + list_add_tail_rcu(&new->list, &sdata->key_list); + + if (sta) { + if (pairwise) { + rcu_assign_pointer(sta->ptk[idx], new); + if (new && + !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX)) + _ieee80211_set_tx_key(new, true); + } else { + rcu_assign_pointer(link_sta->gtk[idx], new); + } + /* Only needed for transition from no key -> key. + * Still triggers unnecessary when using Extended Key ID + * and installing the second key ID the first time. 
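+ * (The redundant ieee80211_check_fast_rx() call in that case is
+ *  harmless, it merely re-evaluates the same state.)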
+ */ + if (new && !old) + ieee80211_check_fast_rx(sta); + } else { + defunikey = old && + old == key_mtx_dereference(sdata->local, + sdata->default_unicast_key); + defmultikey = old && + old == key_mtx_dereference(sdata->local, + link->default_multicast_key); + defmgmtkey = old && + old == key_mtx_dereference(sdata->local, + link->default_mgmt_key); + defbeaconkey = old && + old == key_mtx_dereference(sdata->local, + link->default_beacon_key); + + if (defunikey && !new) + __ieee80211_set_default_key(link, -1, true, false); + if (defmultikey && !new) + __ieee80211_set_default_key(link, -1, false, true); + if (defmgmtkey && !new) + __ieee80211_set_default_mgmt_key(link, -1); + if (defbeaconkey && !new) + __ieee80211_set_default_beacon_key(link, -1); + + if (is_wep || pairwise) + rcu_assign_pointer(sdata->keys[idx], new); + else + rcu_assign_pointer(link->gtk[idx], new); + + if (defunikey && new) + __ieee80211_set_default_key(link, new->conf.keyidx, + true, false); + if (defmultikey && new) + __ieee80211_set_default_key(link, new->conf.keyidx, + false, true); + if (defmgmtkey && new) + __ieee80211_set_default_mgmt_key(link, + new->conf.keyidx); + if (defbeaconkey && new) + __ieee80211_set_default_beacon_key(link, + new->conf.keyidx); + } + + if (old) + list_del_rcu(&old->list); + + return 0; +} + +struct ieee80211_key * +ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, + const u8 *key_data, + size_t seq_len, const u8 *seq) +{ + struct ieee80211_key *key; + int i, j, err; + + if (WARN_ON(idx < 0 || + idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + + NUM_DEFAULT_BEACON_KEYS)) + return ERR_PTR(-EINVAL); + + key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL); + if (!key) + return ERR_PTR(-ENOMEM); + + /* + * Default to software encryption; we'll later upload the + * key to the hardware if possible. + */ + key->conf.flags = 0; + key->flags = 0; + + key->conf.link_id = -1; + key->conf.cipher = cipher; + key->conf.keyidx = idx; + key->conf.keylen = key_len; + switch (cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + key->conf.iv_len = IEEE80211_WEP_IV_LEN; + key->conf.icv_len = IEEE80211_WEP_ICV_LEN; + break; + case WLAN_CIPHER_SUITE_TKIP: + key->conf.iv_len = IEEE80211_TKIP_IV_LEN; + key->conf.icv_len = IEEE80211_TKIP_ICV_LEN; + if (seq) { + for (i = 0; i < IEEE80211_NUM_TIDS; i++) { + key->u.tkip.rx[i].iv32 = + get_unaligned_le32(&seq[2]); + key->u.tkip.rx[i].iv16 = + get_unaligned_le16(seq); + } + } + spin_lock_init(&key->u.tkip.txlock); + break; + case WLAN_CIPHER_SUITE_CCMP: + key->conf.iv_len = IEEE80211_CCMP_HDR_LEN; + key->conf.icv_len = IEEE80211_CCMP_MIC_LEN; + if (seq) { + for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) + for (j = 0; j < IEEE80211_CCMP_PN_LEN; j++) + key->u.ccmp.rx_pn[i][j] = + seq[IEEE80211_CCMP_PN_LEN - j - 1]; + } + /* + * Initialize AES key state here as an optimization so that + * it does not need to be initialized for every packet. 
+ */ + key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt( + key_data, key_len, IEEE80211_CCMP_MIC_LEN); + if (IS_ERR(key->u.ccmp.tfm)) { + err = PTR_ERR(key->u.ccmp.tfm); + kfree(key); + return ERR_PTR(err); + } + break; + case WLAN_CIPHER_SUITE_CCMP_256: + key->conf.iv_len = IEEE80211_CCMP_256_HDR_LEN; + key->conf.icv_len = IEEE80211_CCMP_256_MIC_LEN; + for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++) + for (j = 0; j < IEEE80211_CCMP_256_PN_LEN; j++) + key->u.ccmp.rx_pn[i][j] = + seq[IEEE80211_CCMP_256_PN_LEN - j - 1]; + /* Initialize AES key state here as an optimization so that + * it does not need to be initialized for every packet. + */ + key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt( + key_data, key_len, IEEE80211_CCMP_256_MIC_LEN); + if (IS_ERR(key->u.ccmp.tfm)) { + err = PTR_ERR(key->u.ccmp.tfm); + kfree(key); + return ERR_PTR(err); + } + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + key->conf.iv_len = 0; + if (cipher == WLAN_CIPHER_SUITE_AES_CMAC) + key->conf.icv_len = sizeof(struct ieee80211_mmie); + else + key->conf.icv_len = sizeof(struct ieee80211_mmie_16); + if (seq) + for (j = 0; j < IEEE80211_CMAC_PN_LEN; j++) + key->u.aes_cmac.rx_pn[j] = + seq[IEEE80211_CMAC_PN_LEN - j - 1]; + /* + * Initialize AES key state here as an optimization so that + * it does not need to be initialized for every packet. + */ + key->u.aes_cmac.tfm = + ieee80211_aes_cmac_key_setup(key_data, key_len); + if (IS_ERR(key->u.aes_cmac.tfm)) { + err = PTR_ERR(key->u.aes_cmac.tfm); + kfree(key); + return ERR_PTR(err); + } + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + key->conf.iv_len = 0; + key->conf.icv_len = sizeof(struct ieee80211_mmie_16); + if (seq) + for (j = 0; j < IEEE80211_GMAC_PN_LEN; j++) + key->u.aes_gmac.rx_pn[j] = + seq[IEEE80211_GMAC_PN_LEN - j - 1]; + /* Initialize AES key state here as an optimization so that + * it does not need to be initialized for every packet. + */ + key->u.aes_gmac.tfm = + ieee80211_aes_gmac_key_setup(key_data, key_len); + if (IS_ERR(key->u.aes_gmac.tfm)) { + err = PTR_ERR(key->u.aes_gmac.tfm); + kfree(key); + return ERR_PTR(err); + } + break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + key->conf.iv_len = IEEE80211_GCMP_HDR_LEN; + key->conf.icv_len = IEEE80211_GCMP_MIC_LEN; + for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++) + for (j = 0; j < IEEE80211_GCMP_PN_LEN; j++) + key->u.gcmp.rx_pn[i][j] = + seq[IEEE80211_GCMP_PN_LEN - j - 1]; + /* Initialize AES key state here as an optimization so that + * it does not need to be initialized for every packet. 
+ */ + key->u.gcmp.tfm = ieee80211_aes_gcm_key_setup_encrypt(key_data, + key_len); + if (IS_ERR(key->u.gcmp.tfm)) { + err = PTR_ERR(key->u.gcmp.tfm); + kfree(key); + return ERR_PTR(err); + } + break; + } + memcpy(key->conf.key, key_data, key_len); + INIT_LIST_HEAD(&key->list); + + return key; +} + +static void ieee80211_key_free_common(struct ieee80211_key *key) +{ + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + ieee80211_aes_key_free(key->u.ccmp.tfm); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + ieee80211_aes_gmac_key_free(key->u.aes_gmac.tfm); + break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + ieee80211_aes_gcm_key_free(key->u.gcmp.tfm); + break; + } + kfree_sensitive(key); +} + +static void __ieee80211_key_destroy(struct ieee80211_key *key, + bool delay_tailroom) +{ + if (key->local) { + struct ieee80211_sub_if_data *sdata = key->sdata; + + ieee80211_debugfs_key_remove(key); + + if (delay_tailroom) { + /* see ieee80211_delayed_tailroom_dec */ + sdata->crypto_tx_tailroom_pending_dec++; + schedule_delayed_work(&sdata->dec_tailroom_needed_wk, + HZ/2); + } else { + decrease_tailroom_need_count(sdata, 1); + } + } + + ieee80211_key_free_common(key); +} + +static void ieee80211_key_destroy(struct ieee80211_key *key, + bool delay_tailroom) +{ + if (!key) + return; + + /* + * Synchronize so the TX path and rcu key iterators + * can no longer be using this key before we free/remove it. + */ + synchronize_net(); + + __ieee80211_key_destroy(key, delay_tailroom); +} + +void ieee80211_key_free_unused(struct ieee80211_key *key) +{ + if (!key) + return; + + WARN_ON(key->sdata || key->local); + ieee80211_key_free_common(key); +} + +static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata, + struct ieee80211_key *old, + struct ieee80211_key *new) +{ + u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP]; + u8 *tk_old, *tk_new; + + if (!old || new->conf.keylen != old->conf.keylen) + return false; + + tk_old = old->conf.key; + tk_new = new->conf.key; + + /* + * In station mode, don't compare the TX MIC key, as it's never used + * and offloaded rekeying may not care to send it to the host. This + * is the case in iwlwifi, for example. + */ + if (sdata->vif.type == NL80211_IFTYPE_STATION && + new->conf.cipher == WLAN_CIPHER_SUITE_TKIP && + new->conf.keylen == WLAN_KEY_LEN_TKIP && + !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) { + memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP); + memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP); + memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); + memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); + tk_old = tkip_old; + tk_new = tkip_new; + } + + return !crypto_memneq(tk_old, tk_new, new->conf.keylen); +} + +int ieee80211_key_link(struct ieee80211_key *key, + struct ieee80211_link_data *link, + struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + static atomic_t key_color = ATOMIC_INIT(0); + struct ieee80211_key *old_key = NULL; + int idx = key->conf.keyidx; + bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; + /* + * We want to delay tailroom updates only for station - in that + * case it helps roaming speed, but in other cases it hurts and + * can cause warnings to appear. 
+ */ + bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION; + int ret; + + mutex_lock(&sdata->local->key_mtx); + + if (sta && pairwise) { + struct ieee80211_key *alt_key; + + old_key = key_mtx_dereference(sdata->local, sta->ptk[idx]); + alt_key = key_mtx_dereference(sdata->local, sta->ptk[idx ^ 1]); + + /* The rekey code assumes that the old and new key are using + * the same cipher. Enforce the assumption for pairwise keys. + */ + if ((alt_key && alt_key->conf.cipher != key->conf.cipher) || + (old_key && old_key->conf.cipher != key->conf.cipher)) { + ret = -EOPNOTSUPP; + goto out; + } + } else if (sta) { + struct link_sta_info *link_sta = &sta->deflink; + int link_id = key->conf.link_id; + + if (link_id >= 0) { + link_sta = rcu_dereference_protected(sta->link[link_id], + lockdep_is_held(&sta->local->sta_mtx)); + if (!link_sta) { + ret = -ENOLINK; + goto out; + } + } + + old_key = key_mtx_dereference(sdata->local, link_sta->gtk[idx]); + } else { + if (idx < NUM_DEFAULT_KEYS) + old_key = key_mtx_dereference(sdata->local, + sdata->keys[idx]); + if (!old_key) + old_key = key_mtx_dereference(sdata->local, + link->gtk[idx]); + } + + /* Non-pairwise keys must also not switch the cipher on rekey */ + if (!pairwise) { + if (old_key && old_key->conf.cipher != key->conf.cipher) { + ret = -EOPNOTSUPP; + goto out; + } + } + + /* + * Silently accept key re-installation without really installing the + * new version of the key to avoid nonce reuse or replay issues. + */ + if (ieee80211_key_identical(sdata, old_key, key)) { + ret = -EALREADY; + goto out; + } + + key->local = sdata->local; + key->sdata = sdata; + key->sta = sta; + + /* + * Assign a unique ID to every key so we can easily prevent mixed + * key and fragment cache attacks. + */ + key->color = atomic_inc_return(&key_color); + + increment_tailroom_need_count(sdata); + + ret = ieee80211_key_replace(sdata, link, sta, pairwise, old_key, key); + + if (!ret) { + ieee80211_debugfs_key_add(key); + ieee80211_key_destroy(old_key, delay_tailroom); + } else { + ieee80211_key_free(key, delay_tailroom); + } + + key = NULL; + + out: + ieee80211_key_free_unused(key); + mutex_unlock(&sdata->local->key_mtx); + + return ret; +} + +void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom) +{ + if (!key) + return; + + /* + * Replace key with nothingness if it was ever used. 
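+ * (i.e. unlink it with ieee80211_key_replace(..., key, NULL) and then
+ *  free it after an RCU grace period via ieee80211_key_destroy().)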
+ */ + if (key->sdata) + ieee80211_key_replace(key->sdata, NULL, key->sta, + key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, + key, NULL); + ieee80211_key_destroy(key, delay_tailroom); +} + +void ieee80211_reenable_keys(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_key *key; + struct ieee80211_sub_if_data *vlan; + + lockdep_assert_wiphy(sdata->local->hw.wiphy); + + mutex_lock(&sdata->local->key_mtx); + + sdata->crypto_tx_tailroom_needed_cnt = 0; + sdata->crypto_tx_tailroom_pending_dec = 0; + + if (sdata->vif.type == NL80211_IFTYPE_AP) { + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { + vlan->crypto_tx_tailroom_needed_cnt = 0; + vlan->crypto_tx_tailroom_pending_dec = 0; + } + } + + if (ieee80211_sdata_running(sdata)) { + list_for_each_entry(key, &sdata->key_list, list) { + increment_tailroom_need_count(sdata); + ieee80211_key_enable_hw_accel(key); + } + } + + mutex_unlock(&sdata->local->key_mtx); +} + +void ieee80211_iter_keys(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void (*iter)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data), + void *iter_data) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_key *key, *tmp; + struct ieee80211_sub_if_data *sdata; + + lockdep_assert_wiphy(hw->wiphy); + + mutex_lock(&local->key_mtx); + if (vif) { + sdata = vif_to_sdata(vif); + list_for_each_entry_safe(key, tmp, &sdata->key_list, list) + iter(hw, &sdata->vif, + key->sta ? &key->sta->sta : NULL, + &key->conf, iter_data); + } else { + list_for_each_entry(sdata, &local->interfaces, list) + list_for_each_entry_safe(key, tmp, + &sdata->key_list, list) + iter(hw, &sdata->vif, + key->sta ? &key->sta->sta : NULL, + &key->conf, iter_data); + } + mutex_unlock(&local->key_mtx); +} +EXPORT_SYMBOL(ieee80211_iter_keys); + +static void +_ieee80211_iter_keys_rcu(struct ieee80211_hw *hw, + struct ieee80211_sub_if_data *sdata, + void (*iter)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data), + void *iter_data) +{ + struct ieee80211_key *key; + + list_for_each_entry_rcu(key, &sdata->key_list, list) { + /* skip keys of station in removal process */ + if (key->sta && key->sta->removed) + continue; + if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) + continue; + + iter(hw, &sdata->vif, + key->sta ? 
&key->sta->sta : NULL, + &key->conf, iter_data); + } +} + +void ieee80211_iter_keys_rcu(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void (*iter)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data), + void *iter_data) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata; + + if (vif) { + sdata = vif_to_sdata(vif); + _ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data); + } else { + list_for_each_entry_rcu(sdata, &local->interfaces, list) + _ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data); + } +} +EXPORT_SYMBOL(ieee80211_iter_keys_rcu); + +static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata, + struct list_head *keys) +{ + struct ieee80211_key *key, *tmp; + + decrease_tailroom_need_count(sdata, + sdata->crypto_tx_tailroom_pending_dec); + sdata->crypto_tx_tailroom_pending_dec = 0; + + ieee80211_debugfs_key_remove_mgmt_default(sdata); + ieee80211_debugfs_key_remove_beacon_default(sdata); + + list_for_each_entry_safe(key, tmp, &sdata->key_list, list) { + ieee80211_key_replace(key->sdata, NULL, key->sta, + key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, + key, NULL); + list_add_tail(&key->list, keys); + } + + ieee80211_debugfs_key_update_default(sdata); +} + +void ieee80211_remove_link_keys(struct ieee80211_link_data *link, + struct list_head *keys) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_key *key, *tmp; + + mutex_lock(&local->key_mtx); + list_for_each_entry_safe(key, tmp, &sdata->key_list, list) { + if (key->conf.link_id != link->link_id) + continue; + ieee80211_key_replace(key->sdata, link, key->sta, + key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, + key, NULL); + list_add_tail(&key->list, keys); + } + mutex_unlock(&local->key_mtx); +} + +void ieee80211_free_key_list(struct ieee80211_local *local, + struct list_head *keys) +{ + struct ieee80211_key *key, *tmp; + + mutex_lock(&local->key_mtx); + list_for_each_entry_safe(key, tmp, keys, list) + __ieee80211_key_destroy(key, false); + mutex_unlock(&local->key_mtx); +} + +void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata, + bool force_synchronize) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_sub_if_data *vlan; + struct ieee80211_sub_if_data *master; + struct ieee80211_key *key, *tmp; + LIST_HEAD(keys); + + cancel_delayed_work_sync(&sdata->dec_tailroom_needed_wk); + + mutex_lock(&local->key_mtx); + + ieee80211_free_keys_iface(sdata, &keys); + + if (sdata->vif.type == NL80211_IFTYPE_AP) { + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) + ieee80211_free_keys_iface(vlan, &keys); + } + + if (!list_empty(&keys) || force_synchronize) + synchronize_net(); + list_for_each_entry_safe(key, tmp, &keys, list) + __ieee80211_key_destroy(key, false); + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + if (sdata->bss) { + master = container_of(sdata->bss, + struct ieee80211_sub_if_data, + u.ap); + + WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt != + master->crypto_tx_tailroom_needed_cnt); + } + } else { + WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || + sdata->crypto_tx_tailroom_pending_dec); + } + + if (sdata->vif.type == NL80211_IFTYPE_AP) { + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) + WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt || + vlan->crypto_tx_tailroom_pending_dec); + } + + mutex_unlock(&local->key_mtx); +} + +void 
ieee80211_free_sta_keys(struct ieee80211_local *local, + struct sta_info *sta) +{ + struct ieee80211_key *key; + int i; + + mutex_lock(&local->key_mtx); + for (i = 0; i < ARRAY_SIZE(sta->deflink.gtk); i++) { + key = key_mtx_dereference(local, sta->deflink.gtk[i]); + if (!key) + continue; + ieee80211_key_replace(key->sdata, NULL, key->sta, + key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, + key, NULL); + __ieee80211_key_destroy(key, key->sdata->vif.type == + NL80211_IFTYPE_STATION); + } + + for (i = 0; i < NUM_DEFAULT_KEYS; i++) { + key = key_mtx_dereference(local, sta->ptk[i]); + if (!key) + continue; + ieee80211_key_replace(key->sdata, NULL, key->sta, + key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, + key, NULL); + __ieee80211_key_destroy(key, key->sdata->vif.type == + NL80211_IFTYPE_STATION); + } + + mutex_unlock(&local->key_mtx); +} + +void ieee80211_delayed_tailroom_dec(struct work_struct *wk) +{ + struct ieee80211_sub_if_data *sdata; + + sdata = container_of(wk, struct ieee80211_sub_if_data, + dec_tailroom_needed_wk.work); + + /* + * The reason for the delayed tailroom needed decrementing is to + * make roaming faster: during roaming, all keys are first deleted + * and then new keys are installed. The first new key causes the + * crypto_tx_tailroom_needed_cnt to go from 0 to 1, which invokes + * the cost of synchronize_net() (which can be slow). Avoid this + * by deferring the crypto_tx_tailroom_needed_cnt decrementing on + * key removal for a while, so if we roam the value is larger than + * zero and no 0->1 transition happens. + * + * The cost is that if the AP switching was from an AP with keys + * to one without, we still allocate tailroom while it would no + * longer be needed. However, in the typical (fast) roaming case + * within an ESS this usually won't happen. 
+ */ + + mutex_lock(&sdata->local->key_mtx); + decrease_tailroom_need_count(sdata, + sdata->crypto_tx_tailroom_pending_dec); + sdata->crypto_tx_tailroom_pending_dec = 0; + mutex_unlock(&sdata->local->key_mtx); +} + +void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid, + const u8 *replay_ctr, gfp_t gfp) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + trace_api_gtk_rekey_notify(sdata, bssid, replay_ctr); + + cfg80211_gtk_rekey_notify(sdata->dev, bssid, replay_ctr, gfp); +} +EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_notify); + +void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, + int tid, struct ieee80211_key_seq *seq) +{ + struct ieee80211_key *key; + const u8 *pn; + + key = container_of(keyconf, struct ieee80211_key, conf); + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_TKIP: + if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS)) + return; + seq->tkip.iv32 = key->u.tkip.rx[tid].iv32; + seq->tkip.iv16 = key->u.tkip.rx[tid].iv16; + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) + return; + if (tid < 0) + pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS]; + else + pn = key->u.ccmp.rx_pn[tid]; + memcpy(seq->ccmp.pn, pn, IEEE80211_CCMP_PN_LEN); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + if (WARN_ON(tid != 0)) + return; + pn = key->u.aes_cmac.rx_pn; + memcpy(seq->aes_cmac.pn, pn, IEEE80211_CMAC_PN_LEN); + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + if (WARN_ON(tid != 0)) + return; + pn = key->u.aes_gmac.rx_pn; + memcpy(seq->aes_gmac.pn, pn, IEEE80211_GMAC_PN_LEN); + break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) + return; + if (tid < 0) + pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS]; + else + pn = key->u.gcmp.rx_pn[tid]; + memcpy(seq->gcmp.pn, pn, IEEE80211_GCMP_PN_LEN); + break; + } +} +EXPORT_SYMBOL(ieee80211_get_key_rx_seq); + +void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf, + int tid, struct ieee80211_key_seq *seq) +{ + struct ieee80211_key *key; + u8 *pn; + + key = container_of(keyconf, struct ieee80211_key, conf); + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_TKIP: + if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS)) + return; + key->u.tkip.rx[tid].iv32 = seq->tkip.iv32; + key->u.tkip.rx[tid].iv16 = seq->tkip.iv16; + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) + return; + if (tid < 0) + pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS]; + else + pn = key->u.ccmp.rx_pn[tid]; + memcpy(pn, seq->ccmp.pn, IEEE80211_CCMP_PN_LEN); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + if (WARN_ON(tid != 0)) + return; + pn = key->u.aes_cmac.rx_pn; + memcpy(pn, seq->aes_cmac.pn, IEEE80211_CMAC_PN_LEN); + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + if (WARN_ON(tid != 0)) + return; + pn = key->u.aes_gmac.rx_pn; + memcpy(pn, seq->aes_gmac.pn, IEEE80211_GMAC_PN_LEN); + break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) + return; + if (tid < 0) + pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS]; + else + pn = key->u.gcmp.rx_pn[tid]; + memcpy(pn, seq->gcmp.pn, IEEE80211_GCMP_PN_LEN); + break; + default: + WARN_ON(1); + break; + } +} 
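Illustrative only, not part of the patch: a rough sketch of how a driver with firmware-offloaded RX might use the two helpers above on resume, reading back the replay counters its firmware kept while the host slept and pushing them into mac80211. The mydrv_fw_state structure and mydrv_fw_get_ccmp_pn() are hypothetical driver-side names; the mac80211 calls and locking requirement (wiphy mutex held for ieee80211_iter_keys()) follow the definitions in this file.

static void mydrv_restore_rx_pn(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_sta *sta,
				struct ieee80211_key_conf *keyconf,
				void *data)
{
	struct mydrv_fw_state *fw = data;	/* hypothetical firmware state */
	struct ieee80211_key_seq seq = {};
	int tid;

	if (keyconf->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
		/* hypothetical: copy the last RX PN the firmware saw */
		mydrv_fw_get_ccmp_pn(fw, keyconf, tid, seq.ccmp.pn);
		ieee80211_set_key_rx_seq(keyconf, tid, &seq);
	}
}

/* on resume, with the wiphy mutex held as ieee80211_iter_keys() requires */
static void mydrv_resume_keys(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct mydrv_fw_state *fw)
{
	ieee80211_iter_keys(hw, vif, mydrv_restore_rx_pn, fw);
}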
+EXPORT_SYMBOL_GPL(ieee80211_set_key_rx_seq); + +void ieee80211_remove_key(struct ieee80211_key_conf *keyconf) +{ + struct ieee80211_key *key; + + key = container_of(keyconf, struct ieee80211_key, conf); + + assert_key_lock(key->local); + + /* + * if key was uploaded, we assume the driver will/has remove(d) + * it, so adjust bookkeeping accordingly + */ + if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { + key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; + + if (!(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | + IEEE80211_KEY_FLAG_PUT_MIC_SPACE | + IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) + increment_tailroom_need_count(key->sdata); + } + + ieee80211_key_free(key, false); +} +EXPORT_SYMBOL_GPL(ieee80211_remove_key); + +struct ieee80211_key_conf * +ieee80211_gtk_rekey_add(struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + struct ieee80211_key *key; + int err; + + if (WARN_ON(!local->wowlan)) + return ERR_PTR(-EINVAL); + + if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) + return ERR_PTR(-EINVAL); + + key = ieee80211_key_alloc(keyconf->cipher, keyconf->keyidx, + keyconf->keylen, keyconf->key, + 0, NULL); + if (IS_ERR(key)) + return ERR_CAST(key); + + if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED) + key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; + + /* FIXME: this function needs to get a link ID */ + err = ieee80211_key_link(key, &sdata->deflink, NULL); + if (err) + return ERR_PTR(err); + + return &key->conf; +} +EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_add); + +void ieee80211_key_mic_failure(struct ieee80211_key_conf *keyconf) +{ + struct ieee80211_key *key; + + key = container_of(keyconf, struct ieee80211_key, conf); + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + key->u.aes_cmac.icverrors++; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + key->u.aes_gmac.icverrors++; + break; + default: + /* ignore the others for now, we don't keep counters now */ + break; + } +} +EXPORT_SYMBOL_GPL(ieee80211_key_mic_failure); + +void ieee80211_key_replay(struct ieee80211_key_conf *keyconf) +{ + struct ieee80211_key *key; + + key = container_of(keyconf, struct ieee80211_key, conf); + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + key->u.ccmp.replays++; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + key->u.aes_cmac.replays++; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + key->u.aes_gmac.replays++; + break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + key->u.gcmp.replays++; + break; + } +} +EXPORT_SYMBOL_GPL(ieee80211_key_replay); + +int ieee80211_key_switch_links(struct ieee80211_sub_if_data *sdata, + unsigned long del_links_mask, + unsigned long add_links_mask) +{ + struct ieee80211_key *key; + int ret; + + list_for_each_entry(key, &sdata->key_list, list) { + if (key->conf.link_id < 0 || + !(del_links_mask & BIT(key->conf.link_id))) + continue; + + /* shouldn't happen for per-link keys */ + WARN_ON(key->sta); + + ieee80211_key_disable_hw_accel(key); + } + + list_for_each_entry(key, &sdata->key_list, list) { + if (key->conf.link_id < 0 || + !(add_links_mask & BIT(key->conf.link_id))) + continue; + + /* shouldn't happen for per-link keys */ + WARN_ON(key->sta); + + ret = ieee80211_key_enable_hw_accel(key); + if (ret) + return ret; + 
} + + return 0; +} diff --git a/net/mac80211/key.h b/net/mac80211/key.h new file mode 100644 index 0000000000..f3df97df4b --- /dev/null +++ b/net/mac80211/key.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2002-2004, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. + * Copyright (C) 2019, 2022 Intel Corporation + */ + +#ifndef IEEE80211_KEY_H +#define IEEE80211_KEY_H + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/crypto.h> +#include <linux/rcupdate.h> +#include <crypto/arc4.h> +#include <net/mac80211.h> + +#define NUM_DEFAULT_KEYS 4 +#define NUM_DEFAULT_MGMT_KEYS 2 +#define NUM_DEFAULT_BEACON_KEYS 2 +#define INVALID_PTK_KEYIDX 2 /* Keyidx always pointing to a NULL key for PTK */ + +struct ieee80211_local; +struct ieee80211_sub_if_data; +struct ieee80211_link_data; +struct sta_info; + +/** + * enum ieee80211_internal_key_flags - internal key flags + * + * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present + * in the hardware for TX crypto hardware acceleration. + * @KEY_FLAG_TAINTED: Key is tainted and packets should be dropped. + */ +enum ieee80211_internal_key_flags { + KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0), + KEY_FLAG_TAINTED = BIT(1), +}; + +enum ieee80211_internal_tkip_state { + TKIP_STATE_NOT_INIT, + TKIP_STATE_PHASE1_DONE, + TKIP_STATE_PHASE1_HW_UPLOADED, +}; + +struct tkip_ctx { + u16 p1k[5]; /* p1k cache */ + u32 p1k_iv32; /* iv32 for which p1k computed */ + enum ieee80211_internal_tkip_state state; +}; + +struct tkip_ctx_rx { + struct tkip_ctx ctx; + u32 iv32; /* current iv32 */ + u16 iv16; /* current iv16 */ +}; + +struct ieee80211_key { + struct ieee80211_local *local; + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + + /* for sdata list */ + struct list_head list; + + /* protected by key mutex */ + unsigned int flags; + + union { + struct { + /* protects tx context */ + spinlock_t txlock; + + /* last used TSC */ + struct tkip_ctx tx; + + /* last received RSC */ + struct tkip_ctx_rx rx[IEEE80211_NUM_TIDS]; + + /* number of mic failures */ + u32 mic_failures; + } tkip; + struct { + /* + * Last received packet number. The first + * IEEE80211_NUM_TIDS counters are used with Data + * frames and the last counter is used with Robust + * Management frames. + */ + u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_CCMP_PN_LEN]; + struct crypto_aead *tfm; + u32 replays; /* dot11RSNAStatsCCMPReplays */ + } ccmp; + struct { + u8 rx_pn[IEEE80211_CMAC_PN_LEN]; + struct crypto_shash *tfm; + u32 replays; /* dot11RSNAStatsCMACReplays */ + u32 icverrors; /* dot11RSNAStatsCMACICVErrors */ + } aes_cmac; + struct { + u8 rx_pn[IEEE80211_GMAC_PN_LEN]; + struct crypto_aead *tfm; + u32 replays; /* dot11RSNAStatsCMACReplays */ + u32 icverrors; /* dot11RSNAStatsCMACICVErrors */ + } aes_gmac; + struct { + /* Last received packet number. The first + * IEEE80211_NUM_TIDS counters are used with Data + * frames and the last counter is used with Robust + * Management frames. 
+ */ + u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_GCMP_PN_LEN]; + struct crypto_aead *tfm; + u32 replays; /* dot11RSNAStatsGCMPReplays */ + } gcmp; + struct { + /* generic cipher scheme */ + u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_MAX_PN_LEN]; + } gen; + } u; + +#ifdef CONFIG_MAC80211_DEBUGFS + struct { + struct dentry *stalink; + struct dentry *dir; + int cnt; + } debugfs; +#endif + + unsigned int color; + + /* + * key config, must be last because it contains key + * material as variable length member + */ + struct ieee80211_key_conf conf; +}; + +struct ieee80211_key * +ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, + const u8 *key_data, + size_t seq_len, const u8 *seq); +/* + * Insert a key into data structures (sdata, sta if necessary) + * to make it used, free old key. On failure, also free the new key. + */ +int ieee80211_key_link(struct ieee80211_key *key, + struct ieee80211_link_data *link, + struct sta_info *sta); +int ieee80211_set_tx_key(struct ieee80211_key *key); +void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom); +void ieee80211_key_free_unused(struct ieee80211_key *key); +void ieee80211_set_default_key(struct ieee80211_link_data *link, int idx, + bool uni, bool multi); +void ieee80211_set_default_mgmt_key(struct ieee80211_link_data *link, + int idx); +void ieee80211_set_default_beacon_key(struct ieee80211_link_data *link, + int idx); +void ieee80211_remove_link_keys(struct ieee80211_link_data *link, + struct list_head *keys); +void ieee80211_free_key_list(struct ieee80211_local *local, + struct list_head *keys); +void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata, + bool force_synchronize); +void ieee80211_free_sta_keys(struct ieee80211_local *local, + struct sta_info *sta); +void ieee80211_reenable_keys(struct ieee80211_sub_if_data *sdata); +int ieee80211_key_switch_links(struct ieee80211_sub_if_data *sdata, + unsigned long del_links_mask, + unsigned long add_links_mask); + +#define key_mtx_dereference(local, ref) \ + rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx))) +#define rcu_dereference_check_key_mtx(local, ref) \ + rcu_dereference_check(ref, lockdep_is_held(&((local)->key_mtx))) + +void ieee80211_delayed_tailroom_dec(struct work_struct *wk); + +#endif /* IEEE80211_KEY_H */ diff --git a/net/mac80211/led.c b/net/mac80211/led.c new file mode 100644 index 0000000000..2dc732147e --- /dev/null +++ b/net/mac80211/led.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2006, Johannes Berg <johannes@sipsolutions.net> + */ + +/* just for IFNAMSIZ */ +#include <linux/if.h> +#include <linux/slab.h> +#include <linux/export.h> +#include "led.h" + +void ieee80211_led_assoc(struct ieee80211_local *local, bool associated) +{ + if (!atomic_read(&local->assoc_led_active)) + return; + if (associated) + led_trigger_event(&local->assoc_led, LED_FULL); + else + led_trigger_event(&local->assoc_led, LED_OFF); +} + +void ieee80211_led_radio(struct ieee80211_local *local, bool enabled) +{ + if (!atomic_read(&local->radio_led_active)) + return; + if (enabled) + led_trigger_event(&local->radio_led, LED_FULL); + else + led_trigger_event(&local->radio_led, LED_OFF); +} + +void ieee80211_alloc_led_names(struct ieee80211_local *local) +{ + local->rx_led.name = kasprintf(GFP_KERNEL, "%srx", + wiphy_name(local->hw.wiphy)); + local->tx_led.name = kasprintf(GFP_KERNEL, "%stx", + wiphy_name(local->hw.wiphy)); + local->assoc_led.name = kasprintf(GFP_KERNEL, "%sassoc", + wiphy_name(local->hw.wiphy)); + 
local->radio_led.name = kasprintf(GFP_KERNEL, "%sradio", + wiphy_name(local->hw.wiphy)); +} + +void ieee80211_free_led_names(struct ieee80211_local *local) +{ + kfree(local->rx_led.name); + kfree(local->tx_led.name); + kfree(local->assoc_led.name); + kfree(local->radio_led.name); +} + +static int ieee80211_tx_led_activate(struct led_classdev *led_cdev) +{ + struct ieee80211_local *local = container_of(led_cdev->trigger, + struct ieee80211_local, + tx_led); + + atomic_inc(&local->tx_led_active); + + return 0; +} + +static void ieee80211_tx_led_deactivate(struct led_classdev *led_cdev) +{ + struct ieee80211_local *local = container_of(led_cdev->trigger, + struct ieee80211_local, + tx_led); + + atomic_dec(&local->tx_led_active); +} + +static int ieee80211_rx_led_activate(struct led_classdev *led_cdev) +{ + struct ieee80211_local *local = container_of(led_cdev->trigger, + struct ieee80211_local, + rx_led); + + atomic_inc(&local->rx_led_active); + + return 0; +} + +static void ieee80211_rx_led_deactivate(struct led_classdev *led_cdev) +{ + struct ieee80211_local *local = container_of(led_cdev->trigger, + struct ieee80211_local, + rx_led); + + atomic_dec(&local->rx_led_active); +} + +static int ieee80211_assoc_led_activate(struct led_classdev *led_cdev) +{ + struct ieee80211_local *local = container_of(led_cdev->trigger, + struct ieee80211_local, + assoc_led); + + atomic_inc(&local->assoc_led_active); + + return 0; +} + +static void ieee80211_assoc_led_deactivate(struct led_classdev *led_cdev) +{ + struct ieee80211_local *local = container_of(led_cdev->trigger, + struct ieee80211_local, + assoc_led); + + atomic_dec(&local->assoc_led_active); +} + +static int ieee80211_radio_led_activate(struct led_classdev *led_cdev) +{ + struct ieee80211_local *local = container_of(led_cdev->trigger, + struct ieee80211_local, + radio_led); + + atomic_inc(&local->radio_led_active); + + return 0; +} + +static void ieee80211_radio_led_deactivate(struct led_classdev *led_cdev) +{ + struct ieee80211_local *local = container_of(led_cdev->trigger, + struct ieee80211_local, + radio_led); + + atomic_dec(&local->radio_led_active); +} + +static int ieee80211_tpt_led_activate(struct led_classdev *led_cdev) +{ + struct ieee80211_local *local = container_of(led_cdev->trigger, + struct ieee80211_local, + tpt_led); + + atomic_inc(&local->tpt_led_active); + + return 0; +} + +static void ieee80211_tpt_led_deactivate(struct led_classdev *led_cdev) +{ + struct ieee80211_local *local = container_of(led_cdev->trigger, + struct ieee80211_local, + tpt_led); + + atomic_dec(&local->tpt_led_active); +} + +void ieee80211_led_init(struct ieee80211_local *local) +{ + atomic_set(&local->rx_led_active, 0); + local->rx_led.activate = ieee80211_rx_led_activate; + local->rx_led.deactivate = ieee80211_rx_led_deactivate; + if (local->rx_led.name && led_trigger_register(&local->rx_led)) { + kfree(local->rx_led.name); + local->rx_led.name = NULL; + } + + atomic_set(&local->tx_led_active, 0); + local->tx_led.activate = ieee80211_tx_led_activate; + local->tx_led.deactivate = ieee80211_tx_led_deactivate; + if (local->tx_led.name && led_trigger_register(&local->tx_led)) { + kfree(local->tx_led.name); + local->tx_led.name = NULL; + } + + atomic_set(&local->assoc_led_active, 0); + local->assoc_led.activate = ieee80211_assoc_led_activate; + local->assoc_led.deactivate = ieee80211_assoc_led_deactivate; + if (local->assoc_led.name && led_trigger_register(&local->assoc_led)) { + kfree(local->assoc_led.name); + local->assoc_led.name = NULL; + } + + 
atomic_set(&local->radio_led_active, 0); + local->radio_led.activate = ieee80211_radio_led_activate; + local->radio_led.deactivate = ieee80211_radio_led_deactivate; + if (local->radio_led.name && led_trigger_register(&local->radio_led)) { + kfree(local->radio_led.name); + local->radio_led.name = NULL; + } + + atomic_set(&local->tpt_led_active, 0); + if (local->tpt_led_trigger) { + local->tpt_led.activate = ieee80211_tpt_led_activate; + local->tpt_led.deactivate = ieee80211_tpt_led_deactivate; + if (led_trigger_register(&local->tpt_led)) { + kfree(local->tpt_led_trigger); + local->tpt_led_trigger = NULL; + } + } +} + +void ieee80211_led_exit(struct ieee80211_local *local) +{ + if (local->radio_led.name) + led_trigger_unregister(&local->radio_led); + if (local->assoc_led.name) + led_trigger_unregister(&local->assoc_led); + if (local->tx_led.name) + led_trigger_unregister(&local->tx_led); + if (local->rx_led.name) + led_trigger_unregister(&local->rx_led); + + if (local->tpt_led_trigger) { + led_trigger_unregister(&local->tpt_led); + kfree(local->tpt_led_trigger); + } +} + +const char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + return local->radio_led.name; +} +EXPORT_SYMBOL(__ieee80211_get_radio_led_name); + +const char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + return local->assoc_led.name; +} +EXPORT_SYMBOL(__ieee80211_get_assoc_led_name); + +const char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + return local->tx_led.name; +} +EXPORT_SYMBOL(__ieee80211_get_tx_led_name); + +const char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + return local->rx_led.name; +} +EXPORT_SYMBOL(__ieee80211_get_rx_led_name); + +static unsigned long tpt_trig_traffic(struct ieee80211_local *local, + struct tpt_led_trigger *tpt_trig) +{ + unsigned long traffic, delta; + + traffic = tpt_trig->tx_bytes + tpt_trig->rx_bytes; + + delta = traffic - tpt_trig->prev_traffic; + tpt_trig->prev_traffic = traffic; + return DIV_ROUND_UP(delta, 1024 / 8); +} + +static void tpt_trig_timer(struct timer_list *t) +{ + struct tpt_led_trigger *tpt_trig = from_timer(tpt_trig, t, timer); + struct ieee80211_local *local = tpt_trig->local; + unsigned long on, off, tpt; + int i; + + if (!tpt_trig->running) + return; + + mod_timer(&tpt_trig->timer, round_jiffies(jiffies + HZ)); + + tpt = tpt_trig_traffic(local, tpt_trig); + + /* default to just solid on */ + on = 1; + off = 0; + + for (i = tpt_trig->blink_table_len - 1; i >= 0; i--) { + if (tpt_trig->blink_table[i].throughput < 0 || + tpt > tpt_trig->blink_table[i].throughput) { + off = tpt_trig->blink_table[i].blink_time / 2; + on = tpt_trig->blink_table[i].blink_time - off; + break; + } + } + + led_trigger_blink(&local->tpt_led, on, off); +} + +const char * +__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, + unsigned int flags, + const struct ieee80211_tpt_blink *blink_table, + unsigned int blink_table_len) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct tpt_led_trigger *tpt_trig; + + if (WARN_ON(local->tpt_led_trigger)) + return NULL; + + tpt_trig = kzalloc(sizeof(struct tpt_led_trigger), GFP_KERNEL); + if (!tpt_trig) + return NULL; + + snprintf(tpt_trig->name, sizeof(tpt_trig->name), + "%stpt", wiphy_name(local->hw.wiphy)); + + local->tpt_led.name = tpt_trig->name; + + tpt_trig->blink_table 
= blink_table; + tpt_trig->blink_table_len = blink_table_len; + tpt_trig->want = flags; + tpt_trig->local = local; + + timer_setup(&tpt_trig->timer, tpt_trig_timer, 0); + + local->tpt_led_trigger = tpt_trig; + + return tpt_trig->name; +} +EXPORT_SYMBOL(__ieee80211_create_tpt_led_trigger); + +static void ieee80211_start_tpt_led_trig(struct ieee80211_local *local) +{ + struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger; + + if (tpt_trig->running) + return; + + /* reset traffic */ + tpt_trig_traffic(local, tpt_trig); + tpt_trig->running = true; + + tpt_trig_timer(&tpt_trig->timer); + mod_timer(&tpt_trig->timer, round_jiffies(jiffies + HZ)); +} + +static void ieee80211_stop_tpt_led_trig(struct ieee80211_local *local) +{ + struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger; + + if (!tpt_trig->running) + return; + + tpt_trig->running = false; + del_timer_sync(&tpt_trig->timer); + + led_trigger_event(&local->tpt_led, LED_OFF); +} + +void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local, + unsigned int types_on, unsigned int types_off) +{ + struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger; + bool allowed; + + WARN_ON(types_on & types_off); + + if (!tpt_trig) + return; + + tpt_trig->active &= ~types_off; + tpt_trig->active |= types_on; + + /* + * Regardless of wanted state, we shouldn't blink when + * the radio is disabled -- this can happen due to some + * code ordering issues with __ieee80211_recalc_idle() + * being called before the radio is started. + */ + allowed = tpt_trig->active & IEEE80211_TPT_LEDTRIG_FL_RADIO; + + if (!allowed || !(tpt_trig->active & tpt_trig->want)) + ieee80211_stop_tpt_led_trig(local); + else + ieee80211_start_tpt_led_trig(local); +} diff --git a/net/mac80211/led.h b/net/mac80211/led.h new file mode 100644 index 0000000000..d25f13346b --- /dev/null +++ b/net/mac80211/led.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2006, Johannes Berg <johannes@sipsolutions.net> + */ + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/leds.h> +#include "ieee80211_i.h" + +#define MAC80211_BLINK_DELAY 50 /* ms */ + +static inline void ieee80211_led_rx(struct ieee80211_local *local) +{ +#ifdef CONFIG_MAC80211_LEDS + if (!atomic_read(&local->rx_led_active)) + return; + led_trigger_blink_oneshot(&local->rx_led, MAC80211_BLINK_DELAY, MAC80211_BLINK_DELAY, 0); +#endif +} + +static inline void ieee80211_led_tx(struct ieee80211_local *local) +{ +#ifdef CONFIG_MAC80211_LEDS + if (!atomic_read(&local->tx_led_active)) + return; + led_trigger_blink_oneshot(&local->tx_led, MAC80211_BLINK_DELAY, MAC80211_BLINK_DELAY, 0); +#endif +} + +#ifdef CONFIG_MAC80211_LEDS +void ieee80211_led_assoc(struct ieee80211_local *local, + bool associated); +void ieee80211_led_radio(struct ieee80211_local *local, + bool enabled); +void ieee80211_alloc_led_names(struct ieee80211_local *local); +void ieee80211_free_led_names(struct ieee80211_local *local); +void ieee80211_led_init(struct ieee80211_local *local); +void ieee80211_led_exit(struct ieee80211_local *local); +void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local, + unsigned int types_on, unsigned int types_off); +#else +static inline void ieee80211_led_assoc(struct ieee80211_local *local, + bool associated) +{ +} +static inline void ieee80211_led_radio(struct ieee80211_local *local, + bool enabled) +{ +} +static inline void ieee80211_alloc_led_names(struct ieee80211_local *local) +{ +} +static inline void ieee80211_free_led_names(struct ieee80211_local *local) +{ +} 
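The trigger names exported by led.c above (and the throughput trigger created by __ieee80211_create_tpt_led_trigger()) reach drivers through the ieee80211_get_*_led_name() and ieee80211_create_tpt_led_trigger() wrappers in include/net/mac80211.h, which simply return NULL when CONFIG_MAC80211_LEDS is compiled out. Below is a minimal driver-side sketch of wiring an LED class device to these triggers; the my_drv_* names, the private-data layout and the blink-table values are hypothetical, only the mac80211 and LED-core calls are real:

/* Hypothetical driver glue: my_drv_priv, my_drv_blink_table and the LED
 * name are illustrative only; the mac80211/leds API calls are real. */
#include <linux/leds.h>
#include <net/mac80211.h>

struct my_drv_priv {
	struct device *dev;
	struct led_classdev led;
};

/* throughput is in Kbit/s, blink_time in ms (struct ieee80211_tpt_blink);
 * the table is scanned highest-first in tpt_trig_timer() above, so the
 * first entry with throughput 0 acts as the catch-all. */
static const struct ieee80211_tpt_blink my_drv_blink_table[] = {
	{ .throughput = 0,          .blink_time = 334 },
	{ .throughput = 5 * 1024,   .blink_time = 220 },
	{ .throughput = 50 * 1024,  .blink_time = 160 },
	{ .throughput = 200 * 1024, .blink_time = 100 },
};

static int my_drv_leds_init(struct my_drv_priv *priv, struct ieee80211_hw *hw)
{
	/* The simplest alternative would be to follow TX activity via
	 * ieee80211_get_tx_led_name(hw).  Here we blink proportionally to
	 * throughput instead: mac80211 then owns the 1 s timer and calls
	 * led_trigger_blink() as implemented in led.c above. */
	priv->led.default_trigger =
		ieee80211_create_tpt_led_trigger(hw,
						 IEEE80211_TPT_LEDTRIG_FL_RADIO,
						 my_drv_blink_table,
						 ARRAY_SIZE(my_drv_blink_table));

	priv->led.name = "my_drv::radio";
	return devm_led_classdev_register(priv->dev, &priv->led);
}

With IEEE80211_TPT_LEDTRIG_FL_RADIO as the only "want" flag, ieee80211_mod_tpt_led_trig() starts blinking as soon as the radio is enabled; drivers that only want blinking while associated would pass IEEE80211_TPT_LEDTRIG_FL_CONNECTED instead.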
+static inline void ieee80211_led_init(struct ieee80211_local *local) +{ +} +static inline void ieee80211_led_exit(struct ieee80211_local *local) +{ +} +static inline void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local, + unsigned int types_on, + unsigned int types_off) +{ +} +#endif + +static inline void +ieee80211_tpt_led_trig_tx(struct ieee80211_local *local, int bytes) +{ +#ifdef CONFIG_MAC80211_LEDS + if (atomic_read(&local->tpt_led_active)) + local->tpt_led_trigger->tx_bytes += bytes; +#endif +} + +static inline void +ieee80211_tpt_led_trig_rx(struct ieee80211_local *local, int bytes) +{ +#ifdef CONFIG_MAC80211_LEDS + if (atomic_read(&local->tpt_led_active)) + local->tpt_led_trigger->rx_bytes += bytes; +#endif +} diff --git a/net/mac80211/link.c b/net/mac80211/link.c new file mode 100644 index 0000000000..16cbaea93f --- /dev/null +++ b/net/mac80211/link.c @@ -0,0 +1,517 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * MLO link handling + * + * Copyright (C) 2022-2023 Intel Corporation + */ +#include <linux/slab.h> +#include <linux/kernel.h> +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "key.h" +#include "debugfs_netdev.h" + +void ieee80211_link_setup(struct ieee80211_link_data *link) +{ + if (link->sdata->vif.type == NL80211_IFTYPE_STATION) + ieee80211_mgd_setup_link(link); +} + +void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, + int link_id, + struct ieee80211_link_data *link, + struct ieee80211_bss_conf *link_conf) +{ + bool deflink = link_id < 0; + + if (link_id < 0) + link_id = 0; + + rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf); + rcu_assign_pointer(sdata->link[link_id], link); + + link->sdata = sdata; + link->link_id = link_id; + link->conf = link_conf; + link_conf->link_id = link_id; + link_conf->vif = &sdata->vif; + + INIT_WORK(&link->csa_finalize_work, + ieee80211_csa_finalize_work); + INIT_WORK(&link->color_change_finalize_work, + ieee80211_color_change_finalize_work); + INIT_DELAYED_WORK(&link->color_collision_detect_work, + ieee80211_color_collision_detection_work); + INIT_LIST_HEAD(&link->assigned_chanctx_list); + INIT_LIST_HEAD(&link->reserved_chanctx_list); + INIT_DELAYED_WORK(&link->dfs_cac_timer_work, + ieee80211_dfs_cac_timer_work); + + if (!deflink) { + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP: + ether_addr_copy(link_conf->addr, + sdata->wdev.links[link_id].addr); + link_conf->bssid = link_conf->addr; + WARN_ON(!(sdata->wdev.valid_links & BIT(link_id))); + break; + case NL80211_IFTYPE_STATION: + /* station sets the bssid in ieee80211_mgd_setup_link */ + break; + default: + WARN_ON(1); + } + + ieee80211_link_debugfs_add(link); + } +} + +void ieee80211_link_stop(struct ieee80211_link_data *link) +{ + if (link->sdata->vif.type == NL80211_IFTYPE_STATION) + ieee80211_mgd_stop_link(link); + + cancel_delayed_work_sync(&link->color_collision_detect_work); + ieee80211_link_release_channel(link); +} + +struct link_container { + struct ieee80211_link_data data; + struct ieee80211_bss_conf conf; +}; + +static void ieee80211_tear_down_links(struct ieee80211_sub_if_data *sdata, + struct link_container **links, u16 mask) +{ + struct ieee80211_link_data *link; + LIST_HEAD(keys); + unsigned int link_id; + + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { + if (!(mask & BIT(link_id))) + continue; + link = &links[link_id]->data; + if (link_id == 0 && !link) + link = &sdata->deflink; + if (WARN_ON(!link)) + continue; + ieee80211_remove_link_keys(link, &keys); + 
ieee80211_link_debugfs_remove(link); + ieee80211_link_stop(link); + } + + synchronize_rcu(); + + ieee80211_free_key_list(sdata->local, &keys); +} + +static void ieee80211_free_links(struct ieee80211_sub_if_data *sdata, + struct link_container **links) +{ + unsigned int link_id; + + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) + kfree(links[link_id]); +} + +static int ieee80211_check_dup_link_addrs(struct ieee80211_sub_if_data *sdata) +{ + unsigned int i, j; + + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { + struct ieee80211_link_data *link1; + + link1 = sdata_dereference(sdata->link[i], sdata); + if (!link1) + continue; + for (j = i + 1; j < IEEE80211_MLD_MAX_NUM_LINKS; j++) { + struct ieee80211_link_data *link2; + + link2 = sdata_dereference(sdata->link[j], sdata); + if (!link2) + continue; + + if (ether_addr_equal(link1->conf->addr, + link2->conf->addr)) + return -EALREADY; + } + } + + return 0; +} + +static void ieee80211_set_vif_links_bitmaps(struct ieee80211_sub_if_data *sdata, + u16 valid_links, u16 dormant_links) +{ + sdata->vif.valid_links = valid_links; + sdata->vif.dormant_links = dormant_links; + + if (!valid_links || + WARN((~valid_links & dormant_links) || + !(valid_links & ~dormant_links), + "Invalid links: valid=0x%x, dormant=0x%x", + valid_links, dormant_links)) { + sdata->vif.active_links = 0; + sdata->vif.dormant_links = 0; + return; + } + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP: + /* in an AP all links are always active */ + sdata->vif.active_links = valid_links; + + /* AP links are not expected to be disabled */ + WARN_ON(dormant_links); + break; + case NL80211_IFTYPE_STATION: + if (sdata->vif.active_links) + break; + sdata->vif.active_links = valid_links & ~dormant_links; + WARN_ON(hweight16(sdata->vif.active_links) > 1); + break; + default: + WARN_ON(1); + } +} + +static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata, + struct link_container **to_free, + u16 new_links, u16 dormant_links) +{ + u16 old_links = sdata->vif.valid_links; + u16 old_active = sdata->vif.active_links; + unsigned long add = new_links & ~old_links; + unsigned long rem = old_links & ~new_links; + unsigned int link_id; + int ret; + struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS] = {}, *link; + struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]; + struct ieee80211_link_data *old_data[IEEE80211_MLD_MAX_NUM_LINKS]; + bool use_deflink = old_links == 0; /* set for error case */ + + sdata_assert_lock(sdata); + + memset(to_free, 0, sizeof(links)); + + if (old_links == new_links && dormant_links == sdata->vif.dormant_links) + return 0; + + /* if there were no old links, need to clear the pointers to deflink */ + if (!old_links) + rem |= BIT(0); + + /* allocate new link structures first */ + for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + ret = -ENOMEM; + goto free; + } + links[link_id] = link; + } + + /* keep track of the old pointers for the driver */ + BUILD_BUG_ON(sizeof(old) != sizeof(sdata->vif.link_conf)); + memcpy(old, sdata->vif.link_conf, sizeof(old)); + /* and for us in error cases */ + BUILD_BUG_ON(sizeof(old_data) != sizeof(sdata->link)); + memcpy(old_data, sdata->link, sizeof(old_data)); + + /* grab old links to free later */ + for_each_set_bit(link_id, &rem, IEEE80211_MLD_MAX_NUM_LINKS) { + if (rcu_access_pointer(sdata->link[link_id]) != &sdata->deflink) { + /* + * we must have allocated the data through this path so + * we know we can 
free both at the same time + */ + to_free[link_id] = container_of(rcu_access_pointer(sdata->link[link_id]), + typeof(*links[link_id]), + data); + } + + RCU_INIT_POINTER(sdata->link[link_id], NULL); + RCU_INIT_POINTER(sdata->vif.link_conf[link_id], NULL); + } + + /* link them into data structures */ + for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { + WARN_ON(!use_deflink && + rcu_access_pointer(sdata->link[link_id]) == &sdata->deflink); + + link = links[link_id]; + ieee80211_link_init(sdata, link_id, &link->data, &link->conf); + ieee80211_link_setup(&link->data); + } + + if (new_links == 0) + ieee80211_link_init(sdata, -1, &sdata->deflink, + &sdata->vif.bss_conf); + + ret = ieee80211_check_dup_link_addrs(sdata); + if (!ret) { + /* for keys we will not be able to undo this */ + ieee80211_tear_down_links(sdata, to_free, rem); + + ieee80211_set_vif_links_bitmaps(sdata, new_links, dormant_links); + + /* tell the driver */ + ret = drv_change_vif_links(sdata->local, sdata, + old_links & old_active, + new_links & sdata->vif.active_links, + old); + } + + if (ret) { + /* restore config */ + memcpy(sdata->link, old_data, sizeof(old_data)); + memcpy(sdata->vif.link_conf, old, sizeof(old)); + ieee80211_set_vif_links_bitmaps(sdata, old_links, dormant_links); + /* and free (only) the newly allocated links */ + memset(to_free, 0, sizeof(links)); + goto free; + } + + /* use deflink/bss_conf again if and only if there are no more links */ + use_deflink = new_links == 0; + + goto deinit; +free: + /* if we failed during allocation, only free all */ + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { + kfree(links[link_id]); + links[link_id] = NULL; + } +deinit: + if (use_deflink) + ieee80211_link_init(sdata, -1, &sdata->deflink, + &sdata->vif.bss_conf); + return ret; +} + +int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata, + u16 new_links, u16 dormant_links) +{ + struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS]; + int ret; + + ret = ieee80211_vif_update_links(sdata, links, new_links, + dormant_links); + ieee80211_free_links(sdata, links); + + return ret; +} + +void ieee80211_vif_clear_links(struct ieee80211_sub_if_data *sdata) +{ + struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS]; + + /* + * The locking here is different because when we free links + * in the station case we need to be able to cancel_work_sync() + * something that also takes the lock. 
+ */ + + sdata_lock(sdata); + ieee80211_vif_update_links(sdata, links, 0, 0); + sdata_unlock(sdata); + + ieee80211_free_links(sdata, links); +} + +static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata, + u16 active_links) +{ + struct ieee80211_bss_conf *link_confs[IEEE80211_MLD_MAX_NUM_LINKS]; + struct ieee80211_local *local = sdata->local; + u16 old_active = sdata->vif.active_links; + unsigned long rem = old_active & ~active_links; + unsigned long add = active_links & ~old_active; + struct sta_info *sta; + unsigned int link_id; + int ret, i; + + if (!ieee80211_sdata_running(sdata)) + return -ENETDOWN; + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return -EINVAL; + + if (active_links & ~ieee80211_vif_usable_links(&sdata->vif)) + return -EINVAL; + + /* nothing to do */ + if (old_active == active_links) + return 0; + + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) + link_confs[i] = sdata_dereference(sdata->vif.link_conf[i], + sdata); + + if (add) { + sdata->vif.active_links |= active_links; + ret = drv_change_vif_links(local, sdata, + old_active, + sdata->vif.active_links, + link_confs); + if (ret) { + sdata->vif.active_links = old_active; + return ret; + } + } + + for_each_set_bit(link_id, &rem, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_link_data *link; + + link = sdata_dereference(sdata->link[link_id], sdata); + + /* FIXME: kill TDLS connections on the link */ + + ieee80211_link_release_channel(link); + } + + list_for_each_entry(sta, &local->sta_list, list) { + if (sdata != sta->sdata) + continue; + + /* this is very temporary, but do it anyway */ + __ieee80211_sta_recalc_aggregates(sta, + old_active | active_links); + + ret = drv_change_sta_links(local, sdata, &sta->sta, + old_active, + old_active | active_links); + WARN_ON_ONCE(ret); + } + + ret = ieee80211_key_switch_links(sdata, rem, add); + WARN_ON_ONCE(ret); + + list_for_each_entry(sta, &local->sta_list, list) { + if (sdata != sta->sdata) + continue; + + __ieee80211_sta_recalc_aggregates(sta, active_links); + + ret = drv_change_sta_links(local, sdata, &sta->sta, + old_active | active_links, + active_links); + WARN_ON_ONCE(ret); + + /* + * Do it again, just in case - the driver might very + * well have called ieee80211_sta_recalc_aggregates() + * from there when filling in the new links, which + * would set it wrong since the vif's active links are + * not switched yet... 
+ */ + __ieee80211_sta_recalc_aggregates(sta, active_links); + } + + for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_link_data *link; + + link = sdata_dereference(sdata->link[link_id], sdata); + + ret = ieee80211_link_use_channel(link, &link->conf->chandef, + IEEE80211_CHANCTX_SHARED); + WARN_ON_ONCE(ret); + + ieee80211_mgd_set_link_qos_params(link); + ieee80211_link_info_change_notify(sdata, link, + BSS_CHANGED_ERP_CTS_PROT | + BSS_CHANGED_ERP_PREAMBLE | + BSS_CHANGED_ERP_SLOT | + BSS_CHANGED_HT | + BSS_CHANGED_BASIC_RATES | + BSS_CHANGED_BSSID | + BSS_CHANGED_CQM | + BSS_CHANGED_QOS | + BSS_CHANGED_TXPOWER | + BSS_CHANGED_BANDWIDTH | + BSS_CHANGED_TWT | + BSS_CHANGED_HE_OBSS_PD | + BSS_CHANGED_HE_BSS_COLOR); + } + + old_active = sdata->vif.active_links; + sdata->vif.active_links = active_links; + + if (rem) { + ret = drv_change_vif_links(local, sdata, old_active, + active_links, link_confs); + WARN_ON_ONCE(ret); + } + + return 0; +} + +int __ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + u16 old_active; + int ret; + + sdata_assert_lock(sdata); + mutex_lock(&local->sta_mtx); + mutex_lock(&local->mtx); + mutex_lock(&local->key_mtx); + old_active = sdata->vif.active_links; + if (old_active & active_links) { + /* + * if there's at least one link that stays active across + * the change then switch to it (to those) first, and + * then enable the additional links + */ + ret = _ieee80211_set_active_links(sdata, + old_active & active_links); + if (!ret) + ret = _ieee80211_set_active_links(sdata, active_links); + } else { + /* otherwise switch directly */ + ret = _ieee80211_set_active_links(sdata, active_links); + } + mutex_unlock(&local->key_mtx); + mutex_unlock(&local->mtx); + mutex_unlock(&local->sta_mtx); + + return ret; +} + +int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + int ret; + + sdata_lock(sdata); + ret = __ieee80211_set_active_links(vif, active_links); + sdata_unlock(sdata); + + return ret; +} +EXPORT_SYMBOL_GPL(ieee80211_set_active_links); + +void ieee80211_set_active_links_async(struct ieee80211_vif *vif, + u16 active_links) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + if (!ieee80211_sdata_running(sdata)) + return; + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return; + + if (active_links & ~ieee80211_vif_usable_links(&sdata->vif)) + return; + + /* nothing to do */ + if (sdata->vif.active_links == active_links) + return; + + sdata->desired_active_links = active_links; + schedule_work(&sdata->activate_links_work); +} +EXPORT_SYMBOL_GPL(ieee80211_set_active_links_async); diff --git a/net/mac80211/main.c b/net/mac80211/main.c new file mode 100644 index 0000000000..4548f84451 --- /dev/null +++ b/net/mac80211/main.c @@ -0,0 +1,1616 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. 
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2017 Intel Deutschland GmbH + * Copyright (C) 2018-2023 Intel Corporation + */ + +#include <net/mac80211.h> +#include <linux/module.h> +#include <linux/fips.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/skbuff.h> +#include <linux/etherdevice.h> +#include <linux/if_arp.h> +#include <linux/rtnetlink.h> +#include <linux/bitmap.h> +#include <linux/inetdevice.h> +#include <net/net_namespace.h> +#include <net/dropreason.h> +#include <net/cfg80211.h> +#include <net/addrconf.h> + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" +#include "mesh.h" +#include "wep.h" +#include "led.h" +#include "debugfs.h" + +void ieee80211_configure_filter(struct ieee80211_local *local) +{ + u64 mc; + unsigned int changed_flags; + unsigned int new_flags = 0; + + if (atomic_read(&local->iff_allmultis)) + new_flags |= FIF_ALLMULTI; + + if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning) || + test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) + new_flags |= FIF_BCN_PRBRESP_PROMISC; + + if (local->fif_probe_req || local->probe_req_reg) + new_flags |= FIF_PROBE_REQ; + + if (local->fif_fcsfail) + new_flags |= FIF_FCSFAIL; + + if (local->fif_plcpfail) + new_flags |= FIF_PLCPFAIL; + + if (local->fif_control) + new_flags |= FIF_CONTROL; + + if (local->fif_other_bss) + new_flags |= FIF_OTHER_BSS; + + if (local->fif_pspoll) + new_flags |= FIF_PSPOLL; + + if (local->rx_mcast_action_reg) + new_flags |= FIF_MCAST_ACTION; + + spin_lock_bh(&local->filter_lock); + changed_flags = local->filter_flags ^ new_flags; + + mc = drv_prepare_multicast(local, &local->mc_list); + spin_unlock_bh(&local->filter_lock); + + /* be a bit nasty */ + new_flags |= (1<<31); + + drv_configure_filter(local, changed_flags, &new_flags, mc); + + WARN_ON(new_flags & (1<<31)); + + local->filter_flags = new_flags & ~(1<<31); +} + +static void ieee80211_reconfig_filter(struct work_struct *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, reconfig_filter); + + ieee80211_configure_filter(local); +} + +static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + struct cfg80211_chan_def chandef = {}; + u32 changed = 0; + int power; + u32 offchannel_flag; + + offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; + + if (local->scan_chandef.chan) { + chandef = local->scan_chandef; + } else if (local->tmp_channel) { + chandef.chan = local->tmp_channel; + chandef.width = NL80211_CHAN_WIDTH_20_NOHT; + chandef.center_freq1 = chandef.chan->center_freq; + chandef.freq1_offset = chandef.chan->freq_offset; + } else + chandef = local->_oper_chandef; + + WARN(!cfg80211_chandef_valid(&chandef), + "control:%d.%03d MHz width:%d center: %d.%03d/%d MHz", + chandef.chan->center_freq, chandef.chan->freq_offset, + chandef.width, chandef.center_freq1, chandef.freq1_offset, + chandef.center_freq2); + + if (!cfg80211_chandef_identical(&chandef, &local->_oper_chandef)) + local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL; + else + local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL; + + offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; + + if (offchannel_flag || + !cfg80211_chandef_identical(&local->hw.conf.chandef, + &local->_oper_chandef)) { + local->hw.conf.chandef = chandef; + changed |= IEEE80211_CONF_CHANGE_CHANNEL; + } + + if 
(!conf_is_ht(&local->hw.conf)) { + /* + * mac80211.h documents that this is only valid + * when the channel is set to an HT type, and + * that otherwise STATIC is used. + */ + local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC; + } else if (local->hw.conf.smps_mode != local->smps_mode) { + local->hw.conf.smps_mode = local->smps_mode; + changed |= IEEE80211_CONF_CHANGE_SMPS; + } + + power = ieee80211_chandef_max_power(&chandef); + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (!rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf)) + continue; + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + continue; + if (sdata->vif.bss_conf.txpower == INT_MIN) + continue; + power = min(power, sdata->vif.bss_conf.txpower); + } + rcu_read_unlock(); + + if (local->hw.conf.power_level != power) { + changed |= IEEE80211_CONF_CHANGE_POWER; + local->hw.conf.power_level = power; + } + + return changed; +} + +int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) +{ + int ret = 0; + + might_sleep(); + + if (!local->use_chanctx) + changed |= ieee80211_hw_conf_chan(local); + else + changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL | + IEEE80211_CONF_CHANGE_POWER | + IEEE80211_CONF_CHANGE_SMPS); + + if (changed && local->open_count) { + ret = drv_config(local, changed); + /* + * Goal: + * HW reconfiguration should never fail, the driver has told + * us what it can support so it should live up to that promise. + * + * Current status: + * rfkill is not integrated with mac80211 and a + * configuration command can thus fail if hardware rfkill + * is enabled + * + * FIXME: integrate rfkill with mac80211 and then add this + * WARN_ON() back + * + */ + /* WARN_ON(ret); */ + } + + return ret; +} + +#define BSS_CHANGED_VIF_CFG_FLAGS (BSS_CHANGED_ASSOC |\ + BSS_CHANGED_IDLE |\ + BSS_CHANGED_PS |\ + BSS_CHANGED_IBSS |\ + BSS_CHANGED_ARP_FILTER |\ + BSS_CHANGED_SSID) + +void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, + u64 changed) +{ + struct ieee80211_local *local = sdata->local; + + might_sleep(); + + if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + return; + + if (WARN_ON_ONCE(changed & (BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED) && + sdata->vif.type != NL80211_IFTYPE_AP && + sdata->vif.type != NL80211_IFTYPE_ADHOC && + sdata->vif.type != NL80211_IFTYPE_MESH_POINT && + sdata->vif.type != NL80211_IFTYPE_OCB)) + return; + + if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE || + sdata->vif.type == NL80211_IFTYPE_NAN || + (sdata->vif.type == NL80211_IFTYPE_MONITOR && + !sdata->vif.bss_conf.mu_mimo_owner && + !(changed & BSS_CHANGED_TXPOWER)))) + return; + + if (!check_sdata_in_driver(sdata)) + return; + + if (changed & BSS_CHANGED_VIF_CFG_FLAGS) { + u64 ch = changed & BSS_CHANGED_VIF_CFG_FLAGS; + + trace_drv_vif_cfg_changed(local, sdata, changed); + if (local->ops->vif_cfg_changed) + local->ops->vif_cfg_changed(&local->hw, &sdata->vif, ch); + } + + if (changed & ~BSS_CHANGED_VIF_CFG_FLAGS) { + u64 ch = changed & ~BSS_CHANGED_VIF_CFG_FLAGS; + + /* FIXME: should be for each link */ + trace_drv_link_info_changed(local, sdata, &sdata->vif.bss_conf, + changed); + if (local->ops->link_info_changed) + local->ops->link_info_changed(&local->hw, &sdata->vif, + &sdata->vif.bss_conf, ch); + } + + if (local->ops->bss_info_changed) + local->ops->bss_info_changed(&local->hw, &sdata->vif, + &sdata->vif.bss_conf, changed); + trace_drv_return_void(local); +} + +void ieee80211_vif_cfg_change_notify(struct ieee80211_sub_if_data *sdata, + u64 
changed) +{ + struct ieee80211_local *local = sdata->local; + + WARN_ON_ONCE(changed & ~BSS_CHANGED_VIF_CFG_FLAGS); + + if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + return; + + drv_vif_cfg_changed(local, sdata, changed); +} + +void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link, + u64 changed) +{ + struct ieee80211_local *local = sdata->local; + + WARN_ON_ONCE(changed & BSS_CHANGED_VIF_CFG_FLAGS); + + if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + return; + + if (!check_sdata_in_driver(sdata)) + return; + + drv_link_info_changed(local, sdata, link->conf, link->link_id, changed); +} + +u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) +{ + sdata->vif.bss_conf.use_cts_prot = false; + sdata->vif.bss_conf.use_short_preamble = false; + sdata->vif.bss_conf.use_short_slot = false; + return BSS_CHANGED_ERP_CTS_PROT | + BSS_CHANGED_ERP_PREAMBLE | + BSS_CHANGED_ERP_SLOT; +} + +static void ieee80211_tasklet_handler(struct tasklet_struct *t) +{ + struct ieee80211_local *local = from_tasklet(local, t, tasklet); + struct sk_buff *skb; + + while ((skb = skb_dequeue(&local->skb_queue)) || + (skb = skb_dequeue(&local->skb_queue_unreliable))) { + switch (skb->pkt_type) { + case IEEE80211_RX_MSG: + /* Clear skb->pkt_type in order to not confuse kernel + * netstack. */ + skb->pkt_type = 0; + ieee80211_rx(&local->hw, skb); + break; + case IEEE80211_TX_STATUS_MSG: + skb->pkt_type = 0; + ieee80211_tx_status(&local->hw, skb); + break; + default: + WARN(1, "mac80211: Packet is of unknown type %d\n", + skb->pkt_type); + dev_kfree_skb(skb); + break; + } + } +} + +static void ieee80211_restart_work(struct work_struct *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, restart_work); + struct ieee80211_sub_if_data *sdata; + int ret; + + flush_workqueue(local->workqueue); + + rtnl_lock(); + /* we might do interface manipulations, so need both */ + wiphy_lock(local->hw.wiphy); + + WARN(test_bit(SCAN_HW_SCANNING, &local->scanning), + "%s called with hardware scan in progress\n", __func__); + + list_for_each_entry(sdata, &local->interfaces, list) { + /* + * XXX: there may be more work for other vif types and even + * for station mode: a good thing would be to run most of + * the iface type's dependent _stop (ieee80211_mg_stop, + * ieee80211_ibss_stop) etc... + * For now, fix only the specific bug that was seen: race + * between csa_connection_drop_work and us. + */ + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + /* + * This worker is scheduled from the iface worker that + * runs on mac80211's workqueue, so we can't be + * scheduling this worker after the cancel right here. + * The exception is ieee80211_chswitch_done. + * Then we can have a race... 
+ */ + wiphy_work_cancel(local->hw.wiphy, + &sdata->u.mgd.csa_connection_drop_work); + if (sdata->vif.bss_conf.csa_active) { + sdata_lock(sdata); + ieee80211_sta_connection_lost(sdata, + WLAN_REASON_UNSPECIFIED, + false); + sdata_unlock(sdata); + } + } + flush_delayed_work(&sdata->dec_tailroom_needed_wk); + } + ieee80211_scan_cancel(local); + + /* make sure any new ROC will consider local->in_reconfig */ + wiphy_delayed_work_flush(local->hw.wiphy, &local->roc_work); + wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done); + + /* wait for all packet processing to be done */ + synchronize_net(); + + ret = ieee80211_reconfig(local); + wiphy_unlock(local->hw.wiphy); + + if (ret) + cfg80211_shutdown_all_interfaces(local->hw.wiphy); + + rtnl_unlock(); +} + +void ieee80211_restart_hw(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + trace_api_restart_hw(local); + + wiphy_info(hw->wiphy, + "Hardware restart was requested\n"); + + /* use this reason, ieee80211_reconfig will unblock it */ + ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, + IEEE80211_QUEUE_STOP_REASON_SUSPEND, + false); + + /* + * Stop all Rx during the reconfig. We don't want state changes + * or driver callbacks while this is in progress. + */ + local->in_reconfig = true; + barrier(); + + queue_work(system_freezable_wq, &local->restart_work); +} +EXPORT_SYMBOL(ieee80211_restart_hw); + +#ifdef CONFIG_INET +static int ieee80211_ifa_changed(struct notifier_block *nb, + unsigned long data, void *arg) +{ + struct in_ifaddr *ifa = arg; + struct ieee80211_local *local = + container_of(nb, struct ieee80211_local, + ifa_notifier); + struct net_device *ndev = ifa->ifa_dev->dev; + struct wireless_dev *wdev = ndev->ieee80211_ptr; + struct in_device *idev; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_vif_cfg *vif_cfg; + struct ieee80211_if_managed *ifmgd; + int c = 0; + + /* Make sure it's our interface that got changed */ + if (!wdev) + return NOTIFY_DONE; + + if (wdev->wiphy != local->hw.wiphy) + return NOTIFY_DONE; + + sdata = IEEE80211_DEV_TO_SUB_IF(ndev); + vif_cfg = &sdata->vif.cfg; + + /* ARP filtering is only supported in managed mode */ + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return NOTIFY_DONE; + + idev = __in_dev_get_rtnl(sdata->dev); + if (!idev) + return NOTIFY_DONE; + + ifmgd = &sdata->u.mgd; + sdata_lock(sdata); + + /* Copy the addresses to the vif config list */ + ifa = rtnl_dereference(idev->ifa_list); + while (ifa) { + if (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN) + vif_cfg->arp_addr_list[c] = ifa->ifa_address; + ifa = rtnl_dereference(ifa->ifa_next); + c++; + } + + vif_cfg->arp_addr_cnt = c; + + /* Configure driver only if associated (which also implies it is up) */ + if (ifmgd->associated) + ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_ARP_FILTER); + + sdata_unlock(sdata); + + return NOTIFY_OK; +} +#endif + +#if IS_ENABLED(CONFIG_IPV6) +static int ieee80211_ifa6_changed(struct notifier_block *nb, + unsigned long data, void *arg) +{ + struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)arg; + struct inet6_dev *idev = ifa->idev; + struct net_device *ndev = ifa->idev->dev; + struct ieee80211_local *local = + container_of(nb, struct ieee80211_local, ifa6_notifier); + struct wireless_dev *wdev = ndev->ieee80211_ptr; + struct ieee80211_sub_if_data *sdata; + + /* Make sure it's our interface that got changed */ + if (!wdev || wdev->wiphy != local->hw.wiphy) + return NOTIFY_DONE; + + sdata = IEEE80211_DEV_TO_SUB_IF(ndev); + + /* + * For now only support station 
mode. This is mostly because + * doing AP would have to handle AP_VLAN in some way ... + */ + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return NOTIFY_DONE; + + drv_ipv6_addr_change(local, sdata, idev); + + return NOTIFY_OK; +} +#endif + +/* There isn't a lot of sense in it, but you can transmit anything you like */ +static const struct ieee80211_txrx_stypes +ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { + [NL80211_IFTYPE_ADHOC] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4), + }, + [NL80211_IFTYPE_STATION] = { + .tx = 0xffff, + /* + * To support Pre Association Security Negotiation (PASN) while + * already associated to one AP, allow user space to register to + * Rx authentication frames, so that the user space logic would + * be able to receive/handle authentication frames from a + * different AP as part of PASN. + * It is expected that user space would intelligently register + * for Rx authentication frames, i.e., only when PASN is used + * and configure a match filter only for PASN authentication + * algorithm, as otherwise the MLME functionality of mac80211 + * would be broken. + */ + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4), + }, + [NL80211_IFTYPE_AP] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4), + }, + [NL80211_IFTYPE_AP_VLAN] = { + /* copy AP */ + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4), + }, + [NL80211_IFTYPE_P2P_CLIENT] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4), + }, + [NL80211_IFTYPE_P2P_GO] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4), + }, + [NL80211_IFTYPE_MESH_POINT] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4), + }, + [NL80211_IFTYPE_P2P_DEVICE] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4), + }, +}; + +static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = { + .ampdu_params_info = IEEE80211_HT_AMPDU_PARM_FACTOR | + IEEE80211_HT_AMPDU_PARM_DENSITY, + + .cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_MAX_AMSDU | + IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_SGI_40 | + IEEE80211_HT_CAP_TX_STBC | + IEEE80211_HT_CAP_RX_STBC | + IEEE80211_HT_CAP_LDPC_CODING | + IEEE80211_HT_CAP_40MHZ_INTOLERANT), + .mcs = { + .rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, }, + }, +}; + +static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = { + .vht_cap_info = + cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC | + IEEE80211_VHT_CAP_SHORT_GI_80 | + IEEE80211_VHT_CAP_SHORT_GI_160 | + 
IEEE80211_VHT_CAP_RXSTBC_MASK | + IEEE80211_VHT_CAP_TXSTBC | + IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN | + IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK), + .supp_mcs = { + .rx_mcs_map = cpu_to_le16(~0), + .tx_mcs_map = cpu_to_le16(~0), + }, +}; + +struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, + const struct ieee80211_ops *ops, + const char *requested_name) +{ + struct ieee80211_local *local; + int priv_size, i; + struct wiphy *wiphy; + bool use_chanctx; + + if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config || + !ops->add_interface || !ops->remove_interface || + !ops->configure_filter || !ops->wake_tx_queue)) + return NULL; + + if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove))) + return NULL; + + if (WARN_ON(!!ops->link_info_changed != !!ops->vif_cfg_changed || + (ops->link_info_changed && ops->bss_info_changed))) + return NULL; + + /* check all or no channel context operations exist */ + i = !!ops->add_chanctx + !!ops->remove_chanctx + + !!ops->change_chanctx + !!ops->assign_vif_chanctx + + !!ops->unassign_vif_chanctx; + if (WARN_ON(i != 0 && i != 5)) + return NULL; + use_chanctx = i == 5; + + /* Ensure 32-byte alignment of our private data and hw private data. + * We use the wiphy priv data for both our ieee80211_local and for + * the driver's private data + * + * In memory it'll be like this: + * + * +-------------------------+ + * | struct wiphy | + * +-------------------------+ + * | struct ieee80211_local | + * +-------------------------+ + * | driver's private data | + * +-------------------------+ + * + */ + priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len; + + wiphy = wiphy_new_nm(&mac80211_config_ops, priv_size, requested_name); + + if (!wiphy) + return NULL; + + wiphy->mgmt_stypes = ieee80211_default_mgmt_stypes; + + wiphy->privid = mac80211_wiphy_privid; + + wiphy->flags |= WIPHY_FLAG_NETNS_OK | + WIPHY_FLAG_4ADDR_AP | + WIPHY_FLAG_4ADDR_STATION | + WIPHY_FLAG_REPORTS_OBSS | + WIPHY_FLAG_OFFCHAN_TX; + + if (!use_chanctx || ops->remain_on_channel) + wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + + wiphy->features |= NL80211_FEATURE_SK_TX_STATUS | + NL80211_FEATURE_SAE | + NL80211_FEATURE_HT_IBSS | + NL80211_FEATURE_VIF_TXPOWER | + NL80211_FEATURE_MAC_ON_CREATE | + NL80211_FEATURE_USERSPACE_MPM | + NL80211_FEATURE_FULL_AP_CLIENT_STATE; + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_STA); + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211); + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH); + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS); + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_SCAN_FREQ_KHZ); + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE); + + if (!ops->hw_scan) { + wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | + NL80211_FEATURE_AP_SCAN; + /* + * if the driver behaves correctly using the probe request + * (template) from mac80211, then both of these should be + * supported even with hw scan - but let drivers opt in. 
+ */ + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_SCAN_RANDOM_SN); + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT); + } + + if (!ops->set_key) + wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_TXQS); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RRM); + + wiphy->bss_priv_size = sizeof(struct ieee80211_bss); + + local = wiphy_priv(wiphy); + + if (sta_info_init(local)) + goto err_free; + + local->hw.wiphy = wiphy; + + local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); + + local->ops = ops; + local->use_chanctx = use_chanctx; + + /* + * We need a bit of data queued to build aggregates properly, so + * instruct the TCP stack to allow more than a single ms of data + * to be queued in the stack. The value is a bit-shift of 1 + * second, so 7 is ~8ms of queued data. Only affects local TCP + * sockets. + * This is the default, anyhow - drivers may need to override it + * for local reasons (longer buffers, longer completion time, or + * similar). + */ + local->hw.tx_sk_pacing_shift = 7; + + /* set up some defaults */ + local->hw.queues = 1; + local->hw.max_rates = 1; + local->hw.max_report_rates = 0; + local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT; + local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT; + local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE; + local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; + local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; + local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS | + IEEE80211_RADIOTAP_MCS_HAVE_GI | + IEEE80211_RADIOTAP_MCS_HAVE_BW; + local->hw.radiotap_vht_details = IEEE80211_RADIOTAP_VHT_KNOWN_GI | + IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH; + local->hw.uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES; + local->hw.uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; + local->hw.max_mtu = IEEE80211_MAX_DATA_LEN; + local->user_power_level = IEEE80211_UNSET_POWER_LEVEL; + wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; + wiphy->vht_capa_mod_mask = &mac80211_vht_capa_mod_mask; + + local->ext_capa[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF; + + wiphy->extended_capabilities = local->ext_capa; + wiphy->extended_capabilities_mask = local->ext_capa; + wiphy->extended_capabilities_len = + ARRAY_SIZE(local->ext_capa); + + INIT_LIST_HEAD(&local->interfaces); + INIT_LIST_HEAD(&local->mon_list); + + __hw_addr_init(&local->mc_list); + + mutex_init(&local->iflist_mtx); + mutex_init(&local->mtx); + + mutex_init(&local->key_mtx); + spin_lock_init(&local->filter_lock); + spin_lock_init(&local->rx_path_lock); + spin_lock_init(&local->queue_stop_reason_lock); + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + INIT_LIST_HEAD(&local->active_txqs[i]); + spin_lock_init(&local->active_txq_lock[i]); + local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L; + local->aql_txq_limit_high[i] = + IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H; + atomic_set(&local->aql_ac_pending_airtime[i], 0); + } + + local->airtime_flags = AIRTIME_USE_TX | AIRTIME_USE_RX; + local->aql_threshold = IEEE80211_AQL_THRESHOLD; + atomic_set(&local->aql_total_pending_airtime, 0); + + spin_lock_init(&local->handle_wake_tx_queue_lock); + + INIT_LIST_HEAD(&local->chanctx_list); + mutex_init(&local->chanctx_mtx); + + wiphy_delayed_work_init(&local->scan_work, ieee80211_scan_work); + + INIT_WORK(&local->restart_work, ieee80211_restart_work); + + wiphy_work_init(&local->radar_detected_work, + ieee80211_dfs_radar_detected_work); + + 
INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter); + local->smps_mode = IEEE80211_SMPS_OFF; + + INIT_WORK(&local->dynamic_ps_enable_work, + ieee80211_dynamic_ps_enable_work); + INIT_WORK(&local->dynamic_ps_disable_work, + ieee80211_dynamic_ps_disable_work); + timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0); + + wiphy_work_init(&local->sched_scan_stopped_work, + ieee80211_sched_scan_stopped_work); + + spin_lock_init(&local->ack_status_lock); + idr_init(&local->ack_status_frames); + + for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { + skb_queue_head_init(&local->pending[i]); + atomic_set(&local->agg_queue_stop[i], 0); + } + tasklet_setup(&local->tx_pending_tasklet, ieee80211_tx_pending); + tasklet_setup(&local->wake_txqs_tasklet, ieee80211_wake_txqs); + tasklet_setup(&local->tasklet, ieee80211_tasklet_handler); + + skb_queue_head_init(&local->skb_queue); + skb_queue_head_init(&local->skb_queue_unreliable); + + ieee80211_alloc_led_names(local); + + ieee80211_roc_setup(local); + + local->hw.radiotap_timestamp.units_pos = -1; + local->hw.radiotap_timestamp.accuracy = -1; + + return &local->hw; + err_free: + wiphy_free(wiphy); + return NULL; +} +EXPORT_SYMBOL(ieee80211_alloc_hw_nm); + +static int ieee80211_init_cipher_suites(struct ieee80211_local *local) +{ + bool have_wep = !fips_enabled; /* FIPS does not permit the use of RC4 */ + bool have_mfp = ieee80211_hw_check(&local->hw, MFP_CAPABLE); + int r = 0, w = 0; + u32 *suites; + static const u32 cipher_suites[] = { + /* keep WEP first, it may be removed below */ + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + WLAN_CIPHER_SUITE_CCMP_256, + WLAN_CIPHER_SUITE_GCMP, + WLAN_CIPHER_SUITE_GCMP_256, + + /* keep last -- depends on hw flags! */ + WLAN_CIPHER_SUITE_AES_CMAC, + WLAN_CIPHER_SUITE_BIP_CMAC_256, + WLAN_CIPHER_SUITE_BIP_GMAC_128, + WLAN_CIPHER_SUITE_BIP_GMAC_256, + }; + + if (ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL) || + local->hw.wiphy->cipher_suites) { + /* If the driver advertises, or doesn't support SW crypto, + * we only need to remove WEP if necessary. + */ + if (have_wep) + return 0; + + /* well if it has _no_ ciphers ... 
fine */ + if (!local->hw.wiphy->n_cipher_suites) + return 0; + + /* Driver provides cipher suites, but we need to exclude WEP */ + suites = kmemdup(local->hw.wiphy->cipher_suites, + sizeof(u32) * local->hw.wiphy->n_cipher_suites, + GFP_KERNEL); + if (!suites) + return -ENOMEM; + + for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) { + u32 suite = local->hw.wiphy->cipher_suites[r]; + + if (suite == WLAN_CIPHER_SUITE_WEP40 || + suite == WLAN_CIPHER_SUITE_WEP104) + continue; + suites[w++] = suite; + } + } else { + /* assign the (software supported and perhaps offloaded) + * cipher suites + */ + local->hw.wiphy->cipher_suites = cipher_suites; + local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); + + if (!have_mfp) + local->hw.wiphy->n_cipher_suites -= 4; + + if (!have_wep) { + local->hw.wiphy->cipher_suites += 2; + local->hw.wiphy->n_cipher_suites -= 2; + } + + /* not dynamically allocated, so just return */ + return 0; + } + + local->hw.wiphy->cipher_suites = suites; + local->hw.wiphy->n_cipher_suites = w; + local->wiphy_ciphers_allocated = true; + + return 0; +} + +int ieee80211_register_hw(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + int result, i; + enum nl80211_band band; + int channels, max_bitrates; + bool supp_ht, supp_vht, supp_he, supp_eht; + struct cfg80211_chan_def dflt_chandef = {}; + + if (ieee80211_hw_check(hw, QUEUE_CONTROL) && + (local->hw.offchannel_tx_hw_queue == IEEE80211_INVAL_HW_QUEUE || + local->hw.offchannel_tx_hw_queue >= local->hw.queues)) + return -EINVAL; + + if ((hw->wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH) && + (!local->ops->tdls_channel_switch || + !local->ops->tdls_cancel_channel_switch || + !local->ops->tdls_recv_channel_switch)) + return -EOPNOTSUPP; + + if (WARN_ON(ieee80211_hw_check(hw, SUPPORTS_TX_FRAG) && + !local->ops->set_frag_threshold)) + return -EINVAL; + + if (WARN_ON(local->hw.wiphy->interface_modes & + BIT(NL80211_IFTYPE_NAN) && + (!local->ops->start_nan || !local->ops->stop_nan))) + return -EINVAL; + + if (hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO) { + /* + * For drivers capable of doing MLO, assume modern driver + * or firmware facilities, so software doesn't have to do + * as much, e.g. monitoring beacons would be hard if we + * might not even know which link is active at which time. 
+ */ + if (WARN_ON(!local->use_chanctx)) + return -EINVAL; + + if (WARN_ON(!local->ops->link_info_changed)) + return -EINVAL; + + if (WARN_ON(!ieee80211_hw_check(hw, HAS_RATE_CONTROL))) + return -EINVAL; + + if (WARN_ON(!ieee80211_hw_check(hw, AMPDU_AGGREGATION))) + return -EINVAL; + + if (WARN_ON(ieee80211_hw_check(hw, HOST_BROADCAST_PS_BUFFERING))) + return -EINVAL; + + if (WARN_ON(ieee80211_hw_check(hw, SUPPORTS_PS) && + (!ieee80211_hw_check(hw, SUPPORTS_DYNAMIC_PS) || + ieee80211_hw_check(hw, PS_NULLFUNC_STACK)))) + return -EINVAL; + + if (WARN_ON(!ieee80211_hw_check(hw, MFP_CAPABLE))) + return -EINVAL; + + if (WARN_ON(!ieee80211_hw_check(hw, CONNECTION_MONITOR))) + return -EINVAL; + + if (WARN_ON(ieee80211_hw_check(hw, NEED_DTIM_BEFORE_ASSOC))) + return -EINVAL; + + if (WARN_ON(ieee80211_hw_check(hw, TIMING_BEACON_ONLY))) + return -EINVAL; + + if (WARN_ON(!ieee80211_hw_check(hw, AP_LINK_PS))) + return -EINVAL; + + if (WARN_ON(ieee80211_hw_check(hw, DEAUTH_NEED_MGD_TX_PREP))) + return -EINVAL; + } + +#ifdef CONFIG_PM + if (hw->wiphy->wowlan && (!local->ops->suspend || !local->ops->resume)) + return -EINVAL; +#endif + + if (!local->use_chanctx) { + for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) { + const struct ieee80211_iface_combination *comb; + + comb = &local->hw.wiphy->iface_combinations[i]; + + if (comb->num_different_channels > 1) + return -EINVAL; + } + } else { + /* DFS is not supported with multi-channel combinations yet */ + for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) { + const struct ieee80211_iface_combination *comb; + + comb = &local->hw.wiphy->iface_combinations[i]; + + if (comb->radar_detect_widths && + comb->num_different_channels > 1) + return -EINVAL; + } + } + + /* Only HW csum features are currently compatible with mac80211 */ + if (WARN_ON(hw->netdev_features & ~MAC80211_SUPPORTED_FEATURES)) + return -EINVAL; + + if (hw->max_report_rates == 0) + hw->max_report_rates = hw->max_rates; + + local->rx_chains = 1; + + /* + * generic code guarantees at least one band, + * set this very early because much code assumes + * that hw.conf.channel is assigned + */ + channels = 0; + max_bitrates = 0; + supp_ht = false; + supp_vht = false; + supp_he = false; + supp_eht = false; + for (band = 0; band < NUM_NL80211_BANDS; band++) { + struct ieee80211_supported_band *sband; + + sband = local->hw.wiphy->bands[band]; + if (!sband) + continue; + + if (!dflt_chandef.chan) { + /* + * Assign the first enabled channel to dflt_chandef + * from the list of channels + */ + for (i = 0; i < sband->n_channels; i++) + if (!(sband->channels[i].flags & + IEEE80211_CHAN_DISABLED)) + break; + /* if none found then use the first anyway */ + if (i == sband->n_channels) + i = 0; + cfg80211_chandef_create(&dflt_chandef, + &sband->channels[i], + NL80211_CHAN_NO_HT); + /* init channel we're on */ + if (!local->use_chanctx && !local->_oper_chandef.chan) { + local->hw.conf.chandef = dflt_chandef; + local->_oper_chandef = dflt_chandef; + } + local->monitor_chandef = dflt_chandef; + } + + channels += sband->n_channels; + + /* + * Due to the way the aggregation code handles this and it + * being an HT capability, we can't really support delayed + * BA in MLO (yet). 
+ */ + if (WARN_ON(sband->ht_cap.ht_supported && + (sband->ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA) && + hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)) + return -EINVAL; + + if (max_bitrates < sband->n_bitrates) + max_bitrates = sband->n_bitrates; + supp_ht = supp_ht || sband->ht_cap.ht_supported; + supp_vht = supp_vht || sband->vht_cap.vht_supported; + + for (i = 0; i < sband->n_iftype_data; i++) { + const struct ieee80211_sband_iftype_data *iftd; + + iftd = &sband->iftype_data[i]; + + supp_he = supp_he || iftd->he_cap.has_he; + supp_eht = supp_eht || iftd->eht_cap.has_eht; + } + + /* HT, VHT, HE require QoS, thus >= 4 queues */ + if (WARN_ON(local->hw.queues < IEEE80211_NUM_ACS && + (supp_ht || supp_vht || supp_he))) + return -EINVAL; + + /* EHT requires HE support */ + if (WARN_ON(supp_eht && !supp_he)) + return -EINVAL; + + if (!sband->ht_cap.ht_supported) + continue; + + /* TODO: consider VHT for RX chains, hopefully it's the same */ + local->rx_chains = + max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), + local->rx_chains); + + /* no need to mask, SM_PS_DISABLED has all bits set */ + sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED << + IEEE80211_HT_CAP_SM_PS_SHIFT; + } + + /* if low-level driver supports AP, we also support VLAN. + * drivers advertising SW_CRYPTO_CONTROL should enable AP_VLAN + * based on their support to transmit SW encrypted packets. + */ + if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP) && + !ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL)) { + hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); + hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN); + } + + /* mac80211 always supports monitor */ + hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); + hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR); + + /* mac80211 doesn't support more than one IBSS interface right now */ + for (i = 0; i < hw->wiphy->n_iface_combinations; i++) { + const struct ieee80211_iface_combination *c; + int j; + + c = &hw->wiphy->iface_combinations[i]; + + for (j = 0; j < c->n_limits; j++) + if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) && + c->limits[j].max > 1) + return -EINVAL; + } + + local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) + + sizeof(void *) * channels, GFP_KERNEL); + if (!local->int_scan_req) + return -ENOMEM; + + eth_broadcast_addr(local->int_scan_req->bssid); + + for (band = 0; band < NUM_NL80211_BANDS; band++) { + if (!local->hw.wiphy->bands[band]) + continue; + local->int_scan_req->rates[band] = (u32) -1; + } + +#ifndef CONFIG_MAC80211_MESH + /* mesh depends on Kconfig, but drivers should set it if they want */ + local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT); +#endif + + /* if the underlying driver supports mesh, mac80211 will (at least) + * provide routing of mesh authentication frames to userspace */ + if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT)) + local->hw.wiphy->flags |= WIPHY_FLAG_MESH_AUTH; + + /* mac80211 supports control port protocol changing */ + local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL; + + if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) { + local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + } else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC)) { + local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; + if (hw->max_signal <= 0) { + result = -EINVAL; + goto fail_workqueue; + } + } + + /* Mac80211 and therefore all drivers using SW crypto only + * are able to handle PTK rekeys and Extended Key ID. 
+ */ + if (!local->ops->set_key) { + wiphy_ext_feature_set(local->hw.wiphy, + NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); + wiphy_ext_feature_set(local->hw.wiphy, + NL80211_EXT_FEATURE_EXT_KEY_ID); + } + + if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_ADHOC)) + wiphy_ext_feature_set(local->hw.wiphy, + NL80211_EXT_FEATURE_DEL_IBSS_STA); + + /* + * Calculate scan IE length -- we need this to alloc + * memory and to subtract from the driver limit. It + * includes the DS Params, (extended) supported rates, and HT + * information -- SSID is the driver's responsibility. + */ + local->scan_ies_len = 4 + max_bitrates /* (ext) supp rates */ + + 3 /* DS Params */; + if (supp_ht) + local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap); + + if (supp_vht) + local->scan_ies_len += + 2 + sizeof(struct ieee80211_vht_cap); + + /* + * HE cap element is variable in size - set len to allow max size */ + if (supp_he) { + local->scan_ies_len += + 3 + sizeof(struct ieee80211_he_cap_elem) + + sizeof(struct ieee80211_he_mcs_nss_supp) + + IEEE80211_HE_PPE_THRES_MAX_LEN; + + if (supp_eht) + local->scan_ies_len += + 3 + sizeof(struct ieee80211_eht_cap_elem) + + sizeof(struct ieee80211_eht_mcs_nss_supp) + + IEEE80211_EHT_PPE_THRES_MAX_LEN; + } + + if (!local->ops->hw_scan) { + /* For hw_scan, driver needs to set these up. */ + local->hw.wiphy->max_scan_ssids = 4; + local->hw.wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; + } + + /* + * If the driver supports any scan IEs, then assume the + * limit includes the IEs mac80211 will add, otherwise + * leave it at zero and let the driver sort it out; we + * still pass our IEs to the driver but userspace will + * not be allowed to in that case. + */ + if (local->hw.wiphy->max_scan_ie_len) + local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len; + + result = ieee80211_init_cipher_suites(local); + if (result < 0) + goto fail_workqueue; + + if (!local->ops->remain_on_channel) + local->hw.wiphy->max_remain_on_channel_duration = 5000; + + /* mac80211 based drivers don't support internal TDLS setup */ + if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) + local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP; + + /* mac80211 supports eCSA, if the driver supports STA CSA at all */ + if (ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) + local->ext_capa[0] |= WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING; + + /* mac80211 supports multi BSSID, if the driver supports it */ + if (ieee80211_hw_check(&local->hw, SUPPORTS_MULTI_BSSID)) { + local->hw.wiphy->support_mbssid = true; + if (ieee80211_hw_check(&local->hw, + SUPPORTS_ONLY_HE_MULTI_BSSID)) + local->hw.wiphy->support_only_he_mbssid = true; + else + local->ext_capa[2] |= + WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT; + } + + local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CNTDWN_COUNTERS_NUM; + + /* + * We use the number of queues for feature tests (QoS, HT) internally + * so restrict them appropriately. + */ + if (hw->queues > IEEE80211_MAX_QUEUES) + hw->queues = IEEE80211_MAX_QUEUES; + + local->workqueue = + alloc_ordered_workqueue("%s", 0, wiphy_name(local->hw.wiphy)); + if (!local->workqueue) { + result = -ENOMEM; + goto fail_workqueue; + } + + /* + * The hardware needs headroom for sending the frame, + * and we need some headroom for passing the frame to monitor + * interfaces, but never both at the same time. 
+ */
+ local->tx_headroom = max_t(unsigned int, local->hw.extra_tx_headroom,
+ IEEE80211_TX_STATUS_HEADROOM);
+
+ /*
+ * if the driver doesn't specify a max listen interval we
+ * use 5 which should be a safe default
+ */
+ if (local->hw.max_listen_interval == 0)
+ local->hw.max_listen_interval = 5;
+
+ local->hw.conf.listen_interval = local->hw.max_listen_interval;
+
+ local->dynamic_ps_forced_timeout = -1;
+
+ if (!local->hw.max_nan_de_entries)
+ local->hw.max_nan_de_entries = IEEE80211_MAX_NAN_INSTANCE_ID;
+
+ if (!local->hw.weight_multiplier)
+ local->hw.weight_multiplier = 1;
+
+ ieee80211_wep_init(local);
+
+ local->hw.conf.flags = IEEE80211_CONF_IDLE;
+
+ ieee80211_led_init(local);
+
+ result = ieee80211_txq_setup_flows(local);
+ if (result)
+ goto fail_flows;
+
+ rtnl_lock();
+ result = ieee80211_init_rate_ctrl_alg(local,
+ hw->rate_control_algorithm);
+ rtnl_unlock();
+ if (result < 0) {
+ wiphy_debug(local->hw.wiphy,
+ "Failed to initialize rate control algorithm\n");
+ goto fail_rate;
+ }
+
+ if (local->rate_ctrl) {
+ clear_bit(IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW, hw->flags);
+ if (local->rate_ctrl->ops->capa & RATE_CTRL_CAPA_VHT_EXT_NSS_BW)
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+ }
+
+ /*
+ * If the VHT capabilities don't have IEEE80211_VHT_EXT_NSS_BW_CAPABLE,
+ * or have it when we don't, copy the sband structure and set/clear it.
+ * This is necessary because rate scaling algorithms could be switched
+ * and have different support values.
+ * Print a message so that in the common case the reallocation can be
+ * avoided.
+ */
+ BUILD_BUG_ON(NUM_NL80211_BANDS > 8 * sizeof(local->sband_allocated));
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *sband;
+ bool local_cap, ie_cap;
+
+ local_cap = ieee80211_hw_check(hw, SUPPORTS_VHT_EXT_NSS_BW);
+
+ sband = local->hw.wiphy->bands[band];
+ if (!sband || !sband->vht_cap.vht_supported)
+ continue;
+
+ ie_cap = !!(sband->vht_cap.vht_mcs.tx_highest &
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE));
+
+ if (local_cap == ie_cap)
+ continue;
+
+ sband = kmemdup(sband, sizeof(*sband), GFP_KERNEL);
+ if (!sband) {
+ result = -ENOMEM;
+ goto fail_rate;
+ }
+
+ wiphy_dbg(hw->wiphy, "copying sband (band %d) due to VHT EXT NSS BW flag\n",
+ band);
+
+ sband->vht_cap.vht_mcs.tx_highest ^=
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
+
+ local->hw.wiphy->bands[band] = sband;
+ local->sband_allocated |= BIT(band);
+ }
+
+ result = wiphy_register(local->hw.wiphy);
+ if (result < 0)
+ goto fail_wiphy_register;
+
+ debugfs_hw_add(local);
+ rate_control_add_debugfs(local);
+
+ rtnl_lock();
+ wiphy_lock(hw->wiphy);
+
+ /* add one default STA interface if supported */
+ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) &&
+ !ieee80211_hw_check(hw, NO_AUTO_VIF)) {
+ struct vif_params params = {0};
+
+ result = ieee80211_if_add(local, "wlan%d", NET_NAME_ENUM, NULL,
+ NL80211_IFTYPE_STATION, &params);
+ if (result)
+ wiphy_warn(local->hw.wiphy,
+ "Failed to add default virtual iface\n");
+ }
+
+ wiphy_unlock(hw->wiphy);
+ rtnl_unlock();
+
+#ifdef CONFIG_INET
+ local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
+ result = register_inetaddr_notifier(&local->ifa_notifier);
+ if (result)
+ goto fail_ifa;
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+ local->ifa6_notifier.notifier_call = ieee80211_ifa6_changed;
+ result = register_inet6addr_notifier(&local->ifa6_notifier);
+ if (result)
+ goto fail_ifa6;
+#endif
+
+ return 0;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ fail_ifa6:
+#ifdef CONFIG_INET
+
unregister_inetaddr_notifier(&local->ifa_notifier); +#endif +#endif +#if defined(CONFIG_INET) || defined(CONFIG_IPV6) + fail_ifa: +#endif + wiphy_unregister(local->hw.wiphy); + fail_wiphy_register: + rtnl_lock(); + rate_control_deinitialize(local); + ieee80211_remove_interfaces(local); + rtnl_unlock(); + fail_rate: + fail_flows: + ieee80211_led_exit(local); + destroy_workqueue(local->workqueue); + fail_workqueue: + if (local->wiphy_ciphers_allocated) { + kfree(local->hw.wiphy->cipher_suites); + local->wiphy_ciphers_allocated = false; + } + kfree(local->int_scan_req); + return result; +} +EXPORT_SYMBOL(ieee80211_register_hw); + +void ieee80211_unregister_hw(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + tasklet_kill(&local->tx_pending_tasklet); + tasklet_kill(&local->tasklet); + +#ifdef CONFIG_INET + unregister_inetaddr_notifier(&local->ifa_notifier); +#endif +#if IS_ENABLED(CONFIG_IPV6) + unregister_inet6addr_notifier(&local->ifa6_notifier); +#endif + + rtnl_lock(); + + /* + * At this point, interface list manipulations are fine + * because the driver cannot be handing us frames any + * more and the tasklet is killed. + */ + ieee80211_remove_interfaces(local); + + wiphy_lock(local->hw.wiphy); + wiphy_delayed_work_cancel(local->hw.wiphy, &local->roc_work); + wiphy_work_cancel(local->hw.wiphy, &local->sched_scan_stopped_work); + wiphy_work_cancel(local->hw.wiphy, &local->radar_detected_work); + wiphy_unlock(local->hw.wiphy); + rtnl_unlock(); + + cancel_work_sync(&local->restart_work); + cancel_work_sync(&local->reconfig_filter); + + ieee80211_clear_tx_pending(local); + rate_control_deinitialize(local); + + if (skb_queue_len(&local->skb_queue) || + skb_queue_len(&local->skb_queue_unreliable)) + wiphy_warn(local->hw.wiphy, "skb_queue not empty\n"); + skb_queue_purge(&local->skb_queue); + skb_queue_purge(&local->skb_queue_unreliable); + + wiphy_unregister(local->hw.wiphy); + destroy_workqueue(local->workqueue); + ieee80211_led_exit(local); + kfree(local->int_scan_req); +} +EXPORT_SYMBOL(ieee80211_unregister_hw); + +static int ieee80211_free_ack_frame(int id, void *p, void *data) +{ + WARN_ONCE(1, "Have pending ack frames!\n"); + kfree_skb(p); + return 0; +} + +void ieee80211_free_hw(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + enum nl80211_band band; + + mutex_destroy(&local->iflist_mtx); + mutex_destroy(&local->mtx); + + if (local->wiphy_ciphers_allocated) { + kfree(local->hw.wiphy->cipher_suites); + local->wiphy_ciphers_allocated = false; + } + + idr_for_each(&local->ack_status_frames, + ieee80211_free_ack_frame, NULL); + idr_destroy(&local->ack_status_frames); + + sta_info_stop(local); + + ieee80211_free_led_names(local); + + for (band = 0; band < NUM_NL80211_BANDS; band++) { + if (!(local->sband_allocated & BIT(band))) + continue; + kfree(local->hw.wiphy->bands[band]); + } + + wiphy_free(local->hw.wiphy); +} +EXPORT_SYMBOL(ieee80211_free_hw); + +static const char * const drop_reasons_monitor[] = { +#define V(x) #x, + [0] = "RX_DROP_MONITOR", + MAC80211_DROP_REASONS_MONITOR(V) +}; + +static struct drop_reason_list drop_reason_list_monitor = { + .reasons = drop_reasons_monitor, + .n_reasons = ARRAY_SIZE(drop_reasons_monitor), +}; + +static const char * const drop_reasons_unusable[] = { + [0] = "RX_DROP_UNUSABLE", + MAC80211_DROP_REASONS_UNUSABLE(V) +#undef V +}; + +static struct drop_reason_list drop_reason_list_unusable = { + .reasons = drop_reasons_unusable, + .n_reasons = ARRAY_SIZE(drop_reasons_unusable), +}; + 
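For reference, the registration and teardown entry points above (ieee80211_register_hw(), ieee80211_unregister_hw() and ieee80211_free_hw()) are driven from a low-level driver's probe/remove path roughly as in the following sketch. This fragment is purely illustrative and not part of this patch; mydrv_priv, mydrv_ops, mydrv_band_2ghz, mydrv_probe() and mydrv_remove() are hypothetical placeholder names, while all mac80211 calls used are the real exported API.

	#include <net/mac80211.h>

	struct mydrv_priv {
		/* driver-private state lives in the ieee80211_hw allocation */
		int dummy;
	};

	static struct ieee80211_supported_band mydrv_band_2ghz;	/* filled in by the driver */

	static const struct ieee80211_ops mydrv_ops = {
		/* .tx, .start, .stop, .add_interface, .config, ... driver callbacks */
	};

	static int mydrv_probe(struct device *dev)
	{
		struct ieee80211_hw *hw;
		int err;

		/* allocate ieee80211_hw plus the private area, bound to mydrv_ops */
		hw = ieee80211_alloc_hw(sizeof(struct mydrv_priv), &mydrv_ops);
		if (!hw)
			return -ENOMEM;

		SET_IEEE80211_DEV(hw, dev);

		/* capabilities must be advertised before ieee80211_register_hw() */
		hw->queues = 4;				/* >= IEEE80211_NUM_ACS for QoS/HT */
		ieee80211_hw_set(hw, SIGNAL_DBM);	/* signal reported in dBm */
		hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
					     BIT(NL80211_IFTYPE_AP);
		hw->wiphy->bands[NL80211_BAND_2GHZ] = &mydrv_band_2ghz;

		/* runs the validation and wiphy registration shown above */
		err = ieee80211_register_hw(hw);
		if (err) {
			ieee80211_free_hw(hw);
			return err;
		}
		return 0;
	}

	static void mydrv_remove(struct ieee80211_hw *hw)
	{
		ieee80211_unregister_hw(hw);	/* removes interfaces and the wiphy */
		ieee80211_free_hw(hw);		/* frees hw and the private area */
	}

The error path mirrors the contract of ieee80211_register_hw(): on failure the caller still owns the hw and must release it with ieee80211_free_hw().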
+static int __init ieee80211_init(void) +{ + struct sk_buff *skb; + int ret; + + BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb)); + BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) + + IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb)); + + ret = rc80211_minstrel_init(); + if (ret) + return ret; + + ret = ieee80211_iface_init(); + if (ret) + goto err_netdev; + + drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR, + &drop_reason_list_monitor); + drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE, + &drop_reason_list_unusable); + + return 0; + err_netdev: + rc80211_minstrel_exit(); + + return ret; +} + +static void __exit ieee80211_exit(void) +{ + rc80211_minstrel_exit(); + + ieee80211s_stop(); + + ieee80211_iface_exit(); + + drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR); + drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE); + + rcu_barrier(); +} + + +subsys_initcall(ieee80211_init); +module_exit(ieee80211_exit); + +MODULE_DESCRIPTION("IEEE 802.11 subsystem"); +MODULE_LICENSE("GPL"); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c new file mode 100644 index 0000000000..e31c312c12 --- /dev/null +++ b/net/mac80211/mesh.c @@ -0,0 +1,1811 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2008, 2009 open80211s Ltd. + * Copyright (C) 2018 - 2023 Intel Corporation + * Authors: Luis Carlos Cobo <luisca@cozybit.com> + * Javier Cardona <javier@cozybit.com> + */ + +#include <linux/slab.h> +#include <asm/unaligned.h> +#include "ieee80211_i.h" +#include "mesh.h" +#include "wme.h" +#include "driver-ops.h" + +static int mesh_allocated; +static struct kmem_cache *rm_cache; + +bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt) +{ + return (mgmt->u.action.u.mesh_action.action_code == + WLAN_MESH_ACTION_HWMP_PATH_SELECTION); +} + +void ieee80211s_init(void) +{ + mesh_allocated = 1; + rm_cache = kmem_cache_create("mesh_rmc", sizeof(struct rmc_entry), + 0, 0, NULL); +} + +void ieee80211s_stop(void) +{ + if (!mesh_allocated) + return; + kmem_cache_destroy(rm_cache); +} + +static void ieee80211_mesh_housekeeping_timer(struct timer_list *t) +{ + struct ieee80211_sub_if_data *sdata = + from_timer(sdata, t, u.mesh.housekeeping_timer); + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); + + wiphy_work_queue(local->hw.wiphy, &sdata->work); +} + +/** + * mesh_matches_local - check if the config of a mesh point matches ours + * + * @sdata: local mesh subif + * @ie: information elements of a management frame from the mesh peer + * + * This function checks if the mesh configuration of a mesh point matches the + * local mesh configuration, i.e. if both nodes belong to the same mesh network. 
+ */ +bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, + struct ieee802_11_elems *ie) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + u32 basic_rates = 0; + struct cfg80211_chan_def sta_chan_def; + struct ieee80211_supported_band *sband; + u32 vht_cap_info = 0; + + /* + * As support for each feature is added, check for matching + * - On mesh config capabilities + * - Power Save Support En + * - Sync support enabled + * - Sync support active + * - Sync support required from peer + * - MDA enabled + * - Power management control on fc + */ + if (!(ifmsh->mesh_id_len == ie->mesh_id_len && + memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && + (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) && + (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) && + (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) && + (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) && + (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))) + return false; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return false; + + ieee80211_sta_get_rates(sdata, ie, sband->band, + &basic_rates); + + if (sdata->vif.bss_conf.basic_rates != basic_rates) + return false; + + cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chandef.chan, + NL80211_CHAN_NO_HT); + ieee80211_chandef_ht_oper(ie->ht_operation, &sta_chan_def); + + if (ie->vht_cap_elem) + vht_cap_info = le32_to_cpu(ie->vht_cap_elem->vht_cap_info); + + ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info, + ie->vht_operation, ie->ht_operation, + &sta_chan_def); + ieee80211_chandef_he_6ghz_oper(sdata, ie->he_operation, ie->eht_operation, + &sta_chan_def); + + if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef, + &sta_chan_def)) + return false; + + return true; +} + +/** + * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links + * + * @ie: information elements of a management frame from the mesh peer + */ +bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) +{ + return (ie->mesh_config->meshconf_cap & + IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS) != 0; +} + +/** + * mesh_accept_plinks_update - update accepting_plink in local mesh beacons + * + * @sdata: mesh interface in which mesh beacons are going to be updated + * + * Returns: beacon changed flag if the beacon content changed. + */ +u64 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata) +{ + bool free_plinks; + u64 changed = 0; + + /* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0, + * the mesh interface might be able to establish plinks with peers that + * are already on the table but are not on PLINK_ESTAB state. However, + * in general the mesh interface is not accepting peer link requests + * from new peers, and that must be reflected in the beacon + */ + free_plinks = mesh_plink_availables(sdata); + + if (free_plinks != sdata->u.mesh.accepting_plinks) { + sdata->u.mesh.accepting_plinks = free_plinks; + changed = BSS_CHANGED_BEACON; + } + + return changed; +} + +/* + * mesh_sta_cleanup - clean up any mesh sta state + * + * @sta: mesh sta to clean up. 
+ */ +void mesh_sta_cleanup(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + u64 changed = mesh_plink_deactivate(sta); + + if (changed) + ieee80211_mbss_info_change_notify(sdata, changed); +} + +int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) +{ + int i; + + sdata->u.mesh.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL); + if (!sdata->u.mesh.rmc) + return -ENOMEM; + sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1; + for (i = 0; i < RMC_BUCKETS; i++) + INIT_HLIST_HEAD(&sdata->u.mesh.rmc->bucket[i]); + return 0; +} + +void mesh_rmc_free(struct ieee80211_sub_if_data *sdata) +{ + struct mesh_rmc *rmc = sdata->u.mesh.rmc; + struct rmc_entry *p; + struct hlist_node *n; + int i; + + if (!sdata->u.mesh.rmc) + return; + + for (i = 0; i < RMC_BUCKETS; i++) { + hlist_for_each_entry_safe(p, n, &rmc->bucket[i], list) { + hlist_del(&p->list); + kmem_cache_free(rm_cache, p); + } + } + + kfree(rmc); + sdata->u.mesh.rmc = NULL; +} + +/** + * mesh_rmc_check - Check frame in recent multicast cache and add if absent. + * + * @sdata: interface + * @sa: source address + * @mesh_hdr: mesh_header + * + * Returns: 0 if the frame is not in the cache, nonzero otherwise. + * + * Checks using the source address and the mesh sequence number if we have + * received this frame lately. If the frame is not in the cache, it is added to + * it. + */ +int mesh_rmc_check(struct ieee80211_sub_if_data *sdata, + const u8 *sa, struct ieee80211s_hdr *mesh_hdr) +{ + struct mesh_rmc *rmc = sdata->u.mesh.rmc; + u32 seqnum = 0; + int entries = 0; + u8 idx; + struct rmc_entry *p; + struct hlist_node *n; + + if (!rmc) + return -1; + + /* Don't care about endianness since only match matters */ + memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum)); + idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask; + hlist_for_each_entry_safe(p, n, &rmc->bucket[idx], list) { + ++entries; + if (time_after(jiffies, p->exp_time) || + entries == RMC_QUEUE_MAX_LEN) { + hlist_del(&p->list); + kmem_cache_free(rm_cache, p); + --entries; + } else if ((seqnum == p->seqnum) && ether_addr_equal(sa, p->sa)) + return -1; + } + + p = kmem_cache_alloc(rm_cache, GFP_ATOMIC); + if (!p) + return 0; + + p->seqnum = seqnum; + p->exp_time = jiffies + RMC_TIMEOUT; + memcpy(p->sa, sa, ETH_ALEN); + hlist_add_head(&p->list, &rmc->bucket[idx]); + return 0; +} + +int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + u8 *pos, neighbors; + u8 meshconf_len = sizeof(struct ieee80211_meshconf_ie); + bool is_connected_to_gate = ifmsh->num_gates > 0 || + ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol || + ifmsh->mshcfg.dot11MeshConnectedToMeshGate; + bool is_connected_to_as = ifmsh->mshcfg.dot11MeshConnectedToAuthServer; + + if (skb_tailroom(skb) < 2 + meshconf_len) + return -ENOMEM; + + pos = skb_put(skb, 2 + meshconf_len); + *pos++ = WLAN_EID_MESH_CONFIG; + *pos++ = meshconf_len; + + /* save a pointer for quick updates in pre-tbtt */ + ifmsh->meshconf_offset = pos - skb->data; + + /* Active path selection protocol ID */ + *pos++ = ifmsh->mesh_pp_id; + /* Active path selection metric ID */ + *pos++ = ifmsh->mesh_pm_id; + /* Congestion control mode identifier */ + *pos++ = ifmsh->mesh_cc_id; + /* Synchronization protocol identifier */ + *pos++ = ifmsh->mesh_sp_id; + /* Authentication Protocol identifier */ + *pos++ = ifmsh->mesh_auth_id; + /* Mesh Formation Info - number of neighbors */ + neighbors = atomic_read(&ifmsh->estab_plinks); + neighbors = 
min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS); + *pos++ = (is_connected_to_as << 7) | + (neighbors << 1) | + is_connected_to_gate; + /* Mesh capability */ + *pos = 0x00; + *pos |= ifmsh->mshcfg.dot11MeshForwarding ? + IEEE80211_MESHCONF_CAPAB_FORWARDING : 0x00; + *pos |= ifmsh->accepting_plinks ? + IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; + /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */ + *pos |= ifmsh->ps_peers_deep_sleep ? + IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00; + return 0; +} + +int mesh_add_meshid_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + u8 *pos; + + if (skb_tailroom(skb) < 2 + ifmsh->mesh_id_len) + return -ENOMEM; + + pos = skb_put(skb, 2 + ifmsh->mesh_id_len); + *pos++ = WLAN_EID_MESH_ID; + *pos++ = ifmsh->mesh_id_len; + if (ifmsh->mesh_id_len) + memcpy(pos, ifmsh->mesh_id, ifmsh->mesh_id_len); + + return 0; +} + +static int mesh_add_awake_window_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + u8 *pos; + + /* see IEEE802.11-2012 13.14.6 */ + if (ifmsh->ps_peers_light_sleep == 0 && + ifmsh->ps_peers_deep_sleep == 0 && + ifmsh->nonpeer_pm == NL80211_MESH_POWER_ACTIVE) + return 0; + + if (skb_tailroom(skb) < 4) + return -ENOMEM; + + pos = skb_put(skb, 2 + 2); + *pos++ = WLAN_EID_MESH_AWAKE_WINDOW; + *pos++ = 2; + put_unaligned_le16(ifmsh->mshcfg.dot11MeshAwakeWindowDuration, pos); + + return 0; +} + +int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + u8 offset, len; + const u8 *data; + + if (!ifmsh->ie || !ifmsh->ie_len) + return 0; + + /* fast-forward to vendor IEs */ + offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0); + + if (offset < ifmsh->ie_len) { + len = ifmsh->ie_len - offset; + data = ifmsh->ie + offset; + if (skb_tailroom(skb) < len) + return -ENOMEM; + skb_put_data(skb, data, len); + } + + return 0; +} + +int mesh_add_rsn_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + u8 len = 0; + const u8 *data; + + if (!ifmsh->ie || !ifmsh->ie_len) + return 0; + + /* find RSN IE */ + data = cfg80211_find_ie(WLAN_EID_RSN, ifmsh->ie, ifmsh->ie_len); + if (!data) + return 0; + + len = data[1] + 2; + + if (skb_tailroom(skb) < len) + return -ENOMEM; + skb_put_data(skb, data, len); + + return 0; +} + +static int mesh_add_ds_params_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_channel *chan; + u8 *pos; + + if (skb_tailroom(skb) < 3) + return -ENOMEM; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + if (WARN_ON(!chanctx_conf)) { + rcu_read_unlock(); + return -EINVAL; + } + chan = chanctx_conf->def.chan; + rcu_read_unlock(); + + pos = skb_put(skb, 2 + 1); + *pos++ = WLAN_EID_DS_PARAMS; + *pos++ = 1; + *pos++ = ieee80211_frequency_to_channel(chan->center_freq); + + return 0; +} + +int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_supported_band *sband; + u8 *pos; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; + + /* HT not allowed in 6 GHz */ + if (sband->band == NL80211_BAND_6GHZ) + return 0; + + if (!sband->ht_cap.ht_supported || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + sdata->vif.bss_conf.chandef.width == 
NL80211_CHAN_WIDTH_5 || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) + return 0; + + if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap)) + return -ENOMEM; + + pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap)); + ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, sband->ht_cap.cap); + + return 0; +} + +int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_channel *channel; + struct ieee80211_supported_band *sband; + struct ieee80211_sta_ht_cap *ht_cap; + u8 *pos; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + if (WARN_ON(!chanctx_conf)) { + rcu_read_unlock(); + return -EINVAL; + } + channel = chanctx_conf->def.chan; + rcu_read_unlock(); + + sband = local->hw.wiphy->bands[channel->band]; + ht_cap = &sband->ht_cap; + + /* HT not allowed in 6 GHz */ + if (sband->band == NL80211_BAND_6GHZ) + return 0; + + if (!ht_cap->ht_supported || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) + return 0; + + if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation)) + return -ENOMEM; + + pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation)); + ieee80211_ie_build_ht_oper(pos, ht_cap, &sdata->vif.bss_conf.chandef, + sdata->vif.bss_conf.ht_operation_mode, + false); + + return 0; +} + +int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_supported_band *sband; + u8 *pos; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; + + /* VHT not allowed in 6 GHz */ + if (sband->band == NL80211_BAND_6GHZ) + return 0; + + if (!sband->vht_cap.vht_supported || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) + return 0; + + if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap)) + return -ENOMEM; + + pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_cap)); + ieee80211_ie_build_vht_cap(pos, &sband->vht_cap, sband->vht_cap.cap); + + return 0; +} + +int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_channel *channel; + struct ieee80211_supported_band *sband; + struct ieee80211_sta_vht_cap *vht_cap; + u8 *pos; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + if (WARN_ON(!chanctx_conf)) { + rcu_read_unlock(); + return -EINVAL; + } + channel = chanctx_conf->def.chan; + rcu_read_unlock(); + + sband = local->hw.wiphy->bands[channel->band]; + vht_cap = &sband->vht_cap; + + /* VHT not allowed in 6 GHz */ + if (sband->band == NL80211_BAND_6GHZ) + return 0; + + if (!vht_cap->vht_supported || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) + return 0; + + if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_operation)) + return -ENOMEM; + + pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation)); + ieee80211_ie_build_vht_oper(pos, vht_cap, + &sdata->vif.bss_conf.chandef); + + return 0; +} + +int 
mesh_add_he_cap_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u8 ie_len) +{ + const struct ieee80211_sta_he_cap *he_cap; + struct ieee80211_supported_band *sband; + u8 *pos; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; + + he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT); + + if (!he_cap || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) + return 0; + + if (skb_tailroom(skb) < ie_len) + return -ENOMEM; + + pos = skb_put(skb, ie_len); + ieee80211_ie_build_he_cap(0, pos, he_cap, pos + ie_len); + + return 0; +} + +int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + const struct ieee80211_sta_he_cap *he_cap; + struct ieee80211_supported_band *sband; + u32 len; + u8 *pos; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; + + he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT); + if (!he_cap || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) + return 0; + + len = 2 + 1 + sizeof(struct ieee80211_he_operation); + if (sdata->vif.bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) + len += sizeof(struct ieee80211_he_6ghz_oper); + + if (skb_tailroom(skb) < len) + return -ENOMEM; + + pos = skb_put(skb, len); + ieee80211_ie_build_he_oper(pos, &sdata->vif.bss_conf.chandef); + + return 0; +} + +int mesh_add_he_6ghz_cap_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_supported_band *sband; + const struct ieee80211_sband_iftype_data *iftd; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; + + iftd = ieee80211_get_sband_iftype_data(sband, + NL80211_IFTYPE_MESH_POINT); + /* The device doesn't support HE in mesh mode or at all */ + if (!iftd) + return 0; + + ieee80211_ie_build_he_6ghz_cap(sdata, sdata->deflink.smps_mode, skb); + return 0; +} + +int mesh_add_eht_cap_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u8 ie_len) +{ + const struct ieee80211_sta_he_cap *he_cap; + const struct ieee80211_sta_eht_cap *eht_cap; + struct ieee80211_supported_band *sband; + u8 *pos; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; + + he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT); + eht_cap = ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT); + if (!he_cap || !eht_cap || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) + return 0; + + if (skb_tailroom(skb) < ie_len) + return -ENOMEM; + + pos = skb_put(skb, ie_len); + ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + ie_len, false); + + return 0; +} + +int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) +{ + const struct ieee80211_sta_eht_cap *eht_cap; + struct ieee80211_supported_band *sband; + u32 len; + u8 *pos; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; + + eht_cap = ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT); + if (!eht_cap || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || + 
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) + return 0; + + len = 2 + 1 + offsetof(struct ieee80211_eht_operation, optional) + + offsetof(struct ieee80211_eht_operation_info, optional); + + if (skb_tailroom(skb) < len) + return -ENOMEM; + + pos = skb_put(skb, len); + ieee80211_ie_build_eht_oper(pos, &sdata->vif.bss_conf.chandef, eht_cap); + + return 0; +} + +static void ieee80211_mesh_path_timer(struct timer_list *t) +{ + struct ieee80211_sub_if_data *sdata = + from_timer(sdata, t, u.mesh.mesh_path_timer); + + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); +} + +static void ieee80211_mesh_path_root_timer(struct timer_list *t) +{ + struct ieee80211_sub_if_data *sdata = + from_timer(sdata, t, u.mesh.mesh_path_root_timer); + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); + + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); +} + +void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh) +{ + if (ifmsh->mshcfg.dot11MeshHWMPRootMode > IEEE80211_ROOTMODE_ROOT) + set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); + else { + clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); + /* stop running timer */ + del_timer_sync(&ifmsh->mesh_path_root_timer); + } +} + +static void +ieee80211_mesh_update_bss_params(struct ieee80211_sub_if_data *sdata, + u8 *ie, u8 ie_len) +{ + struct ieee80211_supported_band *sband; + const struct element *cap; + const struct ieee80211_he_operation *he_oper = NULL; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return; + + if (!ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT) || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) + return; + + sdata->vif.bss_conf.he_support = true; + + cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ie_len); + if (cap && cap->datalen >= 1 + sizeof(*he_oper) && + cap->datalen >= 1 + ieee80211_he_oper_size(cap->data + 1)) + he_oper = (void *)(cap->data + 1); + + if (he_oper) + sdata->vif.bss_conf.he_oper.params = + __le32_to_cpu(he_oper->he_oper_params); + + sdata->vif.bss_conf.eht_support = + !!ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT); +} + +bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u32 ctrl_flags) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_mesh_fast_tx *entry; + struct ieee80211s_hdr *meshhdr; + u8 sa[ETH_ALEN] __aligned(2); + struct tid_ampdu_tx *tid_tx; + struct sta_info *sta; + bool copy_sa = false; + u16 ethertype; + u8 tid; + + if (ctrl_flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP) + return false; + + if (ifmsh->mshcfg.dot11MeshNolearn) + return false; + + /* Add support for these cases later */ + if (ifmsh->ps_peers_light_sleep || ifmsh->ps_peers_deep_sleep) + return false; + + if (is_multicast_ether_addr(skb->data)) + return false; + + ethertype = (skb->data[12] << 8) | skb->data[13]; + if (ethertype < ETH_P_802_3_MIN) + return false; + + if (skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) + return false; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + skb_set_transport_header(skb, skb_checksum_start_offset(skb)); + if (skb_checksum_help(skb)) + return false; + } + + entry = mesh_fast_tx_get(sdata, skb->data); + if (!entry) + return false; + + if (skb_headroom(skb) < entry->hdrlen + entry->fast_tx.hdr_len) + return false; + + sta = rcu_dereference(entry->mpath->next_hop); + if 
(!sta) + return false; + + tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; + tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); + if (tid_tx) { + if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) + return false; + if (tid_tx->timeout) + tid_tx->last_tx = jiffies; + } + + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return true; + + skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, sta, skb)); + + meshhdr = (struct ieee80211s_hdr *)entry->hdr; + if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) { + /* preserve SA from eth header for 6-addr frames */ + ether_addr_copy(sa, skb->data + ETH_ALEN); + copy_sa = true; + } + + memcpy(skb_push(skb, entry->hdrlen - 2 * ETH_ALEN), entry->hdr, + entry->hdrlen); + + meshhdr = (struct ieee80211s_hdr *)skb->data; + put_unaligned_le32(atomic_inc_return(&sdata->u.mesh.mesh_seqnum), + &meshhdr->seqnum); + meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; + if (copy_sa) + ether_addr_copy(meshhdr->eaddr2, sa); + + skb_push(skb, 2 * ETH_ALEN); + __ieee80211_xmit_fast(sdata, sta, &entry->fast_tx, skb, tid_tx, + entry->mpath->dst, sdata->vif.addr); + + return true; +} + +/** + * ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame + * @hdr: 802.11 frame header + * @fc: frame control field + * @meshda: destination address in the mesh + * @meshsa: source address in the mesh. Same as TA, as frame is + * locally originated. + * + * Return the length of the 802.11 (does not include a mesh control header) + */ +int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, + const u8 *meshda, const u8 *meshsa) +{ + if (is_multicast_ether_addr(meshda)) { + *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); + /* DA TA SA */ + memcpy(hdr->addr1, meshda, ETH_ALEN); + memcpy(hdr->addr2, meshsa, ETH_ALEN); + memcpy(hdr->addr3, meshsa, ETH_ALEN); + return 24; + } else { + *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); + /* RA TA DA SA */ + eth_zero_addr(hdr->addr1); /* RA is resolved later */ + memcpy(hdr->addr2, meshsa, ETH_ALEN); + memcpy(hdr->addr3, meshda, ETH_ALEN); + memcpy(hdr->addr4, meshsa, ETH_ALEN); + return 30; + } +} + +/** + * ieee80211_new_mesh_header - create a new mesh header + * @sdata: mesh interface to be used + * @meshhdr: uninitialized mesh header + * @addr4or5: 1st address in the ae header, which may correspond to address 4 + * (if addr6 is NULL) or address 5 (if addr6 is present). It may + * be NULL. + * @addr6: 2nd address in the ae header, which corresponds to addr6 of the + * mesh frame + * + * Return the header length. 
+ */ +unsigned int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata, + struct ieee80211s_hdr *meshhdr, + const char *addr4or5, const char *addr6) +{ + if (WARN_ON(!addr4or5 && addr6)) + return 0; + + memset(meshhdr, 0, sizeof(*meshhdr)); + + meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; + + put_unaligned_le32(atomic_inc_return(&sdata->u.mesh.mesh_seqnum), + &meshhdr->seqnum); + if (addr4or5 && !addr6) { + meshhdr->flags |= MESH_FLAGS_AE_A4; + memcpy(meshhdr->eaddr1, addr4or5, ETH_ALEN); + return 2 * ETH_ALEN; + } else if (addr4or5 && addr6) { + meshhdr->flags |= MESH_FLAGS_AE_A5_A6; + memcpy(meshhdr->eaddr1, addr4or5, ETH_ALEN); + memcpy(meshhdr->eaddr2, addr6, ETH_ALEN); + return 3 * ETH_ALEN; + } + + return ETH_ALEN; +} + +static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + u64 changed; + + if (ifmsh->mshcfg.plink_timeout > 0) + ieee80211_sta_expire(sdata, ifmsh->mshcfg.plink_timeout * HZ); + mesh_path_expire(sdata); + + changed = mesh_accept_plinks_update(sdata); + ieee80211_mbss_info_change_notify(sdata, changed); + + mesh_fast_tx_gc(sdata); + + mod_timer(&ifmsh->housekeeping_timer, + round_jiffies(jiffies + + IEEE80211_MESH_HOUSEKEEPING_INTERVAL)); +} + +static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + u32 interval; + + mesh_path_tx_root_frame(sdata); + + if (ifmsh->mshcfg.dot11MeshHWMPRootMode == IEEE80211_PROACTIVE_RANN) + interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval; + else + interval = ifmsh->mshcfg.dot11MeshHWMProotInterval; + + mod_timer(&ifmsh->mesh_path_root_timer, + round_jiffies(TU_TO_EXP_TIME(interval))); +} + +static int +ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh) +{ + struct beacon_data *bcn; + int head_len, tail_len; + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt; + struct ieee80211_chanctx_conf *chanctx_conf; + struct mesh_csa_settings *csa; + enum nl80211_band band; + u8 ie_len_he_cap, ie_len_eht_cap; + u8 *pos; + struct ieee80211_sub_if_data *sdata; + int hdr_len = offsetofend(struct ieee80211_mgmt, u.beacon); + + sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh); + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + band = chanctx_conf->def.chan->band; + rcu_read_unlock(); + + ie_len_he_cap = ieee80211_ie_len_he_cap(sdata, + NL80211_IFTYPE_MESH_POINT); + ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata, + NL80211_IFTYPE_MESH_POINT); + head_len = hdr_len + + 2 + /* NULL SSID */ + /* Channel Switch Announcement */ + 2 + sizeof(struct ieee80211_channel_sw_ie) + + /* Mesh Channel Switch Parameters */ + 2 + sizeof(struct ieee80211_mesh_chansw_params_ie) + + /* Channel Switch Wrapper + Wide Bandwidth CSA IE */ + 2 + 2 + sizeof(struct ieee80211_wide_bw_chansw_ie) + + 2 + sizeof(struct ieee80211_sec_chan_offs_ie) + + 2 + 8 + /* supported rates */ + 2 + 3; /* DS params */ + tail_len = 2 + (IEEE80211_MAX_SUPP_RATES - 8) + + 2 + sizeof(struct ieee80211_ht_cap) + + 2 + sizeof(struct ieee80211_ht_operation) + + 2 + ifmsh->mesh_id_len + + 2 + sizeof(struct ieee80211_meshconf_ie) + + 2 + sizeof(__le16) + /* awake window */ + 2 + sizeof(struct ieee80211_vht_cap) + + 2 + sizeof(struct ieee80211_vht_operation) + + ie_len_he_cap + + 2 + 1 + sizeof(struct ieee80211_he_operation) + + sizeof(struct ieee80211_he_6ghz_oper) + + 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa) + + ie_len_eht_cap + + 2 + 1 + offsetof(struct 
ieee80211_eht_operation, optional) + + offsetof(struct ieee80211_eht_operation_info, optional) + + ifmsh->ie_len; + + bcn = kzalloc(sizeof(*bcn) + head_len + tail_len, GFP_KERNEL); + /* need an skb for IE builders to operate on */ + skb = __dev_alloc_skb(max(head_len, tail_len), GFP_KERNEL); + + if (!bcn || !skb) + goto out_free; + + /* + * pointers go into the block we allocated, + * memory is | beacon_data | head | tail | + */ + bcn->head = ((u8 *) bcn) + sizeof(*bcn); + + /* fill in the head */ + mgmt = skb_put_zero(skb, hdr_len); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_BEACON); + eth_broadcast_addr(mgmt->da); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); + ieee80211_mps_set_frame_flags(sdata, NULL, (void *) mgmt); + mgmt->u.beacon.beacon_int = + cpu_to_le16(sdata->vif.bss_conf.beacon_int); + mgmt->u.beacon.capab_info |= cpu_to_le16( + sdata->u.mesh.security ? WLAN_CAPABILITY_PRIVACY : 0); + + pos = skb_put(skb, 2); + *pos++ = WLAN_EID_SSID; + *pos++ = 0x0; + + rcu_read_lock(); + csa = rcu_dereference(ifmsh->csa); + if (csa) { + enum nl80211_channel_type ct; + struct cfg80211_chan_def *chandef; + int ie_len = 2 + sizeof(struct ieee80211_channel_sw_ie) + + 2 + sizeof(struct ieee80211_mesh_chansw_params_ie); + + pos = skb_put_zero(skb, ie_len); + *pos++ = WLAN_EID_CHANNEL_SWITCH; + *pos++ = 3; + *pos++ = 0x0; + *pos++ = ieee80211_frequency_to_channel( + csa->settings.chandef.chan->center_freq); + bcn->cntdwn_current_counter = csa->settings.count; + bcn->cntdwn_counter_offsets[0] = hdr_len + 6; + *pos++ = csa->settings.count; + *pos++ = WLAN_EID_CHAN_SWITCH_PARAM; + *pos++ = 6; + if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_INIT) { + *pos++ = ifmsh->mshcfg.dot11MeshTTL; + *pos |= WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; + } else { + *pos++ = ifmsh->chsw_ttl; + } + *pos++ |= csa->settings.block_tx ? 
+ WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00; + put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos); + pos += 2; + put_unaligned_le16(ifmsh->pre_value, pos); + pos += 2; + + switch (csa->settings.chandef.width) { + case NL80211_CHAN_WIDTH_40: + ie_len = 2 + sizeof(struct ieee80211_sec_chan_offs_ie); + pos = skb_put_zero(skb, ie_len); + + *pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET; /* EID */ + *pos++ = 1; /* len */ + ct = cfg80211_get_chandef_type(&csa->settings.chandef); + if (ct == NL80211_CHAN_HT40PLUS) + *pos++ = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + else + *pos++ = IEEE80211_HT_PARAM_CHA_SEC_BELOW; + break; + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + /* Channel Switch Wrapper + Wide Bandwidth CSA IE */ + ie_len = 2 + 2 + + sizeof(struct ieee80211_wide_bw_chansw_ie); + pos = skb_put_zero(skb, ie_len); + + *pos++ = WLAN_EID_CHANNEL_SWITCH_WRAPPER; /* EID */ + *pos++ = 5; /* len */ + /* put sub IE */ + chandef = &csa->settings.chandef; + ieee80211_ie_build_wide_bw_cs(pos, chandef); + break; + default: + break; + } + } + rcu_read_unlock(); + + if (ieee80211_add_srates_ie(sdata, skb, true, band) || + mesh_add_ds_params_ie(sdata, skb)) + goto out_free; + + bcn->head_len = skb->len; + memcpy(bcn->head, skb->data, bcn->head_len); + + /* now the tail */ + skb_trim(skb, 0); + bcn->tail = bcn->head + bcn->head_len; + + if (ieee80211_add_ext_srates_ie(sdata, skb, true, band) || + mesh_add_rsn_ie(sdata, skb) || + mesh_add_ht_cap_ie(sdata, skb) || + mesh_add_ht_oper_ie(sdata, skb) || + mesh_add_meshid_ie(sdata, skb) || + mesh_add_meshconf_ie(sdata, skb) || + mesh_add_awake_window_ie(sdata, skb) || + mesh_add_vht_cap_ie(sdata, skb) || + mesh_add_vht_oper_ie(sdata, skb) || + mesh_add_he_cap_ie(sdata, skb, ie_len_he_cap) || + mesh_add_he_oper_ie(sdata, skb) || + mesh_add_he_6ghz_cap_ie(sdata, skb) || + mesh_add_eht_cap_ie(sdata, skb, ie_len_eht_cap) || + mesh_add_eht_oper_ie(sdata, skb) || + mesh_add_vendor_ies(sdata, skb)) + goto out_free; + + bcn->tail_len = skb->len; + memcpy(bcn->tail, skb->data, bcn->tail_len); + ieee80211_mesh_update_bss_params(sdata, bcn->tail, bcn->tail_len); + bcn->meshconf = (struct ieee80211_meshconf_ie *) + (bcn->tail + ifmsh->meshconf_offset); + + dev_kfree_skb(skb); + rcu_assign_pointer(ifmsh->beacon, bcn); + return 0; +out_free: + kfree(bcn); + dev_kfree_skb(skb); + return -ENOMEM; +} + +static int +ieee80211_mesh_rebuild_beacon(struct ieee80211_sub_if_data *sdata) +{ + struct beacon_data *old_bcn; + int ret; + + old_bcn = sdata_dereference(sdata->u.mesh.beacon, sdata); + ret = ieee80211_mesh_build_beacon(&sdata->u.mesh); + if (ret) + /* just reuse old beacon */ + return ret; + + if (old_bcn) + kfree_rcu(old_bcn, rcu_head); + return 0; +} + +void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata, + u64 changed) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + unsigned long bits = changed; + u32 bit; + + if (!bits) + return; + + /* if we race with running work, worst case this work becomes a noop */ + for_each_set_bit(bit, &bits, sizeof(changed) * BITS_PER_BYTE) + set_bit(bit, ifmsh->mbss_changed); + set_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags); + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); +} + +int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_local *local = sdata->local; + u64 changed = BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED | + BSS_CHANGED_HT | + BSS_CHANGED_BASIC_RATES | + 
BSS_CHANGED_BEACON_INT | + BSS_CHANGED_MCAST_RATE; + + local->fif_other_bss++; + /* mesh ifaces must set allmulti to forward mcast traffic */ + atomic_inc(&local->iff_allmultis); + ieee80211_configure_filter(local); + + ifmsh->mesh_cc_id = 0; /* Disabled */ + /* register sync ops from extensible synchronization framework */ + ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id); + ifmsh->sync_offset_clockdrift_max = 0; + set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); + ieee80211_mesh_root_setup(ifmsh); + wiphy_work_queue(local->hw.wiphy, &sdata->work); + sdata->vif.bss_conf.ht_operation_mode = + ifmsh->mshcfg.ht_opmode; + sdata->vif.bss_conf.enable_beacon = true; + + changed |= ieee80211_mps_local_status_update(sdata); + + if (ieee80211_mesh_build_beacon(ifmsh)) { + ieee80211_stop_mesh(sdata); + return -ENOMEM; + } + + ieee80211_recalc_dtim(local, sdata); + ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed); + + netif_carrier_on(sdata->dev); + return 0; +} + +void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct beacon_data *bcn; + + netif_carrier_off(sdata->dev); + + /* flush STAs and mpaths on this iface */ + sta_info_flush(sdata); + ieee80211_free_keys(sdata, true); + mesh_path_flush_by_iface(sdata); + + /* stop the beacon */ + ifmsh->mesh_id_len = 0; + sdata->vif.bss_conf.enable_beacon = false; + sdata->beacon_rate_set = false; + clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_BEACON_ENABLED); + + /* remove beacon */ + bcn = sdata_dereference(ifmsh->beacon, sdata); + RCU_INIT_POINTER(ifmsh->beacon, NULL); + kfree_rcu(bcn, rcu_head); + + /* free all potentially still buffered group-addressed frames */ + local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf); + skb_queue_purge(&ifmsh->ps.bc_buf); + + del_timer_sync(&sdata->u.mesh.housekeeping_timer); + del_timer_sync(&sdata->u.mesh.mesh_path_root_timer); + del_timer_sync(&sdata->u.mesh.mesh_path_timer); + + /* clear any mesh work (for next join) we may have accrued */ + ifmsh->wrkq_flags = 0; + memset(ifmsh->mbss_changed, 0, sizeof(ifmsh->mbss_changed)); + + local->fif_other_bss--; + atomic_dec(&local->iff_allmultis); + ieee80211_configure_filter(local); +} + +static void ieee80211_mesh_csa_mark_radar(struct ieee80211_sub_if_data *sdata) +{ + int err; + + /* if the current channel is a DFS channel, mark the channel as + * unavailable. 
+ */
+ err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
+ &sdata->vif.bss_conf.chandef,
+ NL80211_IFTYPE_MESH_POINT);
+ if (err > 0)
+ cfg80211_radar_event(sdata->local->hw.wiphy,
+ &sdata->vif.bss_conf.chandef, GFP_ATOMIC);
+}
+
+static bool
+ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
+ struct ieee802_11_elems *elems, bool beacon)
+{
+ struct cfg80211_csa_settings params;
+ struct ieee80211_csa_ie csa_ie;
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct ieee80211_supported_band *sband;
+ int err;
+ ieee80211_conn_flags_t conn_flags = 0;
+ u32 vht_cap_info = 0;
+
+ sdata_assert_lock(sdata);
+
+ sband = ieee80211_get_sband(sdata);
+ if (!sband)
+ return false;
+
+ switch (sdata->vif.bss_conf.chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ conn_flags |= IEEE80211_CONN_DISABLE_HT;
+ fallthrough;
+ case NL80211_CHAN_WIDTH_20:
+ conn_flags |= IEEE80211_CONN_DISABLE_40MHZ;
+ fallthrough;
+ case NL80211_CHAN_WIDTH_40:
+ conn_flags |= IEEE80211_CONN_DISABLE_VHT;
+ break;
+ default:
+ break;
+ }
+
+ if (elems->vht_cap_elem)
+ vht_cap_info =
+ le32_to_cpu(elems->vht_cap_elem->vht_cap_info);
+
+ memset(&params, 0, sizeof(params));
+ err = ieee80211_parse_ch_switch_ie(sdata, elems, sband->band,
+ vht_cap_info,
+ conn_flags, sdata->vif.addr,
+ &csa_ie);
+ if (err < 0)
+ return false;
+ if (err)
+ return false;
+
+ /* Mark the channel unavailable if the reason for the switch is
+ * regulatory.
+ */
+ if (csa_ie.reason_code == WLAN_REASON_MESH_CHAN_REGULATORY)
+ ieee80211_mesh_csa_mark_radar(sdata);
+
+ params.chandef = csa_ie.chandef;
+ params.count = csa_ie.count;
+
+ if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, &params.chandef,
+ IEEE80211_CHAN_DISABLED) ||
+ !cfg80211_reg_can_beacon(sdata->local->hw.wiphy, &params.chandef,
+ NL80211_IFTYPE_MESH_POINT)) {
+ sdata_info(sdata,
+ "mesh STA %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), aborting\n",
+ sdata->vif.addr,
+ params.chandef.chan->center_freq,
+ params.chandef.width,
+ params.chandef.center_freq1,
+ params.chandef.center_freq2);
+ return false;
+ }
+
+ err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
+ &params.chandef,
+ NL80211_IFTYPE_MESH_POINT);
+ if (err < 0)
+ return false;
+ if (err > 0 && !ifmsh->userspace_handles_dfs) {
+ sdata_info(sdata,
+ "mesh STA %pM switches to channel requiring DFS (%d MHz, width:%d, CF1/2: %d/%d MHz), aborting\n",
+ sdata->vif.addr,
+ params.chandef.chan->center_freq,
+ params.chandef.width,
+ params.chandef.center_freq1,
+ params.chandef.center_freq2);
+ return false;
+ }
+
+ params.radar_required = err;
+
+ if (cfg80211_chandef_identical(&params.chandef,
+ &sdata->vif.bss_conf.chandef)) {
+ mcsa_dbg(sdata,
+ "received csa with an identical chandef, ignoring\n");
+ return true;
+ }
+
+ mcsa_dbg(sdata,
+ "received channel switch announcement to go to channel %d MHz\n",
+ params.chandef.chan->center_freq);
+
+ params.block_tx = csa_ie.mode & WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT;
+ if (beacon) {
+ ifmsh->chsw_ttl = csa_ie.ttl - 1;
+ if (ifmsh->pre_value >= csa_ie.pre_value)
+ return false;
+ ifmsh->pre_value = csa_ie.pre_value;
+ }
+
+ if (ifmsh->chsw_ttl >= ifmsh->mshcfg.dot11MeshTTL)
+ return false;
+
+ ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_REPEATER;
+
+ if (ieee80211_channel_switch(sdata->local->hw.wiphy, sdata->dev,
+ &params) < 0)
+ return false;
+
+ return true;
+}
+
+static void
+ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len)
+{
+ struct ieee80211_local *local =
sdata->local; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct sk_buff *presp; + struct beacon_data *bcn; + struct ieee80211_mgmt *hdr; + struct ieee802_11_elems *elems; + size_t baselen; + u8 *pos; + + pos = mgmt->u.probe_req.variable; + baselen = (u8 *) pos - (u8 *) mgmt; + if (baselen > len) + return; + + elems = ieee802_11_parse_elems(pos, len - baselen, false, NULL); + if (!elems) + return; + + if (!elems->mesh_id) + goto free; + + /* 802.11-2012 10.1.4.3.2 */ + if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) && + !is_broadcast_ether_addr(mgmt->da)) || + elems->ssid_len != 0) + goto free; + + if (elems->mesh_id_len != 0 && + (elems->mesh_id_len != ifmsh->mesh_id_len || + memcmp(elems->mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len))) + goto free; + + rcu_read_lock(); + bcn = rcu_dereference(ifmsh->beacon); + + if (!bcn) + goto out; + + presp = dev_alloc_skb(local->tx_headroom + + bcn->head_len + bcn->tail_len); + if (!presp) + goto out; + + skb_reserve(presp, local->tx_headroom); + skb_put_data(presp, bcn->head, bcn->head_len); + skb_put_data(presp, bcn->tail, bcn->tail_len); + hdr = (struct ieee80211_mgmt *) presp->data; + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_RESP); + memcpy(hdr->da, mgmt->sa, ETH_ALEN); + IEEE80211_SKB_CB(presp)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + ieee80211_tx_skb(sdata, presp); +out: + rcu_read_unlock(); +free: + kfree(elems); +} + +static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, + u16 stype, + struct ieee80211_mgmt *mgmt, + size_t len, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee802_11_elems *elems; + struct ieee80211_channel *channel; + size_t baselen; + int freq; + enum nl80211_band band = rx_status->band; + + /* ignore ProbeResp to foreign address */ + if (stype == IEEE80211_STYPE_PROBE_RESP && + !ether_addr_equal(mgmt->da, sdata->vif.addr)) + return; + + baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; + if (baselen > len) + return; + + elems = ieee802_11_parse_elems(mgmt->u.probe_resp.variable, + len - baselen, + false, NULL); + if (!elems) + return; + + /* ignore non-mesh or secure / unsecure mismatch */ + if ((!elems->mesh_id || !elems->mesh_config) || + (elems->rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) || + (!elems->rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)) + goto free; + + if (elems->ds_params) + freq = ieee80211_channel_to_frequency(elems->ds_params[0], band); + else + freq = rx_status->freq; + + channel = ieee80211_get_channel(local->hw.wiphy, freq); + + if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) + goto free; + + if (mesh_matches_local(sdata, elems)) { + mpl_dbg(sdata, "rssi_threshold=%d,rx_status->signal=%d\n", + sdata->u.mesh.mshcfg.rssi_threshold, rx_status->signal); + if (!sdata->u.mesh.user_mpm || + sdata->u.mesh.mshcfg.rssi_threshold == 0 || + sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal) + mesh_neighbour_update(sdata, mgmt->sa, elems, + rx_status); + + if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT && + !sdata->vif.bss_conf.csa_active) + ieee80211_mesh_process_chnswitch(sdata, elems, true); + } + + if (ifmsh->sync_ops) + ifmsh->sync_ops->rx_bcn_presp(sdata, stype, mgmt, len, + elems->mesh_config, rx_status); +free: + kfree(elems); +} + +int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + 
struct mesh_csa_settings *tmp_csa_settings; + int ret = 0; + + /* Reset the TTL value and Initiator flag */ + ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE; + ifmsh->chsw_ttl = 0; + + /* Remove the CSA and MCSP elements from the beacon */ + tmp_csa_settings = sdata_dereference(ifmsh->csa, sdata); + RCU_INIT_POINTER(ifmsh->csa, NULL); + if (tmp_csa_settings) + kfree_rcu(tmp_csa_settings, rcu_head); + ret = ieee80211_mesh_rebuild_beacon(sdata); + if (ret) + return -EINVAL; + + *changed |= BSS_CHANGED_BEACON; + + mcsa_dbg(sdata, "complete switching to center freq %d MHz", + sdata->vif.bss_conf.chandef.chan->center_freq); + return 0; +} + +int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata, + struct cfg80211_csa_settings *csa_settings, + u64 *changed) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct mesh_csa_settings *tmp_csa_settings; + int ret = 0; + + lockdep_assert_held(&sdata->wdev.mtx); + + tmp_csa_settings = kmalloc(sizeof(*tmp_csa_settings), + GFP_ATOMIC); + if (!tmp_csa_settings) + return -ENOMEM; + + memcpy(&tmp_csa_settings->settings, csa_settings, + sizeof(struct cfg80211_csa_settings)); + + rcu_assign_pointer(ifmsh->csa, tmp_csa_settings); + + ret = ieee80211_mesh_rebuild_beacon(sdata); + if (ret) { + tmp_csa_settings = rcu_dereference(ifmsh->csa); + RCU_INIT_POINTER(ifmsh->csa, NULL); + kfree_rcu(tmp_csa_settings, rcu_head); + return ret; + } + + *changed |= BSS_CHANGED_BEACON; + return 0; +} + +static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee802_11_elems *elems) +{ + struct ieee80211_mgmt *mgmt_fwd; + struct sk_buff *skb; + struct ieee80211_local *local = sdata->local; + + skb = dev_alloc_skb(local->tx_headroom + len); + if (!skb) + return -ENOMEM; + skb_reserve(skb, local->tx_headroom); + mgmt_fwd = skb_put(skb, len); + + elems->mesh_chansw_params_ie->mesh_ttl--; + elems->mesh_chansw_params_ie->mesh_flags &= + ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; + + memcpy(mgmt_fwd, mgmt, len); + eth_broadcast_addr(mgmt_fwd->da); + memcpy(mgmt_fwd->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt_fwd->bssid, sdata->vif.addr, ETH_ALEN); + + ieee80211_tx_skb(sdata, skb); + return 0; +} + +static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee802_11_elems *elems; + u16 pre_value; + bool fwd_csa = true; + size_t baselen; + u8 *pos; + + if (mgmt->u.action.u.measurement.action_code != + WLAN_ACTION_SPCT_CHL_SWITCH) + return; + + pos = mgmt->u.action.u.chan_switch.variable; + baselen = offsetof(struct ieee80211_mgmt, + u.action.u.chan_switch.variable); + elems = ieee802_11_parse_elems(pos, len - baselen, true, NULL); + if (!elems) + return; + + if (!mesh_matches_local(sdata, elems)) + goto free; + + ifmsh->chsw_ttl = elems->mesh_chansw_params_ie->mesh_ttl; + if (!--ifmsh->chsw_ttl) + fwd_csa = false; + + pre_value = le16_to_cpu(elems->mesh_chansw_params_ie->mesh_pre_value); + if (ifmsh->pre_value >= pre_value) + goto free; + + ifmsh->pre_value = pre_value; + + if (!sdata->vif.bss_conf.csa_active && + !ieee80211_mesh_process_chnswitch(sdata, elems, false)) { + mcsa_dbg(sdata, "Failed to process CSA action frame"); + goto free; + } + + /* forward or re-broadcast the CSA frame */ + if (fwd_csa) { + if (mesh_fwd_csa_frame(sdata, mgmt, len, elems) < 0) + mcsa_dbg(sdata, "Failed to forward the CSA frame"); + } +free: + kfree(elems); +} + +static void 
ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len, + struct ieee80211_rx_status *rx_status) +{ + switch (mgmt->u.action.category) { + case WLAN_CATEGORY_SELF_PROTECTED: + switch (mgmt->u.action.u.self_prot.action_code) { + case WLAN_SP_MESH_PEERING_OPEN: + case WLAN_SP_MESH_PEERING_CLOSE: + case WLAN_SP_MESH_PEERING_CONFIRM: + mesh_rx_plink_frame(sdata, mgmt, len, rx_status); + break; + } + break; + case WLAN_CATEGORY_MESH_ACTION: + if (mesh_action_is_path_sel(mgmt)) + mesh_rx_path_sel_frame(sdata, mgmt, len); + break; + case WLAN_CATEGORY_SPECTRUM_MGMT: + mesh_rx_csa_frame(sdata, mgmt, len); + break; + } +} + +void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_rx_status *rx_status; + struct ieee80211_mgmt *mgmt; + u16 stype; + + sdata_lock(sdata); + + /* mesh already went down */ + if (!sdata->u.mesh.mesh_id_len) + goto out; + + rx_status = IEEE80211_SKB_RXCB(skb); + mgmt = (struct ieee80211_mgmt *) skb->data; + stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; + + switch (stype) { + case IEEE80211_STYPE_PROBE_RESP: + case IEEE80211_STYPE_BEACON: + ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len, + rx_status); + break; + case IEEE80211_STYPE_PROBE_REQ: + ieee80211_mesh_rx_probe_req(sdata, mgmt, skb->len); + break; + case IEEE80211_STYPE_ACTION: + ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); + break; + } +out: + sdata_unlock(sdata); +} + +static void mesh_bss_info_changed(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + u32 bit; + u64 changed = 0; + + for_each_set_bit(bit, ifmsh->mbss_changed, + sizeof(changed) * BITS_PER_BYTE) { + clear_bit(bit, ifmsh->mbss_changed); + changed |= BIT(bit); + } + + if (sdata->vif.bss_conf.enable_beacon && + (changed & (BSS_CHANGED_BEACON | + BSS_CHANGED_HT | + BSS_CHANGED_BASIC_RATES | + BSS_CHANGED_BEACON_INT))) + if (ieee80211_mesh_rebuild_beacon(sdata)) + return; + + ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed); +} + +void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + sdata_lock(sdata); + + /* mesh already went down */ + if (!sdata->u.mesh.mesh_id_len) + goto out; + + if (ifmsh->preq_queue_len && + time_after(jiffies, + ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval))) + mesh_path_start_discovery(sdata); + + if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags)) + ieee80211_mesh_housekeeping(sdata); + + if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags)) + ieee80211_mesh_rootpath(sdata); + + if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags)) + mesh_sync_adjust_tsf(sdata); + + if (test_and_clear_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags)) + mesh_bss_info_changed(sdata); +out: + sdata_unlock(sdata); +} + + +void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + static u8 zero_addr[ETH_ALEN] = {}; + + timer_setup(&ifmsh->housekeeping_timer, + ieee80211_mesh_housekeeping_timer, 0); + + ifmsh->accepting_plinks = true; + atomic_set(&ifmsh->mpaths, 0); + mesh_rmc_init(sdata); + ifmsh->last_preq = jiffies; + ifmsh->next_perr = jiffies; + ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE; + /* Allocate all mesh structures when creating the first mesh interface. 
*/ + if (!mesh_allocated) + ieee80211s_init(); + + mesh_pathtbl_init(sdata); + + timer_setup(&ifmsh->mesh_path_timer, ieee80211_mesh_path_timer, 0); + timer_setup(&ifmsh->mesh_path_root_timer, + ieee80211_mesh_path_root_timer, 0); + INIT_LIST_HEAD(&ifmsh->preq_queue.list); + skb_queue_head_init(&ifmsh->ps.bc_buf); + spin_lock_init(&ifmsh->mesh_preq_queue_lock); + spin_lock_init(&ifmsh->sync_offset_lock); + RCU_INIT_POINTER(ifmsh->beacon, NULL); + + sdata->vif.bss_conf.bssid = zero_addr; +} + +void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata) +{ + mesh_rmc_free(sdata); + mesh_pathtbl_unregister(sdata); +} diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h new file mode 100644 index 0000000000..ad8469293d --- /dev/null +++ b/net/mac80211/mesh.h @@ -0,0 +1,398 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2008, 2009 open80211s Ltd. + * Copyright (C) 2023 Intel Corporation + * Authors: Luis Carlos Cobo <luisca@cozybit.com> + * Javier Cardona <javier@cozybit.com> + */ + +#ifndef IEEE80211S_H +#define IEEE80211S_H + +#include <linux/types.h> +#include <linux/jhash.h> +#include "ieee80211_i.h" + + +/* Data structures */ + +/** + * enum mesh_path_flags - mac80211 mesh path flags + * + * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding + * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path + * @MESH_PATH_SN_VALID: the mesh path contains a valid destination sequence + * number + * @MESH_PATH_FIXED: the mesh path has been manually set and should not be + * modified + * @MESH_PATH_RESOLVED: the mesh path has been resolved + * @MESH_PATH_REQ_QUEUED: there is an unsent path request for this destination + * already queued up, waiting for the discovery process to start. + * @MESH_PATH_DELETED: the mesh path has been deleted and should no longer + * be used + * + * MESH_PATH_RESOLVED is used by the mesh path timer to + * decide when to stop or cancel the mesh path discovery. + */ +enum mesh_path_flags { + MESH_PATH_ACTIVE = BIT(0), + MESH_PATH_RESOLVING = BIT(1), + MESH_PATH_SN_VALID = BIT(2), + MESH_PATH_FIXED = BIT(3), + MESH_PATH_RESOLVED = BIT(4), + MESH_PATH_REQ_QUEUED = BIT(5), + MESH_PATH_DELETED = BIT(6), +}; + +/** + * enum mesh_deferred_task_flags - mac80211 mesh deferred tasks + * + * @MESH_WORK_HOUSEKEEPING: run the periodic mesh housekeeping tasks + * @MESH_WORK_ROOT: the mesh root station needs to send a frame + * @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other + * mesh nodes + * @MESH_WORK_MBSS_CHANGED: rebuild beacon and notify driver of BSS changes + */ +enum mesh_deferred_task_flags { + MESH_WORK_HOUSEKEEPING, + MESH_WORK_ROOT, + MESH_WORK_DRIFT_ADJUST, + MESH_WORK_MBSS_CHANGED, +}; + +/** + * struct mesh_path - mac80211 mesh path structure + * + * @dst: mesh path destination mac address + * @mpp: mesh proxy mac address + * @rhash: rhashtable list pointer + * @walk_list: linked list containing all mesh_path objects.
+ * @gate_list: list pointer for known gates list + * @sdata: mesh subif + * @next_hop: mesh neighbor to which frames for this destination will be + * forwarded + * @timer: mesh path discovery timer + * @frame_queue: pending queue for frames sent to this destination while the + * path is unresolved + * @rcu: rcu head for freeing mesh path + * @sn: target sequence number + * @metric: current metric to this destination + * @hop_count: hops to destination + * @exp_time: in jiffies, when the path will expire or when it expired + * @discovery_timeout: timeout (lapse in jiffies) used for the last discovery + * retry + * @discovery_retries: number of discovery retries + * @flags: mesh path flags, as specified on &enum mesh_path_flags + * @state_lock: mesh path state lock used to protect changes to the + * mpath itself. No need to take this lock when adding or removing + * an mpath to a hash bucket on a path table. + * @rann_snd_addr: the RANN sender address + * @rann_metric: the aggregated path metric towards the root node + * @last_preq_to_root: Timestamp of last PREQ sent to root + * @is_root: the destination station of this path is a root node + * @is_gate: the destination station of this path is a mesh gate + * @path_change_count: the number of path changes to destination + * + * + * The dst address is unique in the mesh path table. Since the mesh_path is + * protected by RCU, deleting the next_hop STA must remove / substitute the + * mesh_path structure and wait until that is no longer reachable before + * destroying the STA completely. + */ +struct mesh_path { + u8 dst[ETH_ALEN]; + u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ + struct rhash_head rhash; + struct hlist_node walk_list; + struct hlist_node gate_list; + struct ieee80211_sub_if_data *sdata; + struct sta_info __rcu *next_hop; + struct timer_list timer; + struct sk_buff_head frame_queue; + struct rcu_head rcu; + u32 sn; + u32 metric; + u8 hop_count; + unsigned long exp_time; + u32 discovery_timeout; + u8 discovery_retries; + enum mesh_path_flags flags; + spinlock_t state_lock; + u8 rann_snd_addr[ETH_ALEN]; + u32 rann_metric; + unsigned long last_preq_to_root; + unsigned long fast_tx_check; + bool is_root; + bool is_gate; + u32 path_change_count; +}; + +#define MESH_FAST_TX_CACHE_MAX_SIZE 512 +#define MESH_FAST_TX_CACHE_THRESHOLD_SIZE 384 +#define MESH_FAST_TX_CACHE_TIMEOUT 8000 /* msecs */ + +/** + * struct ieee80211_mesh_fast_tx - cached mesh fast tx entry + * @rhash: rhashtable pointer + * @addr_key: The Ethernet DA which is the key for this entry + * @fast_tx: base fast_tx data + * @hdr: cached mesh and rfc1042 headers + * @hdrlen: length of mesh + rfc1042 + * @walk_list: list containing all the fast tx entries + * @mpath: mesh path corresponding to the Mesh DA + * @mppath: MPP entry corresponding to this DA + * @timestamp: Last used time of this entry + */ +struct ieee80211_mesh_fast_tx { + struct rhash_head rhash; + u8 addr_key[ETH_ALEN] __aligned(2); + + struct ieee80211_fast_tx fast_tx; + u8 hdr[sizeof(struct ieee80211s_hdr) + sizeof(rfc1042_header)]; + u16 hdrlen; + + struct mesh_path *mpath, *mppath; + struct hlist_node walk_list; + unsigned long timestamp; +}; + +/* Recent multicast cache */ +/* RMC_BUCKETS must be a power of 2, maximum 256 */ +#define RMC_BUCKETS 256 +#define RMC_QUEUE_MAX_LEN 4 +#define RMC_TIMEOUT (3 * HZ) + +/** + * struct rmc_entry - entry in the Recent Multicast Cache + * + * @seqnum: mesh sequence number of the frame + * @exp_time: expiration time of the entry, in jiffies + * @sa: source address of 
the frame + * @list: hashtable list pointer + * + * The Recent Multicast Cache keeps track of the latest multicast frames that + * have been received by a mesh interface and discards received multicast frames + * that are found in the cache. + */ +struct rmc_entry { + struct hlist_node list; + unsigned long exp_time; + u32 seqnum; + u8 sa[ETH_ALEN]; +}; + +struct mesh_rmc { + struct hlist_head bucket[RMC_BUCKETS]; + u32 idx_mask; +}; + +#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ) + +#define MESH_PATH_EXPIRE (600 * HZ) + +/* Default maximum number of plinks per interface */ +#define MESH_MAX_PLINKS 256 + +/* Maximum number of paths per interface */ +#define MESH_MAX_MPATHS 1024 + +/* Number of frames buffered per destination for unresolved destinations */ +#define MESH_FRAME_QUEUE_LEN 10 + +/* Public interfaces */ +/* Various */ +int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, + const u8 *da, const u8 *sa); +unsigned int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata, + struct ieee80211s_hdr *meshhdr, + const char *addr4or5, const char *addr6); +int mesh_rmc_check(struct ieee80211_sub_if_data *sdata, + const u8 *addr, struct ieee80211s_hdr *mesh_hdr); +bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, + struct ieee802_11_elems *ie); +int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +int mesh_add_meshid_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +int mesh_add_rsn_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +int mesh_add_he_cap_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u8 ie_len); +int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +int mesh_add_he_6ghz_cap_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +int mesh_add_eht_cap_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u8 ie_len); +int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); +int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); +void ieee80211s_init(void); +void ieee80211s_update_metric(struct ieee80211_local *local, + struct sta_info *sta, + struct ieee80211_tx_status *st); +void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); +void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata); +int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); +void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); +void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); +const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method); +/* wrapper for ieee80211_bss_info_change_notify() */ +void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata, + u64 changed); + +/* mesh power save */ +u64 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata); +u64 ieee80211_mps_set_sta_local_pm(struct sta_info *sta, + enum nl80211_mesh_power_mode pm); +void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata, + 
struct sta_info *sta, + struct ieee80211_hdr *hdr); +void ieee80211_mps_sta_status_update(struct sta_info *sta); +void ieee80211_mps_rx_h_sta_process(struct sta_info *sta, + struct ieee80211_hdr *hdr); +void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta, + bool tx, bool acked); +void ieee80211_mps_frame_release(struct sta_info *sta, + struct ieee802_11_elems *elems); + +/* Mesh paths */ +int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata); +struct mesh_path *mesh_path_lookup(struct ieee80211_sub_if_data *sdata, + const u8 *dst); +struct mesh_path *mpp_path_lookup(struct ieee80211_sub_if_data *sdata, + const u8 *dst); +int mpp_path_add(struct ieee80211_sub_if_data *sdata, + const u8 *dst, const u8 *mpp); +struct mesh_path * +mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx); +struct mesh_path * +mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx); +void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); +void mesh_path_expire(struct ieee80211_sub_if_data *sdata); +void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len); +struct mesh_path * +mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst); + +int mesh_path_add_gate(struct mesh_path *mpath); +int mesh_path_send_to_gates(struct mesh_path *mpath); +int mesh_gate_num(struct ieee80211_sub_if_data *sdata); +u32 airtime_link_metric_get(struct ieee80211_local *local, + struct sta_info *sta); + +/* Mesh plinks */ +void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata, + u8 *hw_addr, struct ieee802_11_elems *ie, + struct ieee80211_rx_status *rx_status); +bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); +u64 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); +void mesh_plink_timer(struct timer_list *t); +void mesh_plink_broken(struct sta_info *sta); +u64 mesh_plink_deactivate(struct sta_info *sta); +u64 mesh_plink_open(struct sta_info *sta); +u64 mesh_plink_block(struct sta_info *sta); +void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee80211_rx_status *rx_status); +void mesh_sta_cleanup(struct sta_info *sta); + +/* Private interfaces */ +/* Mesh paths */ +int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, + u8 ttl, const u8 *target, u32 target_sn, + u16 target_rcode, const u8 *ra); +void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); +void mesh_path_flush_pending(struct mesh_path *mpath); +void mesh_path_tx_pending(struct mesh_path *mpath); +void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata); +void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata); +int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr); +void mesh_path_timer(struct timer_list *t); +void mesh_path_flush_by_nexthop(struct sta_info *sta); +void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata); + +bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt); +struct ieee80211_mesh_fast_tx * +mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, const u8 *addr); +bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u32 ctrl_flags); +void 
mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, struct mesh_path *mpath); +void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata); +void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata, + const u8 *addr); +void mesh_fast_tx_flush_mpath(struct mesh_path *mpath); +void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta); +void mesh_path_refresh(struct ieee80211_sub_if_data *sdata, + struct mesh_path *mpath, const u8 *addr); + +#ifdef CONFIG_MAC80211_MESH +static inline +u64 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) +{ + atomic_inc(&sdata->u.mesh.estab_plinks); + return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON; +} + +static inline +u64 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) +{ + atomic_dec(&sdata->u.mesh.estab_plinks); + return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON; +} + +static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) +{ + return sdata->u.mesh.mshcfg.dot11MeshMaxPeerLinks - + atomic_read(&sdata->u.mesh.estab_plinks); +} + +static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata) +{ + return (min_t(long, mesh_plink_free_count(sdata), + MESH_MAX_PLINKS - sdata->local->num_sta)) > 0; +} + +static inline void mesh_path_activate(struct mesh_path *mpath) +{ + mpath->flags |= MESH_PATH_ACTIVE | MESH_PATH_RESOLVED; +} + +static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) +{ + return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP; +} + +void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata); +void mesh_sync_adjust_tsf(struct ieee80211_sub_if_data *sdata); +void ieee80211s_stop(void); +#else +static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) +{ return false; } +static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) +{} +static inline void ieee80211s_stop(void) {} +#endif + +#endif /* IEEE80211S_H */ diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c new file mode 100644 index 0000000000..5136907298 --- /dev/null +++ b/net/mac80211/mesh_hwmp.c @@ -0,0 +1,1351 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2008, 2009 open80211s Ltd. + * Copyright (C) 2019, 2021-2023 Intel Corporation + * Author: Luis Carlos Cobo <luisca@cozybit.com> + */ + +#include <linux/slab.h> +#include <linux/etherdevice.h> +#include <asm/unaligned.h> +#include "wme.h" +#include "mesh.h" + +#define TEST_FRAME_LEN 8192 +#define MAX_METRIC 0xffffffff +#define ARITH_SHIFT 8 +#define LINK_FAIL_THRESH 95 + +#define MAX_PREQ_QUEUE_LEN 64 + +static void mesh_queue_preq(struct mesh_path *, u8); + +static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae) +{ + if (ae) + offset += 6; + return get_unaligned_le32(preq_elem + offset); +} + +static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae) +{ + if (ae) + offset += 6; + return get_unaligned_le16(preq_elem + offset); +} + +/* HWMP IE processing macros */ +#define AE_F (1<<6) +#define AE_F_SET(x) (*x & AE_F) +#define PREQ_IE_FLAGS(x) (*(x)) +#define PREQ_IE_HOPCOUNT(x) (*(x + 1)) +#define PREQ_IE_TTL(x) (*(x + 2)) +#define PREQ_IE_PREQ_ID(x) u32_field_get(x, 3, 0) +#define PREQ_IE_ORIG_ADDR(x) (x + 7) +#define PREQ_IE_ORIG_SN(x) u32_field_get(x, 13, 0) +#define PREQ_IE_LIFETIME(x) u32_field_get(x, 17, AE_F_SET(x)) +#define PREQ_IE_METRIC(x) u32_field_get(x, 21, AE_F_SET(x)) +#define PREQ_IE_TARGET_F(x) (*(AE_F_SET(x) ? 
x + 32 : x + 26)) +#define PREQ_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 33 : x + 27) +#define PREQ_IE_TARGET_SN(x) u32_field_get(x, 33, AE_F_SET(x)) + + +#define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x) +#define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x) +#define PREP_IE_TTL(x) PREQ_IE_TTL(x) +#define PREP_IE_ORIG_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21) +#define PREP_IE_ORIG_SN(x) u32_field_get(x, 27, AE_F_SET(x)) +#define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x)) +#define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x)) +#define PREP_IE_TARGET_ADDR(x) (x + 3) +#define PREP_IE_TARGET_SN(x) u32_field_get(x, 9, 0) + +#define PERR_IE_TTL(x) (*(x)) +#define PERR_IE_TARGET_FLAGS(x) (*(x + 2)) +#define PERR_IE_TARGET_ADDR(x) (x + 3) +#define PERR_IE_TARGET_SN(x) u32_field_get(x, 9, 0) +#define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0) + +#define MSEC_TO_TU(x) (x*1000/1024) +#define SN_GT(x, y) ((s32)(y - x) < 0) +#define SN_LT(x, y) ((s32)(x - y) < 0) +#define MAX_SANE_SN_DELTA 32 + +static inline u32 SN_DELTA(u32 x, u32 y) +{ + return x >= y ? x - y : y - x; +} + +#define net_traversal_jiffies(s) \ + msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) +#define default_lifetime(s) \ + MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout) +#define min_preq_int_jiff(s) \ + (msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval)) +#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries) +#define disc_timeout_jiff(s) \ + msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout) +#define root_path_confirmation_jiffies(s) \ + msecs_to_jiffies(sdata->u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval) + +enum mpath_frame_type { + MPATH_PREQ = 0, + MPATH_PREP, + MPATH_PERR, + MPATH_RANN +}; + +static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + +static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, + const u8 *orig_addr, u32 orig_sn, + u8 target_flags, const u8 *target, + u32 target_sn, const u8 *da, + u8 hop_count, u8 ttl, + u32 lifetime, u32 metric, u32 preq_id, + struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt; + u8 *pos, ie_len; + int hdr_len = offsetofend(struct ieee80211_mgmt, + u.action.u.mesh_action); + + skb = dev_alloc_skb(local->tx_headroom + + hdr_len + + 2 + 37); /* max HWMP IE */ + if (!skb) + return -1; + skb_reserve(skb, local->tx_headroom); + mgmt = skb_put_zero(skb, hdr_len); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + /* BSSID == SA */ + memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); + mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION; + mgmt->u.action.u.mesh_action.action_code = + WLAN_MESH_ACTION_HWMP_PATH_SELECTION; + + switch (action) { + case MPATH_PREQ: + mhwmp_dbg(sdata, "sending PREQ to %pM\n", target); + ie_len = 37; + pos = skb_put(skb, 2 + ie_len); + *pos++ = WLAN_EID_PREQ; + break; + case MPATH_PREP: + mhwmp_dbg(sdata, "sending PREP to %pM\n", orig_addr); + ie_len = 31; + pos = skb_put(skb, 2 + ie_len); + *pos++ = WLAN_EID_PREP; + break; + case MPATH_RANN: + mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr); + ie_len = sizeof(struct ieee80211_rann_ie); + pos = skb_put(skb, 2 + ie_len); + *pos++ = WLAN_EID_RANN; + break; + default: + kfree_skb(skb); + return -ENOTSUPP; + } + *pos++ = ie_len; + *pos++ = flags; + *pos++ = hop_count; + *pos++ 
= ttl; + if (action == MPATH_PREP) { + memcpy(pos, target, ETH_ALEN); + pos += ETH_ALEN; + put_unaligned_le32(target_sn, pos); + pos += 4; + } else { + if (action == MPATH_PREQ) { + put_unaligned_le32(preq_id, pos); + pos += 4; + } + memcpy(pos, orig_addr, ETH_ALEN); + pos += ETH_ALEN; + put_unaligned_le32(orig_sn, pos); + pos += 4; + } + put_unaligned_le32(lifetime, pos); /* interval for RANN */ + pos += 4; + put_unaligned_le32(metric, pos); + pos += 4; + if (action == MPATH_PREQ) { + *pos++ = 1; /* destination count */ + *pos++ = target_flags; + memcpy(pos, target, ETH_ALEN); + pos += ETH_ALEN; + put_unaligned_le32(target_sn, pos); + pos += 4; + } else if (action == MPATH_PREP) { + memcpy(pos, orig_addr, ETH_ALEN); + pos += ETH_ALEN; + put_unaligned_le32(orig_sn, pos); + pos += 4; + } + + ieee80211_tx_skb(sdata, skb); + return 0; +} + + +/* Headroom is not adjusted. Caller should ensure that skb has sufficient + * headroom in case the frame is encrypted. */ +static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + + /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */ + skb_set_queue_mapping(skb, IEEE80211_AC_VO); + skb->priority = 7; + + info->control.vif = &sdata->vif; + info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING; + ieee80211_set_qos_hdr(sdata, skb); + ieee80211_mps_set_frame_flags(sdata, NULL, hdr); +} + +/** + * mesh_path_error_tx - Sends a PERR mesh management frame + * + * @ttl: allowed remaining hops + * @target: broken destination + * @target_sn: SN of the broken destination + * @target_rcode: reason code for this PERR + * @ra: node this frame is addressed to + * @sdata: local mesh subif + * + * Note: This function may be called with driver locks taken that the driver + * also acquires in the TX path. To avoid a deadlock we don't transmit the + * frame directly but add it to the pending queue instead. 
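+ *
+ * Note (reading aid, derived from the code below): the PERR element body
+ * written here is 15 octets -- TTL (1), destination count (1), flags (1),
+ * target address (6), target sequence number (4) and reason code (2) --
+ * which is where the ie_len of 15 used below comes from.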
+ */ +int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, + u8 ttl, const u8 *target, u32 target_sn, + u16 target_rcode, const u8 *ra) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_mgmt *mgmt; + u8 *pos, ie_len; + int hdr_len = offsetofend(struct ieee80211_mgmt, + u.action.u.mesh_action); + + if (time_before(jiffies, ifmsh->next_perr)) + return -EAGAIN; + + skb = dev_alloc_skb(local->tx_headroom + + IEEE80211_ENCRYPT_HEADROOM + + IEEE80211_ENCRYPT_TAILROOM + + hdr_len + + 2 + 15 /* PERR IE */); + if (!skb) + return -1; + skb_reserve(skb, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM); + mgmt = skb_put_zero(skb, hdr_len); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + + memcpy(mgmt->da, ra, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + /* BSSID == SA */ + memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); + mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION; + mgmt->u.action.u.mesh_action.action_code = + WLAN_MESH_ACTION_HWMP_PATH_SELECTION; + ie_len = 15; + pos = skb_put(skb, 2 + ie_len); + *pos++ = WLAN_EID_PERR; + *pos++ = ie_len; + /* ttl */ + *pos++ = ttl; + /* number of destinations */ + *pos++ = 1; + /* Flags field has AE bit only as defined in + * sec 8.4.2.117 IEEE802.11-2012 + */ + *pos = 0; + pos++; + memcpy(pos, target, ETH_ALEN); + pos += ETH_ALEN; + put_unaligned_le32(target_sn, pos); + pos += 4; + put_unaligned_le16(target_rcode, pos); + + /* see note in function header */ + prepare_frame_for_deferred_tx(sdata, skb); + ifmsh->next_perr = TU_TO_EXP_TIME( + ifmsh->mshcfg.dot11MeshHWMPperrMinInterval); + ieee80211_add_pending_skb(local, skb); + return 0; +} + +void ieee80211s_update_metric(struct ieee80211_local *local, + struct sta_info *sta, + struct ieee80211_tx_status *st) +{ + struct ieee80211_tx_info *txinfo = st->info; + int failed; + struct rate_info rinfo; + + failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK); + + /* moving average, scaled to 100. + * feed failure as 100 and success as 0 + */ + ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, failed * 100); + if (ewma_mesh_fail_avg_read(&sta->mesh->fail_avg) > + LINK_FAIL_THRESH) + mesh_plink_broken(sta); + + /* use rate info set by the driver directly if present */ + if (st->n_rates) + rinfo = sta->deflink.tx_stats.last_rate_info; + else + sta_set_rate_info_tx(sta, &sta->deflink.tx_stats.last_rate, &rinfo); + + ewma_mesh_tx_rate_avg_add(&sta->mesh->tx_rate_avg, + cfg80211_calculate_bitrate(&rinfo)); +} + +u32 airtime_link_metric_get(struct ieee80211_local *local, + struct sta_info *sta) +{ + /* This should be adjusted for each device */ + int device_constant = 1 << ARITH_SHIFT; + int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT; + int s_unit = 1 << ARITH_SHIFT; + int rate, err; + u32 tx_time, estimated_retx; + u64 result; + unsigned long fail_avg = + ewma_mesh_fail_avg_read(&sta->mesh->fail_avg); + + if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) + return MAX_METRIC; + + /* Try to get rate based on HW/SW RC algorithm. 
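+ * For example (illustrative numbers only): sta_get_expected_throughput()
+ * reporting 54000 Kbps gives rate = DIV_ROUND_UP(54000, 100) = 540 below,
+ * and feeding that into the tx_time / estimated_retx computation further
+ * down works out to an airtime metric of roughly 150 for this example.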
+ * Rate is returned in units of Kbps, correct this + * to comply with airtime calculation units + * Round up in case we get rate < 100Kbps + */ + rate = DIV_ROUND_UP(sta_get_expected_throughput(sta), 100); + + if (rate) { + err = 0; + } else { + if (fail_avg > LINK_FAIL_THRESH) + return MAX_METRIC; + + rate = ewma_mesh_tx_rate_avg_read(&sta->mesh->tx_rate_avg); + if (WARN_ON(!rate)) + return MAX_METRIC; + + err = (fail_avg << ARITH_SHIFT) / 100; + } + + /* bitrate is in units of 100 Kbps, while we need rate in units of + * 1Mbps. This will be corrected on tx_time computation. + */ + tx_time = (device_constant + 10 * test_frame_len / rate); + estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); + result = ((u64)tx_time * estimated_retx) >> (2 * ARITH_SHIFT); + return (u32)result; +} + +/** + * hwmp_route_info_get - Update routing info to originator and transmitter + * + * @sdata: local mesh subif + * @mgmt: mesh management frame + * @hwmp_ie: hwmp information element (PREP or PREQ) + * @action: type of hwmp ie + * + * This function updates the path routing information to the originator and the + * transmitter of a HWMP PREQ or PREP frame. + * + * Returns: metric to frame originator or 0 if the frame should not be further + * processed + * + * Notes: this function is the only place (besides user-provided info) where + * path routing information is updated. + */ +static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + const u8 *hwmp_ie, enum mpath_frame_type action) +{ + struct ieee80211_local *local = sdata->local; + struct mesh_path *mpath; + struct sta_info *sta; + bool fresh_info; + const u8 *orig_addr, *ta; + u32 orig_sn, orig_metric; + unsigned long orig_lifetime, exp_time; + u32 last_hop_metric, new_metric; + bool flush_mpath = false; + bool process = true; + u8 hopcount; + + rcu_read_lock(); + sta = sta_info_get(sdata, mgmt->sa); + if (!sta) { + rcu_read_unlock(); + return 0; + } + + last_hop_metric = airtime_link_metric_get(local, sta); + /* Update and check originator routing info */ + fresh_info = true; + + switch (action) { + case MPATH_PREQ: + orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie); + orig_sn = PREQ_IE_ORIG_SN(hwmp_ie); + orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie); + orig_metric = PREQ_IE_METRIC(hwmp_ie); + hopcount = PREQ_IE_HOPCOUNT(hwmp_ie) + 1; + break; + case MPATH_PREP: + /* Originator here refers to the MP that was the target in the + * Path Request. We divert from the nomenclature in the draft + * so that we can easily use a single function to gather path + * information from both PREQ and PREP frames. + */ + orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie); + orig_sn = PREP_IE_TARGET_SN(hwmp_ie); + orig_lifetime = PREP_IE_LIFETIME(hwmp_ie); + orig_metric = PREP_IE_METRIC(hwmp_ie); + hopcount = PREP_IE_HOPCOUNT(hwmp_ie) + 1; + break; + default: + rcu_read_unlock(); + return 0; + } + new_metric = orig_metric + last_hop_metric; + if (new_metric < orig_metric) + new_metric = MAX_METRIC; + exp_time = TU_TO_EXP_TIME(orig_lifetime); + + if (ether_addr_equal(orig_addr, sdata->vif.addr)) { + /* This MP is the originator, we are not interested in this + * frame, except for updating transmitter's path info. 
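+ *
+ * Note on the metric comparison in the non-originator branch below: a
+ * PREQ/PREP received via a different next hop than the one currently in
+ * use has its metric inflated by mult_frac(new_metric, 10, 9), so a
+ * competing route must be roughly 10% better before it replaces the
+ * existing one.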
+ */ + process = false; + fresh_info = false; + } else { + mpath = mesh_path_lookup(sdata, orig_addr); + if (mpath) { + spin_lock_bh(&mpath->state_lock); + if (mpath->flags & MESH_PATH_FIXED) + fresh_info = false; + else if ((mpath->flags & MESH_PATH_ACTIVE) && + (mpath->flags & MESH_PATH_SN_VALID)) { + if (SN_GT(mpath->sn, orig_sn) || + (mpath->sn == orig_sn && + (rcu_access_pointer(mpath->next_hop) != + sta ? + mult_frac(new_metric, 10, 9) : + new_metric) >= mpath->metric)) { + process = false; + fresh_info = false; + } + } else if (!(mpath->flags & MESH_PATH_ACTIVE)) { + bool have_sn, newer_sn, bounced; + + have_sn = mpath->flags & MESH_PATH_SN_VALID; + newer_sn = have_sn && SN_GT(orig_sn, mpath->sn); + bounced = have_sn && + (SN_DELTA(orig_sn, mpath->sn) > + MAX_SANE_SN_DELTA); + + if (!have_sn || newer_sn) { + /* if SN is newer than what we had + * then we can take it */; + } else if (bounced) { + /* if SN is way different than what + * we had then assume the other side + * rebooted or restarted */; + } else { + process = false; + fresh_info = false; + } + } + } else { + mpath = mesh_path_add(sdata, orig_addr); + if (IS_ERR(mpath)) { + rcu_read_unlock(); + return 0; + } + spin_lock_bh(&mpath->state_lock); + } + + if (fresh_info) { + if (rcu_access_pointer(mpath->next_hop) != sta) { + mpath->path_change_count++; + flush_mpath = true; + } + mesh_path_assign_nexthop(mpath, sta); + mpath->flags |= MESH_PATH_SN_VALID; + mpath->metric = new_metric; + mpath->sn = orig_sn; + mpath->exp_time = time_after(mpath->exp_time, exp_time) + ? mpath->exp_time : exp_time; + mpath->hop_count = hopcount; + mesh_path_activate(mpath); + spin_unlock_bh(&mpath->state_lock); + if (flush_mpath) + mesh_fast_tx_flush_mpath(mpath); + ewma_mesh_fail_avg_init(&sta->mesh->fail_avg); + /* init it at a low value - 0 start is tricky */ + ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1); + mesh_path_tx_pending(mpath); + /* draft says preq_id should be saved to, but there does + * not seem to be any use for it, skipping by now + */ + } else + spin_unlock_bh(&mpath->state_lock); + } + + /* Update and check transmitter routing info */ + ta = mgmt->sa; + if (ether_addr_equal(orig_addr, ta)) + fresh_info = false; + else { + fresh_info = true; + + mpath = mesh_path_lookup(sdata, ta); + if (mpath) { + spin_lock_bh(&mpath->state_lock); + if ((mpath->flags & MESH_PATH_FIXED) || + ((mpath->flags & MESH_PATH_ACTIVE) && + ((rcu_access_pointer(mpath->next_hop) != sta ? + mult_frac(last_hop_metric, 10, 9) : + last_hop_metric) > mpath->metric))) + fresh_info = false; + } else { + mpath = mesh_path_add(sdata, ta); + if (IS_ERR(mpath)) { + rcu_read_unlock(); + return 0; + } + spin_lock_bh(&mpath->state_lock); + } + + if (fresh_info) { + if (rcu_access_pointer(mpath->next_hop) != sta) { + mpath->path_change_count++; + flush_mpath = true; + } + mesh_path_assign_nexthop(mpath, sta); + mpath->metric = last_hop_metric; + mpath->exp_time = time_after(mpath->exp_time, exp_time) + ? mpath->exp_time : exp_time; + mpath->hop_count = 1; + mesh_path_activate(mpath); + spin_unlock_bh(&mpath->state_lock); + if (flush_mpath) + mesh_fast_tx_flush_mpath(mpath); + ewma_mesh_fail_avg_init(&sta->mesh->fail_avg); + /* init it at a low value - 0 start is tricky */ + ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1); + mesh_path_tx_pending(mpath); + } else + spin_unlock_bh(&mpath->state_lock); + } + + rcu_read_unlock(); + + return process ? 
new_metric : 0; +} + +static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + const u8 *preq_elem, u32 orig_metric) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct mesh_path *mpath = NULL; + const u8 *target_addr, *orig_addr; + const u8 *da; + u8 target_flags, ttl, flags; + u32 orig_sn, target_sn, lifetime, target_metric = 0; + bool reply = false; + bool forward = true; + bool root_is_gate; + + /* Update target SN, if present */ + target_addr = PREQ_IE_TARGET_ADDR(preq_elem); + orig_addr = PREQ_IE_ORIG_ADDR(preq_elem); + target_sn = PREQ_IE_TARGET_SN(preq_elem); + orig_sn = PREQ_IE_ORIG_SN(preq_elem); + target_flags = PREQ_IE_TARGET_F(preq_elem); + /* Proactive PREQ gate announcements */ + flags = PREQ_IE_FLAGS(preq_elem); + root_is_gate = !!(flags & RANN_FLAG_IS_GATE); + + mhwmp_dbg(sdata, "received PREQ from %pM\n", orig_addr); + + if (ether_addr_equal(target_addr, sdata->vif.addr)) { + mhwmp_dbg(sdata, "PREQ is for us\n"); + forward = false; + reply = true; + target_metric = 0; + + if (SN_GT(target_sn, ifmsh->sn)) + ifmsh->sn = target_sn; + + if (time_after(jiffies, ifmsh->last_sn_update + + net_traversal_jiffies(sdata)) || + time_before(jiffies, ifmsh->last_sn_update)) { + ++ifmsh->sn; + ifmsh->last_sn_update = jiffies; + } + target_sn = ifmsh->sn; + } else if (is_broadcast_ether_addr(target_addr) && + (target_flags & IEEE80211_PREQ_TO_FLAG)) { + rcu_read_lock(); + mpath = mesh_path_lookup(sdata, orig_addr); + if (mpath) { + if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) { + reply = true; + target_addr = sdata->vif.addr; + target_sn = ++ifmsh->sn; + target_metric = 0; + ifmsh->last_sn_update = jiffies; + } + if (root_is_gate) + mesh_path_add_gate(mpath); + } + rcu_read_unlock(); + } else { + rcu_read_lock(); + mpath = mesh_path_lookup(sdata, target_addr); + if (mpath) { + if ((!(mpath->flags & MESH_PATH_SN_VALID)) || + SN_LT(mpath->sn, target_sn)) { + mpath->sn = target_sn; + mpath->flags |= MESH_PATH_SN_VALID; + } else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) && + (mpath->flags & MESH_PATH_ACTIVE)) { + reply = true; + target_metric = mpath->metric; + target_sn = mpath->sn; + /* Case E2 of sec 13.10.9.3 IEEE 802.11-2012*/ + target_flags |= IEEE80211_PREQ_TO_FLAG; + } + } + rcu_read_unlock(); + } + + if (reply) { + lifetime = PREQ_IE_LIFETIME(preq_elem); + ttl = ifmsh->mshcfg.element_ttl; + if (ttl != 0) { + mhwmp_dbg(sdata, "replying to the PREQ\n"); + mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr, + orig_sn, 0, target_addr, + target_sn, mgmt->sa, 0, ttl, + lifetime, target_metric, 0, + sdata); + } else { + ifmsh->mshstats.dropped_frames_ttl++; + } + } + + if (forward && ifmsh->mshcfg.dot11MeshForwarding) { + u32 preq_id; + u8 hopcount; + + ttl = PREQ_IE_TTL(preq_elem); + lifetime = PREQ_IE_LIFETIME(preq_elem); + if (ttl <= 1) { + ifmsh->mshstats.dropped_frames_ttl++; + return; + } + mhwmp_dbg(sdata, "forwarding the PREQ from %pM\n", orig_addr); + --ttl; + preq_id = PREQ_IE_PREQ_ID(preq_elem); + hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; + da = (mpath && mpath->is_root) ? 
+ mpath->rann_snd_addr : broadcast_addr; + + if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) { + target_addr = PREQ_IE_TARGET_ADDR(preq_elem); + target_sn = PREQ_IE_TARGET_SN(preq_elem); + } + + mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, + orig_sn, target_flags, target_addr, + target_sn, da, hopcount, ttl, lifetime, + orig_metric, preq_id, sdata); + if (!is_multicast_ether_addr(da)) + ifmsh->mshstats.fwded_unicast++; + else + ifmsh->mshstats.fwded_mcast++; + ifmsh->mshstats.fwded_frames++; + } +} + + +static inline struct sta_info * +next_hop_deref_protected(struct mesh_path *mpath) +{ + return rcu_dereference_protected(mpath->next_hop, + lockdep_is_held(&mpath->state_lock)); +} + + +static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + const u8 *prep_elem, u32 metric) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct mesh_path *mpath; + const u8 *target_addr, *orig_addr; + u8 ttl, hopcount, flags; + u8 next_hop[ETH_ALEN]; + u32 target_sn, orig_sn, lifetime; + + mhwmp_dbg(sdata, "received PREP from %pM\n", + PREP_IE_TARGET_ADDR(prep_elem)); + + orig_addr = PREP_IE_ORIG_ADDR(prep_elem); + if (ether_addr_equal(orig_addr, sdata->vif.addr)) + /* destination, no forwarding required */ + return; + + if (!ifmsh->mshcfg.dot11MeshForwarding) + return; + + ttl = PREP_IE_TTL(prep_elem); + if (ttl <= 1) { + sdata->u.mesh.mshstats.dropped_frames_ttl++; + return; + } + + rcu_read_lock(); + mpath = mesh_path_lookup(sdata, orig_addr); + if (mpath) + spin_lock_bh(&mpath->state_lock); + else + goto fail; + if (!(mpath->flags & MESH_PATH_ACTIVE)) { + spin_unlock_bh(&mpath->state_lock); + goto fail; + } + memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN); + spin_unlock_bh(&mpath->state_lock); + --ttl; + flags = PREP_IE_FLAGS(prep_elem); + lifetime = PREP_IE_LIFETIME(prep_elem); + hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1; + target_addr = PREP_IE_TARGET_ADDR(prep_elem); + target_sn = PREP_IE_TARGET_SN(prep_elem); + orig_sn = PREP_IE_ORIG_SN(prep_elem); + + mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, orig_sn, 0, + target_addr, target_sn, next_hop, hopcount, + ttl, lifetime, metric, 0, sdata); + rcu_read_unlock(); + + sdata->u.mesh.mshstats.fwded_unicast++; + sdata->u.mesh.mshstats.fwded_frames++; + return; + +fail: + rcu_read_unlock(); + sdata->u.mesh.mshstats.dropped_frames_no_route++; +} + +static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + const u8 *perr_elem) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct mesh_path *mpath; + u8 ttl; + const u8 *ta, *target_addr; + u32 target_sn; + u16 target_rcode; + + ta = mgmt->sa; + ttl = PERR_IE_TTL(perr_elem); + if (ttl <= 1) { + ifmsh->mshstats.dropped_frames_ttl++; + return; + } + ttl--; + target_addr = PERR_IE_TARGET_ADDR(perr_elem); + target_sn = PERR_IE_TARGET_SN(perr_elem); + target_rcode = PERR_IE_TARGET_RCODE(perr_elem); + + rcu_read_lock(); + mpath = mesh_path_lookup(sdata, target_addr); + if (mpath) { + struct sta_info *sta; + + spin_lock_bh(&mpath->state_lock); + sta = next_hop_deref_protected(mpath); + if (mpath->flags & MESH_PATH_ACTIVE && + ether_addr_equal(ta, sta->sta.addr) && + !(mpath->flags & MESH_PATH_FIXED) && + (!(mpath->flags & MESH_PATH_SN_VALID) || + SN_GT(target_sn, mpath->sn) || target_sn == 0)) { + mpath->flags &= ~MESH_PATH_ACTIVE; + if (target_sn != 0) + mpath->sn = target_sn; + else + mpath->sn += 1; + spin_unlock_bh(&mpath->state_lock); + if 
(!ifmsh->mshcfg.dot11MeshForwarding) + goto endperr; + mesh_path_error_tx(sdata, ttl, target_addr, + target_sn, target_rcode, + broadcast_addr); + } else + spin_unlock_bh(&mpath->state_lock); + } +endperr: + rcu_read_unlock(); +} + +static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + const struct ieee80211_rann_ie *rann) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + struct mesh_path *mpath; + u8 ttl, flags, hopcount; + const u8 *orig_addr; + u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval; + bool root_is_gate; + + ttl = rann->rann_ttl; + flags = rann->rann_flags; + root_is_gate = !!(flags & RANN_FLAG_IS_GATE); + orig_addr = rann->rann_addr; + orig_sn = le32_to_cpu(rann->rann_seq); + interval = le32_to_cpu(rann->rann_interval); + hopcount = rann->rann_hopcount; + hopcount++; + orig_metric = le32_to_cpu(rann->rann_metric); + + /* Ignore our own RANNs */ + if (ether_addr_equal(orig_addr, sdata->vif.addr)) + return; + + mhwmp_dbg(sdata, + "received RANN from %pM via neighbour %pM (is_gate=%d)\n", + orig_addr, mgmt->sa, root_is_gate); + + rcu_read_lock(); + sta = sta_info_get(sdata, mgmt->sa); + if (!sta) { + rcu_read_unlock(); + return; + } + + last_hop_metric = airtime_link_metric_get(local, sta); + new_metric = orig_metric + last_hop_metric; + if (new_metric < orig_metric) + new_metric = MAX_METRIC; + + mpath = mesh_path_lookup(sdata, orig_addr); + if (!mpath) { + mpath = mesh_path_add(sdata, orig_addr); + if (IS_ERR(mpath)) { + rcu_read_unlock(); + sdata->u.mesh.mshstats.dropped_frames_no_route++; + return; + } + } + + if (!(SN_LT(mpath->sn, orig_sn)) && + !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) { + rcu_read_unlock(); + return; + } + + if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) || + (time_after(jiffies, mpath->last_preq_to_root + + root_path_confirmation_jiffies(sdata)) || + time_before(jiffies, mpath->last_preq_to_root))) && + !(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) { + mhwmp_dbg(sdata, + "time to refresh root mpath %pM\n", + orig_addr); + mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); + mpath->last_preq_to_root = jiffies; + } + + mpath->sn = orig_sn; + mpath->rann_metric = new_metric; + mpath->is_root = true; + /* Recording RANNs sender address to send individually + * addressed PREQs destined for root mesh STA */ + memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN); + + if (root_is_gate) + mesh_path_add_gate(mpath); + + if (ttl <= 1) { + ifmsh->mshstats.dropped_frames_ttl++; + rcu_read_unlock(); + return; + } + ttl--; + + if (ifmsh->mshcfg.dot11MeshForwarding) { + mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, + orig_sn, 0, NULL, 0, broadcast_addr, + hopcount, ttl, interval, + new_metric, 0, sdata); + } + + rcu_read_unlock(); +} + + +void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) +{ + struct ieee802_11_elems *elems; + size_t baselen; + u32 path_metric; + struct sta_info *sta; + + /* need action_code */ + if (len < IEEE80211_MIN_ACTION_SIZE + 1) + return; + + rcu_read_lock(); + sta = sta_info_get(sdata, mgmt->sa); + if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt; + elems = ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, + len - baselen, false, NULL); + if 
(!elems) + return; + + if (elems->preq) { + if (elems->preq_len != 37) + /* Right now we support just 1 destination and no AE */ + goto free; + path_metric = hwmp_route_info_get(sdata, mgmt, elems->preq, + MPATH_PREQ); + if (path_metric) + hwmp_preq_frame_process(sdata, mgmt, elems->preq, + path_metric); + } + if (elems->prep) { + if (elems->prep_len != 31) + /* Right now we support no AE */ + goto free; + path_metric = hwmp_route_info_get(sdata, mgmt, elems->prep, + MPATH_PREP); + if (path_metric) + hwmp_prep_frame_process(sdata, mgmt, elems->prep, + path_metric); + } + if (elems->perr) { + if (elems->perr_len != 15) + /* Right now we support only one destination per PERR */ + goto free; + hwmp_perr_frame_process(sdata, mgmt, elems->perr); + } + if (elems->rann) + hwmp_rann_frame_process(sdata, mgmt, elems->rann); +free: + kfree(elems); +} + +/** + * mesh_queue_preq - queue a PREQ to a given destination + * + * @mpath: mesh path to discover + * @flags: special attributes of the PREQ to be sent + * + * Locking: the function must be called from within a rcu read lock block. + * + */ +static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) +{ + struct ieee80211_sub_if_data *sdata = mpath->sdata; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct mesh_preq_queue *preq_node; + + preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC); + if (!preq_node) { + mhwmp_dbg(sdata, "could not allocate PREQ node\n"); + return; + } + + spin_lock_bh(&ifmsh->mesh_preq_queue_lock); + if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) { + spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); + kfree(preq_node); + if (printk_ratelimit()) + mhwmp_dbg(sdata, "PREQ node queue full\n"); + return; + } + + spin_lock(&mpath->state_lock); + if (mpath->flags & MESH_PATH_REQ_QUEUED) { + spin_unlock(&mpath->state_lock); + spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); + kfree(preq_node); + return; + } + + memcpy(preq_node->dst, mpath->dst, ETH_ALEN); + preq_node->flags = flags; + + mpath->flags |= MESH_PATH_REQ_QUEUED; + spin_unlock(&mpath->state_lock); + + list_add_tail(&preq_node->list, &ifmsh->preq_queue.list); + ++ifmsh->preq_queue_len; + spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); + + if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); + + else if (time_before(jiffies, ifmsh->last_preq)) { + /* avoid long wait if did not send preqs for a long time + * and jiffies wrapped around + */ + ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); + } else + mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq + + min_preq_int_jiff(sdata)); +} + +/** + * mesh_path_start_discovery - launch a path discovery from the PREQ queue + * + * @sdata: local mesh subif + */ +void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct mesh_preq_queue *preq_node; + struct mesh_path *mpath; + u8 ttl, target_flags = 0; + const u8 *da; + u32 lifetime; + + spin_lock_bh(&ifmsh->mesh_preq_queue_lock); + if (!ifmsh->preq_queue_len || + time_before(jiffies, ifmsh->last_preq + + min_preq_int_jiff(sdata))) { + spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); + return; + } + + preq_node = list_first_entry(&ifmsh->preq_queue.list, + struct mesh_preq_queue, list); + list_del(&preq_node->list); + --ifmsh->preq_queue_len; + spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); + + rcu_read_lock(); + mpath = mesh_path_lookup(sdata, 
preq_node->dst); + if (!mpath) + goto enddiscovery; + + spin_lock_bh(&mpath->state_lock); + if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) { + spin_unlock_bh(&mpath->state_lock); + goto enddiscovery; + } + mpath->flags &= ~MESH_PATH_REQ_QUEUED; + if (preq_node->flags & PREQ_Q_F_START) { + if (mpath->flags & MESH_PATH_RESOLVING) { + spin_unlock_bh(&mpath->state_lock); + goto enddiscovery; + } else { + mpath->flags &= ~MESH_PATH_RESOLVED; + mpath->flags |= MESH_PATH_RESOLVING; + mpath->discovery_retries = 0; + mpath->discovery_timeout = disc_timeout_jiff(sdata); + } + } else if (!(mpath->flags & MESH_PATH_RESOLVING) || + mpath->flags & MESH_PATH_RESOLVED) { + mpath->flags &= ~MESH_PATH_RESOLVING; + spin_unlock_bh(&mpath->state_lock); + goto enddiscovery; + } + + ifmsh->last_preq = jiffies; + + if (time_after(jiffies, ifmsh->last_sn_update + + net_traversal_jiffies(sdata)) || + time_before(jiffies, ifmsh->last_sn_update)) { + ++ifmsh->sn; + sdata->u.mesh.last_sn_update = jiffies; + } + lifetime = default_lifetime(sdata); + ttl = sdata->u.mesh.mshcfg.element_ttl; + if (ttl == 0) { + sdata->u.mesh.mshstats.dropped_frames_ttl++; + spin_unlock_bh(&mpath->state_lock); + goto enddiscovery; + } + + if (preq_node->flags & PREQ_Q_F_REFRESH) + target_flags |= IEEE80211_PREQ_TO_FLAG; + else + target_flags &= ~IEEE80211_PREQ_TO_FLAG; + + spin_unlock_bh(&mpath->state_lock); + da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr; + mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn, + target_flags, mpath->dst, mpath->sn, da, 0, + ttl, lifetime, 0, ifmsh->preq_id++, sdata); + + spin_lock_bh(&mpath->state_lock); + if (!(mpath->flags & MESH_PATH_DELETED)) + mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); + spin_unlock_bh(&mpath->state_lock); + +enddiscovery: + rcu_read_unlock(); + kfree(preq_node); +} + +/** + * mesh_nexthop_resolve - lookup next hop; conditionally start path discovery + * + * @skb: 802.11 frame to be sent + * @sdata: network subif the frame will be sent through + * + * Lookup next hop for given skb and start path discovery if no + * forwarding information is found. + * + * Returns: 0 if the next hop was found and -ENOENT if the frame was queued. + * skb is freed here if no mpath could be allocated. 
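+ *
+ * Note: a frame queued here is tagged with
+ * IEEE80211_TX_INTCFL_NEED_TXPROCESSING and parked on mpath->frame_queue;
+ * once discovery succeeds, hwmp_route_info_get() activates the path and
+ * mesh_path_tx_pending() re-injects the queued frames.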
+ */ +int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct mesh_path *mpath; + struct sk_buff *skb_to_free = NULL; + u8 *target_addr = hdr->addr3; + + /* Nulls are only sent to peers for PS and should be pre-addressed */ + if (ieee80211_is_qos_nullfunc(hdr->frame_control)) + return 0; + + /* Allow injected packets to bypass mesh routing */ + if (info->control.flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP) + return 0; + + if (!mesh_nexthop_lookup(sdata, skb)) + return 0; + + /* no nexthop found, start resolving */ + mpath = mesh_path_lookup(sdata, target_addr); + if (!mpath) { + mpath = mesh_path_add(sdata, target_addr); + if (IS_ERR(mpath)) { + mesh_path_discard_frame(sdata, skb); + return PTR_ERR(mpath); + } + } + + if (!(mpath->flags & MESH_PATH_RESOLVING) && + mesh_path_sel_is_hwmp(sdata)) + mesh_queue_preq(mpath, PREQ_Q_F_START); + + if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN) + skb_to_free = skb_dequeue(&mpath->frame_queue); + + info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING; + ieee80211_set_qos_hdr(sdata, skb); + skb_queue_tail(&mpath->frame_queue, skb); + if (skb_to_free) + mesh_path_discard_frame(sdata, skb_to_free); + + return -ENOENT; +} + +/** + * mesh_nexthop_lookup_nolearn - try to set next hop without path discovery + * @skb: 802.11 frame to be sent + * @sdata: network subif the frame will be sent through + * + * Check if the meshDA (addr3) of a unicast frame is a direct neighbor. + * And if so, set the RA (addr1) to it to transmit to this node directly, + * avoiding PREQ/PREP path discovery. + * + * Returns: 0 if the next hop was found and -ENOENT otherwise. + */ +static int mesh_nexthop_lookup_nolearn(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct sta_info *sta; + + if (is_multicast_ether_addr(hdr->addr1)) + return -ENOENT; + + rcu_read_lock(); + sta = sta_info_get(sdata, hdr->addr3); + + if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) { + rcu_read_unlock(); + return -ENOENT; + } + rcu_read_unlock(); + + memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); + memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); + return 0; +} + +void mesh_path_refresh(struct ieee80211_sub_if_data *sdata, + struct mesh_path *mpath, const u8 *addr) +{ + if (mpath->flags & (MESH_PATH_REQ_QUEUED | MESH_PATH_FIXED | + MESH_PATH_RESOLVING)) + return; + + if (time_after(jiffies, + mpath->exp_time - + msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && + (!addr || ether_addr_equal(sdata->vif.addr, addr))) + mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); +} + +/** + * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling + * this function is considered "using" the associated mpath, so preempt a path + * refresh if this mpath expires soon. + * + * @skb: 802.11 frame to be sent + * @sdata: network subif the frame will be sent through + * + * Returns: 0 if the next hop was found. Nonzero otherwise. 
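+ *
+ * On success addr1 is rewritten to the next hop's station address and
+ * addr2 to this interface's address; if the hardware supports fast-xmit,
+ * the destination is also handed to mesh_fast_tx_cache() so that later
+ * frames can skip this lookup.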
+ */ +int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct mesh_path *mpath; + struct sta_info *next_hop; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + u8 *target_addr = hdr->addr3; + + if (ifmsh->mshcfg.dot11MeshNolearn && + !mesh_nexthop_lookup_nolearn(sdata, skb)) + return 0; + + mpath = mesh_path_lookup(sdata, target_addr); + if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE)) + return -ENOENT; + + mesh_path_refresh(sdata, mpath, hdr->addr4); + + next_hop = rcu_dereference(mpath->next_hop); + if (next_hop) { + memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN); + memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); + ieee80211_mps_set_frame_flags(sdata, next_hop, hdr); + if (ieee80211_hw_check(&sdata->local->hw, SUPPORT_FAST_XMIT)) + mesh_fast_tx_cache(sdata, skb, mpath); + return 0; + } + + return -ENOENT; +} + +void mesh_path_timer(struct timer_list *t) +{ + struct mesh_path *mpath = from_timer(mpath, t, timer); + struct ieee80211_sub_if_data *sdata = mpath->sdata; + int ret; + + if (sdata->local->quiescing) + return; + + spin_lock_bh(&mpath->state_lock); + if (mpath->flags & MESH_PATH_RESOLVED || + (!(mpath->flags & MESH_PATH_RESOLVING))) { + mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); + spin_unlock_bh(&mpath->state_lock); + } else if (mpath->discovery_retries < max_preq_retries(sdata)) { + ++mpath->discovery_retries; + mpath->discovery_timeout *= 2; + mpath->flags &= ~MESH_PATH_REQ_QUEUED; + spin_unlock_bh(&mpath->state_lock); + mesh_queue_preq(mpath, 0); + } else { + mpath->flags &= ~(MESH_PATH_RESOLVING | + MESH_PATH_RESOLVED | + MESH_PATH_REQ_QUEUED); + mpath->exp_time = jiffies; + spin_unlock_bh(&mpath->state_lock); + if (!mpath->is_gate && mesh_gate_num(sdata) > 0) { + ret = mesh_path_send_to_gates(mpath); + if (ret) + mhwmp_dbg(sdata, "no gate was reachable\n"); + } else + mesh_path_flush_pending(mpath); + } +} + +void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval; + u8 flags, target_flags = 0; + + flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol) + ? RANN_FLAG_IS_GATE : 0; + + switch (ifmsh->mshcfg.dot11MeshHWMPRootMode) { + case IEEE80211_PROACTIVE_RANN: + mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr, + ++ifmsh->sn, 0, NULL, 0, broadcast_addr, + 0, ifmsh->mshcfg.element_ttl, + interval, 0, 0, sdata); + break; + case IEEE80211_PROACTIVE_PREQ_WITH_PREP: + flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG; + fallthrough; + case IEEE80211_PROACTIVE_PREQ_NO_PREP: + interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout; + target_flags |= IEEE80211_PREQ_TO_FLAG | + IEEE80211_PREQ_USN_FLAG; + mesh_path_sel_frame_tx(MPATH_PREQ, flags, sdata->vif.addr, + ++ifmsh->sn, target_flags, + (u8 *) broadcast_addr, 0, broadcast_addr, + 0, ifmsh->mshcfg.element_ttl, interval, + 0, ifmsh->preq_id++, sdata); + break; + default: + mhwmp_dbg(sdata, "Proactive mechanism not supported\n"); + return; + } +} diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c new file mode 100644 index 0000000000..3e52aaa57b --- /dev/null +++ b/net/mac80211/mesh_pathtbl.c @@ -0,0 +1,1072 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2008, 2009 open80211s Ltd. 
+ * Author: Luis Carlos Cobo <luisca@cozybit.com> + */ + +#include <linux/etherdevice.h> +#include <linux/list.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <net/mac80211.h> +#include "wme.h" +#include "ieee80211_i.h" +#include "mesh.h" +#include <linux/rhashtable.h> + +static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath); + +static u32 mesh_table_hash(const void *addr, u32 len, u32 seed) +{ + /* Use last four bytes of hw addr as hash index */ + return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed); +} + +static const struct rhashtable_params mesh_rht_params = { + .nelem_hint = 2, + .automatic_shrinking = true, + .key_len = ETH_ALEN, + .key_offset = offsetof(struct mesh_path, dst), + .head_offset = offsetof(struct mesh_path, rhash), + .hashfn = mesh_table_hash, +}; + +static const struct rhashtable_params fast_tx_rht_params = { + .nelem_hint = 10, + .automatic_shrinking = true, + .key_len = ETH_ALEN, + .key_offset = offsetof(struct ieee80211_mesh_fast_tx, addr_key), + .head_offset = offsetof(struct ieee80211_mesh_fast_tx, rhash), + .hashfn = mesh_table_hash, +}; + +static void __mesh_fast_tx_entry_free(void *ptr, void *tblptr) +{ + struct ieee80211_mesh_fast_tx *entry = ptr; + + kfree_rcu(entry, fast_tx.rcu_head); +} + +static void mesh_fast_tx_deinit(struct ieee80211_sub_if_data *sdata) +{ + struct mesh_tx_cache *cache; + + cache = &sdata->u.mesh.tx_cache; + rhashtable_free_and_destroy(&cache->rht, + __mesh_fast_tx_entry_free, NULL); +} + +static void mesh_fast_tx_init(struct ieee80211_sub_if_data *sdata) +{ + struct mesh_tx_cache *cache; + + cache = &sdata->u.mesh.tx_cache; + rhashtable_init(&cache->rht, &fast_tx_rht_params); + INIT_HLIST_HEAD(&cache->walk_head); + spin_lock_init(&cache->walk_lock); +} + +static inline bool mpath_expired(struct mesh_path *mpath) +{ + return (mpath->flags & MESH_PATH_ACTIVE) && + time_after(jiffies, mpath->exp_time) && + !(mpath->flags & MESH_PATH_FIXED); +} + +static void mesh_path_rht_free(void *ptr, void *tblptr) +{ + struct mesh_path *mpath = ptr; + struct mesh_table *tbl = tblptr; + + mesh_path_free_rcu(tbl, mpath); +} + +static void mesh_table_init(struct mesh_table *tbl) +{ + INIT_HLIST_HEAD(&tbl->known_gates); + INIT_HLIST_HEAD(&tbl->walk_head); + atomic_set(&tbl->entries, 0); + spin_lock_init(&tbl->gates_lock); + spin_lock_init(&tbl->walk_lock); + + /* rhashtable_init() may fail only in case of wrong + * mesh_rht_params + */ + WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params)); +} + +static void mesh_table_free(struct mesh_table *tbl) +{ + rhashtable_free_and_destroy(&tbl->rhead, + mesh_path_rht_free, tbl); +} + +/** + * mesh_path_assign_nexthop - update mesh path next hop + * + * @mpath: mesh path to update + * @sta: next hop to assign + * + * Locking: mpath->state_lock must be held when calling this function + */ +void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) +{ + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + unsigned long flags; + + rcu_assign_pointer(mpath->next_hop, sta); + + spin_lock_irqsave(&mpath->frame_queue.lock, flags); + skb_queue_walk(&mpath->frame_queue, skb) { + hdr = (struct ieee80211_hdr *) skb->data; + memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); + memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN); + ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr); + } + + spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); +} + +static void prepare_for_gate(struct sk_buff *skb, 
char *dst_addr, + struct mesh_path *gate_mpath) +{ + struct ieee80211_hdr *hdr; + struct ieee80211s_hdr *mshdr; + int mesh_hdrlen, hdrlen; + char *next_hop; + + hdr = (struct ieee80211_hdr *) skb->data; + hdrlen = ieee80211_hdrlen(hdr->frame_control); + mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); + + if (!(mshdr->flags & MESH_FLAGS_AE)) { + /* size of the fixed part of the mesh header */ + mesh_hdrlen = 6; + + /* make room for the two extended addresses */ + skb_push(skb, 2 * ETH_ALEN); + memmove(skb->data, hdr, hdrlen + mesh_hdrlen); + + hdr = (struct ieee80211_hdr *) skb->data; + + /* we preserve the previous mesh header and only add + * the new addresses */ + mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); + mshdr->flags = MESH_FLAGS_AE_A5_A6; + memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN); + memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN); + } + + /* update next hop */ + hdr = (struct ieee80211_hdr *) skb->data; + rcu_read_lock(); + next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr; + memcpy(hdr->addr1, next_hop, ETH_ALEN); + rcu_read_unlock(); + memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN); + memcpy(hdr->addr3, dst_addr, ETH_ALEN); +} + +/** + * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another + * + * This function is used to transfer or copy frames from an unresolved mpath to + * a gate mpath. The function also adds the Address Extension field and + * updates the next hop. + * + * If a frame already has an Address Extension field, only the next hop and + * destination addresses are updated. + * + * The gate mpath must be an active mpath with a valid mpath->next_hop. + * + * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate) + * @from_mpath: The failed mpath + * @copy: When true, copy all the frames to the new mpath queue. When false, + * move them. 
+ */ +static void mesh_path_move_to_queue(struct mesh_path *gate_mpath, + struct mesh_path *from_mpath, + bool copy) +{ + struct sk_buff *skb, *fskb, *tmp; + struct sk_buff_head failq; + unsigned long flags; + + if (WARN_ON(gate_mpath == from_mpath)) + return; + if (WARN_ON(!gate_mpath->next_hop)) + return; + + __skb_queue_head_init(&failq); + + spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); + skb_queue_splice_init(&from_mpath->frame_queue, &failq); + spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); + + skb_queue_walk_safe(&failq, fskb, tmp) { + if (skb_queue_len(&gate_mpath->frame_queue) >= + MESH_FRAME_QUEUE_LEN) { + mpath_dbg(gate_mpath->sdata, "mpath queue full!\n"); + break; + } + + skb = skb_copy(fskb, GFP_ATOMIC); + if (WARN_ON(!skb)) + break; + + prepare_for_gate(skb, gate_mpath->dst, gate_mpath); + skb_queue_tail(&gate_mpath->frame_queue, skb); + + if (copy) + continue; + + __skb_unlink(fskb, &failq); + kfree_skb(fskb); + } + + mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n", + gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue)); + + if (!copy) + return; + + spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); + skb_queue_splice(&failq, &from_mpath->frame_queue); + spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); +} + + +static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, + struct ieee80211_sub_if_data *sdata) +{ + struct mesh_path *mpath; + + mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params); + + if (mpath && mpath_expired(mpath)) { + spin_lock_bh(&mpath->state_lock); + mpath->flags &= ~MESH_PATH_ACTIVE; + spin_unlock_bh(&mpath->state_lock); + } + return mpath; +} + +/** + * mesh_path_lookup - look up a path in the mesh path table + * @sdata: local subif + * @dst: hardware address (ETH_ALEN length) of destination + * + * Returns: pointer to the mesh path structure, or NULL if not found + * + * Locking: must be called within a read rcu section. + */ +struct mesh_path * +mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) +{ + return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata); +} + +struct mesh_path * +mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) +{ + return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata); +} + +static struct mesh_path * +__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) +{ + int i = 0; + struct mesh_path *mpath; + + hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) { + if (i++ == idx) + break; + } + + if (!mpath) + return NULL; + + if (mpath_expired(mpath)) { + spin_lock_bh(&mpath->state_lock); + mpath->flags &= ~MESH_PATH_ACTIVE; + spin_unlock_bh(&mpath->state_lock); + } + return mpath; +} + +/** + * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index + * @idx: index + * @sdata: local subif, or NULL for all entries + * + * Returns: pointer to the mesh path structure, or NULL if not found. + * + * Locking: must be called within a read rcu section. + */ +struct mesh_path * +mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) +{ + return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx); +} + +/** + * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index + * @idx: index + * @sdata: local subif, or NULL for all entries + * + * Returns: pointer to the proxy path structure, or NULL if not found. + * + * Locking: must be called within a read rcu section. 
+ */ +struct mesh_path * +mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) +{ + return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx); +} + +/** + * mesh_path_add_gate - add the given mpath to a mesh gate to our path table + * @mpath: gate path to add to table + */ +int mesh_path_add_gate(struct mesh_path *mpath) +{ + struct mesh_table *tbl; + int err; + + rcu_read_lock(); + tbl = &mpath->sdata->u.mesh.mesh_paths; + + spin_lock_bh(&mpath->state_lock); + if (mpath->is_gate) { + err = -EEXIST; + spin_unlock_bh(&mpath->state_lock); + goto err_rcu; + } + mpath->is_gate = true; + mpath->sdata->u.mesh.num_gates++; + + spin_lock(&tbl->gates_lock); + hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates); + spin_unlock(&tbl->gates_lock); + + spin_unlock_bh(&mpath->state_lock); + + mpath_dbg(mpath->sdata, + "Mesh path: Recorded new gate: %pM. %d known gates\n", + mpath->dst, mpath->sdata->u.mesh.num_gates); + err = 0; +err_rcu: + rcu_read_unlock(); + return err; +} + +/** + * mesh_gate_del - remove a mesh gate from the list of known gates + * @tbl: table which holds our list of known gates + * @mpath: gate mpath + */ +static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) +{ + lockdep_assert_held(&mpath->state_lock); + if (!mpath->is_gate) + return; + + mpath->is_gate = false; + spin_lock_bh(&tbl->gates_lock); + hlist_del_rcu(&mpath->gate_list); + mpath->sdata->u.mesh.num_gates--; + spin_unlock_bh(&tbl->gates_lock); + + mpath_dbg(mpath->sdata, + "Mesh path: Deleted gate: %pM. %d known gates\n", + mpath->dst, mpath->sdata->u.mesh.num_gates); +} + +/** + * mesh_gate_num - number of gates known to this interface + * @sdata: subif data + */ +int mesh_gate_num(struct ieee80211_sub_if_data *sdata) +{ + return sdata->u.mesh.num_gates; +} + +static +struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata, + const u8 *dst, gfp_t gfp_flags) +{ + struct mesh_path *new_mpath; + + new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags); + if (!new_mpath) + return NULL; + + memcpy(new_mpath->dst, dst, ETH_ALEN); + eth_broadcast_addr(new_mpath->rann_snd_addr); + new_mpath->is_root = false; + new_mpath->sdata = sdata; + new_mpath->flags = 0; + skb_queue_head_init(&new_mpath->frame_queue); + new_mpath->exp_time = jiffies; + spin_lock_init(&new_mpath->state_lock); + timer_setup(&new_mpath->timer, mesh_path_timer, 0); + + return new_mpath; +} + +static void mesh_fast_tx_entry_free(struct mesh_tx_cache *cache, + struct ieee80211_mesh_fast_tx *entry) +{ + hlist_del_rcu(&entry->walk_list); + rhashtable_remove_fast(&cache->rht, &entry->rhash, fast_tx_rht_params); + kfree_rcu(entry, fast_tx.rcu_head); +} + +struct ieee80211_mesh_fast_tx * +mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, const u8 *addr) +{ + struct ieee80211_mesh_fast_tx *entry; + struct mesh_tx_cache *cache; + + cache = &sdata->u.mesh.tx_cache; + entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params); + if (!entry) + return NULL; + + if (!(entry->mpath->flags & MESH_PATH_ACTIVE) || + mpath_expired(entry->mpath)) { + spin_lock_bh(&cache->walk_lock); + entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params); + if (entry) + mesh_fast_tx_entry_free(cache, entry); + spin_unlock_bh(&cache->walk_lock); + return NULL; + } + + mesh_path_refresh(sdata, entry->mpath, NULL); + if (entry->mppath) + entry->mppath->exp_time = jiffies; + entry->timestamp = jiffies; + + return entry; +} + +void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, struct 
mesh_path *mpath) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_mesh_fast_tx *entry, *prev; + struct ieee80211_mesh_fast_tx build = {}; + struct ieee80211s_hdr *meshhdr; + struct mesh_tx_cache *cache; + struct ieee80211_key *key; + struct mesh_path *mppath; + struct sta_info *sta; + u8 *qc; + + if (sdata->noack_map || + !ieee80211_is_data_qos(hdr->frame_control)) + return; + + build.fast_tx.hdr_len = ieee80211_hdrlen(hdr->frame_control); + meshhdr = (struct ieee80211s_hdr *)(skb->data + build.fast_tx.hdr_len); + build.hdrlen = ieee80211_get_mesh_hdrlen(meshhdr); + + cache = &sdata->u.mesh.tx_cache; + if (atomic_read(&cache->rht.nelems) >= MESH_FAST_TX_CACHE_MAX_SIZE) + return; + + sta = rcu_dereference(mpath->next_hop); + if (!sta) + return; + + if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) { + /* This is required to keep the mppath alive */ + mppath = mpp_path_lookup(sdata, meshhdr->eaddr1); + if (!mppath) + return; + build.mppath = mppath; + } else if (ieee80211_has_a4(hdr->frame_control)) { + mppath = mpath; + } else { + return; + } + + /* rate limit, in case fast xmit can't be enabled */ + if (mppath->fast_tx_check == jiffies) + return; + + mppath->fast_tx_check = jiffies; + + /* + * Same use of the sta lock as in ieee80211_check_fast_xmit, in order + * to protect against concurrent sta key updates. + */ + spin_lock_bh(&sta->lock); + key = rcu_access_pointer(sta->ptk[sta->ptk_idx]); + if (!key) + key = rcu_access_pointer(sdata->default_unicast_key); + build.fast_tx.key = key; + + if (key) { + bool gen_iv, iv_spc; + + gen_iv = key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV; + iv_spc = key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE; + + if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) || + (key->flags & KEY_FLAG_TAINTED)) + goto unlock_sta; + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + if (gen_iv) + build.fast_tx.pn_offs = build.fast_tx.hdr_len; + if (gen_iv || iv_spc) + build.fast_tx.hdr_len += IEEE80211_CCMP_HDR_LEN; + break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + if (gen_iv) + build.fast_tx.pn_offs = build.fast_tx.hdr_len; + if (gen_iv || iv_spc) + build.fast_tx.hdr_len += IEEE80211_GCMP_HDR_LEN; + break; + default: + goto unlock_sta; + } + } + + memcpy(build.addr_key, mppath->dst, ETH_ALEN); + build.timestamp = jiffies; + build.fast_tx.band = info->band; + build.fast_tx.da_offs = offsetof(struct ieee80211_hdr, addr3); + build.fast_tx.sa_offs = offsetof(struct ieee80211_hdr, addr4); + build.mpath = mpath; + memcpy(build.hdr, meshhdr, build.hdrlen); + memcpy(build.hdr + build.hdrlen, rfc1042_header, sizeof(rfc1042_header)); + build.hdrlen += sizeof(rfc1042_header); + memcpy(build.fast_tx.hdr, hdr, build.fast_tx.hdr_len); + + hdr = (struct ieee80211_hdr *)build.fast_tx.hdr; + if (build.fast_tx.key) + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); + + qc = ieee80211_get_qos_ctl(hdr); + qc[1] |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8; + + entry = kmemdup(&build, sizeof(build), GFP_ATOMIC); + if (!entry) + goto unlock_sta; + + spin_lock(&cache->walk_lock); + prev = rhashtable_lookup_get_insert_fast(&cache->rht, + &entry->rhash, + fast_tx_rht_params); + if (unlikely(IS_ERR(prev))) { + kfree(entry); + goto unlock_cache; + } + + /* + * replace any previous entry in the hash table, in case we're + * replacing it with a different type (e.g. 
mpath -> mpp) + */ + if (unlikely(prev)) { + rhashtable_replace_fast(&cache->rht, &prev->rhash, + &entry->rhash, fast_tx_rht_params); + hlist_del_rcu(&prev->walk_list); + kfree_rcu(prev, fast_tx.rcu_head); + } + + hlist_add_head(&entry->walk_list, &cache->walk_head); + +unlock_cache: + spin_unlock(&cache->walk_lock); +unlock_sta: + spin_unlock_bh(&sta->lock); +} + +void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata) +{ + unsigned long timeout = msecs_to_jiffies(MESH_FAST_TX_CACHE_TIMEOUT); + struct mesh_tx_cache *cache; + struct ieee80211_mesh_fast_tx *entry; + struct hlist_node *n; + + cache = &sdata->u.mesh.tx_cache; + if (atomic_read(&cache->rht.nelems) < MESH_FAST_TX_CACHE_THRESHOLD_SIZE) + return; + + spin_lock_bh(&cache->walk_lock); + hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list) + if (!time_is_after_jiffies(entry->timestamp + timeout)) + mesh_fast_tx_entry_free(cache, entry); + spin_unlock_bh(&cache->walk_lock); +} + +void mesh_fast_tx_flush_mpath(struct mesh_path *mpath) +{ + struct ieee80211_sub_if_data *sdata = mpath->sdata; + struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache; + struct ieee80211_mesh_fast_tx *entry; + struct hlist_node *n; + + cache = &sdata->u.mesh.tx_cache; + spin_lock_bh(&cache->walk_lock); + hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list) + if (entry->mpath == mpath) + mesh_fast_tx_entry_free(cache, entry); + spin_unlock_bh(&cache->walk_lock); +} + +void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta) +{ + struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache; + struct ieee80211_mesh_fast_tx *entry; + struct hlist_node *n; + + cache = &sdata->u.mesh.tx_cache; + spin_lock_bh(&cache->walk_lock); + hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list) + if (rcu_access_pointer(entry->mpath->next_hop) == sta) + mesh_fast_tx_entry_free(cache, entry); + spin_unlock_bh(&cache->walk_lock); +} + +void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata, + const u8 *addr) +{ + struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache; + struct ieee80211_mesh_fast_tx *entry; + + cache = &sdata->u.mesh.tx_cache; + spin_lock_bh(&cache->walk_lock); + entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params); + if (entry) + mesh_fast_tx_entry_free(cache, entry); + spin_unlock_bh(&cache->walk_lock); +} + +/** + * mesh_path_add - allocate and add a new path to the mesh path table + * @dst: destination address of the path (ETH_ALEN length) + * @sdata: local subif + * + * Returns: 0 on success + * + * State: the initial state of the new path is set to 0 + */ +struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, + const u8 *dst) +{ + struct mesh_table *tbl; + struct mesh_path *mpath, *new_mpath; + + if (ether_addr_equal(dst, sdata->vif.addr)) + /* never add ourselves as neighbours */ + return ERR_PTR(-ENOTSUPP); + + if (is_multicast_ether_addr(dst)) + return ERR_PTR(-ENOTSUPP); + + if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0) + return ERR_PTR(-ENOSPC); + + new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC); + if (!new_mpath) + return ERR_PTR(-ENOMEM); + + tbl = &sdata->u.mesh.mesh_paths; + spin_lock_bh(&tbl->walk_lock); + mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead, + &new_mpath->rhash, + mesh_rht_params); + if (!mpath) + hlist_add_head(&new_mpath->walk_list, &tbl->walk_head); + spin_unlock_bh(&tbl->walk_lock); + + if (mpath) { + kfree(new_mpath); + + if (IS_ERR(mpath)) + return mpath; + + new_mpath = mpath; + } + + 
sdata->u.mesh.mesh_paths_generation++; + return new_mpath; +} + +int mpp_path_add(struct ieee80211_sub_if_data *sdata, + const u8 *dst, const u8 *mpp) +{ + struct mesh_table *tbl; + struct mesh_path *new_mpath; + int ret; + + if (ether_addr_equal(dst, sdata->vif.addr)) + /* never add ourselves as neighbours */ + return -ENOTSUPP; + + if (is_multicast_ether_addr(dst)) + return -ENOTSUPP; + + new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC); + + if (!new_mpath) + return -ENOMEM; + + memcpy(new_mpath->mpp, mpp, ETH_ALEN); + tbl = &sdata->u.mesh.mpp_paths; + + spin_lock_bh(&tbl->walk_lock); + ret = rhashtable_lookup_insert_fast(&tbl->rhead, + &new_mpath->rhash, + mesh_rht_params); + if (!ret) + hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head); + spin_unlock_bh(&tbl->walk_lock); + + if (ret) + kfree(new_mpath); + else + mesh_fast_tx_flush_addr(sdata, dst); + + sdata->u.mesh.mpp_paths_generation++; + return ret; +} + + +/** + * mesh_plink_broken - deactivates paths and sends perr when a link breaks + * + * @sta: broken peer link + * + * This function must be called from the rate control algorithm if enough + * delivery errors suggest that a peer link is no longer usable. + */ +void mesh_plink_broken(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct mesh_table *tbl = &sdata->u.mesh.mesh_paths; + static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + struct mesh_path *mpath; + + rcu_read_lock(); + hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) { + if (rcu_access_pointer(mpath->next_hop) == sta && + mpath->flags & MESH_PATH_ACTIVE && + !(mpath->flags & MESH_PATH_FIXED)) { + spin_lock_bh(&mpath->state_lock); + mpath->flags &= ~MESH_PATH_ACTIVE; + ++mpath->sn; + spin_unlock_bh(&mpath->state_lock); + mesh_path_error_tx(sdata, + sdata->u.mesh.mshcfg.element_ttl, + mpath->dst, mpath->sn, + WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); + } + } + rcu_read_unlock(); +} + +static void mesh_path_free_rcu(struct mesh_table *tbl, + struct mesh_path *mpath) +{ + struct ieee80211_sub_if_data *sdata = mpath->sdata; + + spin_lock_bh(&mpath->state_lock); + mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED; + mesh_gate_del(tbl, mpath); + spin_unlock_bh(&mpath->state_lock); + timer_shutdown_sync(&mpath->timer); + atomic_dec(&sdata->u.mesh.mpaths); + atomic_dec(&tbl->entries); + mesh_path_flush_pending(mpath); + kfree_rcu(mpath, rcu); +} + +static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) +{ + hlist_del_rcu(&mpath->walk_list); + rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); + if (tbl == &mpath->sdata->u.mesh.mpp_paths) + mesh_fast_tx_flush_addr(mpath->sdata, mpath->dst); + else + mesh_fast_tx_flush_mpath(mpath); + mesh_path_free_rcu(tbl, mpath); +} + +/** + * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches + * + * @sta: mesh peer to match + * + * RCU notes: this function is called when a mesh plink transitions from + * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that + * allows path creation. This will happen before the sta can be freed (because + * sta_info_destroy() calls this) so any reader in a rcu read block will be + * protected against the plink disappearing. 
+ */ +void mesh_path_flush_by_nexthop(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct mesh_table *tbl = &sdata->u.mesh.mesh_paths; + struct mesh_path *mpath; + struct hlist_node *n; + + spin_lock_bh(&tbl->walk_lock); + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { + if (rcu_access_pointer(mpath->next_hop) == sta) + __mesh_path_del(tbl, mpath); + } + spin_unlock_bh(&tbl->walk_lock); +} + +static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, + const u8 *proxy) +{ + struct mesh_table *tbl = &sdata->u.mesh.mpp_paths; + struct mesh_path *mpath; + struct hlist_node *n; + + spin_lock_bh(&tbl->walk_lock); + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { + if (ether_addr_equal(mpath->mpp, proxy)) + __mesh_path_del(tbl, mpath); + } + spin_unlock_bh(&tbl->walk_lock); +} + +static void table_flush_by_iface(struct mesh_table *tbl) +{ + struct mesh_path *mpath; + struct hlist_node *n; + + spin_lock_bh(&tbl->walk_lock); + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { + __mesh_path_del(tbl, mpath); + } + spin_unlock_bh(&tbl->walk_lock); +} + +/** + * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface + * + * This function deletes both mesh paths as well as mesh portal paths. + * + * @sdata: interface data to match + * + */ +void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) +{ + table_flush_by_iface(&sdata->u.mesh.mesh_paths); + table_flush_by_iface(&sdata->u.mesh.mpp_paths); +} + +/** + * table_path_del - delete a path from the mesh or mpp table + * + * @tbl: mesh or mpp path table + * @sdata: local subif + * @addr: dst address (ETH_ALEN length) + * + * Returns: 0 if successful + */ +static int table_path_del(struct mesh_table *tbl, + struct ieee80211_sub_if_data *sdata, + const u8 *addr) +{ + struct mesh_path *mpath; + + spin_lock_bh(&tbl->walk_lock); + mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params); + if (!mpath) { + spin_unlock_bh(&tbl->walk_lock); + return -ENXIO; + } + + __mesh_path_del(tbl, mpath); + spin_unlock_bh(&tbl->walk_lock); + return 0; +} + + +/** + * mesh_path_del - delete a mesh path from the table + * + * @addr: dst address (ETH_ALEN length) + * @sdata: local subif + * + * Returns: 0 if successful + */ +int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) +{ + int err; + + /* flush relevant mpp entries first */ + mpp_flush_by_proxy(sdata, addr); + + err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr); + sdata->u.mesh.mesh_paths_generation++; + return err; +} + +/** + * mesh_path_tx_pending - sends pending frames in a mesh path queue + * + * @mpath: mesh path to activate + * + * Locking: the state_lock of the mpath structure must NOT be held when calling + * this function. + */ +void mesh_path_tx_pending(struct mesh_path *mpath) +{ + if (mpath->flags & MESH_PATH_ACTIVE) + ieee80211_add_pending_skbs(mpath->sdata->local, + &mpath->frame_queue); +} + +/** + * mesh_path_send_to_gates - sends pending frames to all known mesh gates + * + * @mpath: mesh path whose queue will be emptied + * + * If there is only one gate, the frames are transferred from the failed mpath + * queue to that gate's queue. If there are more than one gates, the frames + * are copied from each gate to the next. After frames are copied, the + * mpath queues are emptied onto the transmission queue. 
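+ *
+ * Returns: 0 if the pending frames were handed to at least one active gate,
+ * -EHOSTUNREACH if no active gate was found.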
+ */
+int mesh_path_send_to_gates(struct mesh_path *mpath)
+{
+ struct ieee80211_sub_if_data *sdata = mpath->sdata;
+ struct mesh_table *tbl;
+ struct mesh_path *from_mpath = mpath;
+ struct mesh_path *gate;
+ bool copy = false;
+
+ tbl = &sdata->u.mesh.mesh_paths;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
+ if (gate->flags & MESH_PATH_ACTIVE) {
+ mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
+ mesh_path_move_to_queue(gate, from_mpath, copy);
+ from_mpath = gate;
+ copy = true;
+ } else {
+ mpath_dbg(sdata,
+ "Not forwarding to %pM (flags %#x)\n",
+ gate->dst, gate->flags);
+ }
+ }
+
+ hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
+ mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
+ mesh_path_tx_pending(gate);
+ }
+ rcu_read_unlock();
+
+ return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
+}
+
+/**
+ * mesh_path_discard_frame - discard a frame whose path could not be resolved
+ *
+ * @skb: frame to discard
+ * @sdata: network subif the frame was to be sent through
+ *
+ * Locking: the function must be called within a rcu_read_lock region
+ */
+void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ ieee80211_free_txskb(&sdata->local->hw, skb);
+ sdata->u.mesh.mshstats.dropped_frames_no_route++;
+}
+
+/**
+ * mesh_path_flush_pending - free the pending queue of a mesh path
+ *
+ * @mpath: mesh path whose queue has to be freed
+ *
+ * Locking: the function must be called within a rcu_read_lock region
+ */
+void mesh_path_flush_pending(struct mesh_path *mpath)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
+ mesh_path_discard_frame(mpath->sdata, skb);
+}
+
+/**
+ * mesh_path_fix_nexthop - force a specific next hop for a mesh path
+ *
+ * @mpath: the mesh path to modify
+ * @next_hop: the next hop to force
+ *
+ * Locking: this function must be called holding mpath->state_lock
+ */
+void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
+{
+ spin_lock_bh(&mpath->state_lock);
+ mesh_path_assign_nexthop(mpath, next_hop);
+ mpath->sn = 0xffff;
+ mpath->metric = 0;
+ mpath->hop_count = 0;
+ mpath->exp_time = 0;
+ mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
+ mesh_path_activate(mpath);
+ mesh_fast_tx_flush_mpath(mpath);
+ spin_unlock_bh(&mpath->state_lock);
+ ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
+ /* init it at a low value - 0 start is tricky */
+ ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
+ mesh_path_tx_pending(mpath);
+}
+
+void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
+{
+ mesh_table_init(&sdata->u.mesh.mesh_paths);
+ mesh_table_init(&sdata->u.mesh.mpp_paths);
+ mesh_fast_tx_init(sdata);
+}
+
+static
+void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
+ struct mesh_table *tbl)
+{
+ struct mesh_path *mpath;
+ struct hlist_node *n;
+
+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
+ if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
+ (!(mpath->flags & MESH_PATH_FIXED)) &&
+ time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
+ __mesh_path_del(tbl, mpath);
+ }
+ spin_unlock_bh(&tbl->walk_lock);
+}
+
+void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
+{
+ mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
+ mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
+}
+
+void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
+{
+ mesh_fast_tx_deinit(sdata);
+ 
mesh_table_free(&sdata->u.mesh.mesh_paths); + mesh_table_free(&sdata->u.mesh.mpp_paths); +} diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c new file mode 100644 index 0000000000..cc62c2a01f --- /dev/null +++ b/net/mac80211/mesh_plink.c @@ -0,0 +1,1250 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2008, 2009 open80211s Ltd. + * Copyright (C) 2019, 2021-2023 Intel Corporation + * Author: Luis Carlos Cobo <luisca@cozybit.com> + */ +#include <linux/gfp.h> +#include <linux/kernel.h> +#include <linux/random.h> +#include <linux/rculist.h> + +#include "ieee80211_i.h" +#include "rate.h" +#include "mesh.h" + +#define PLINK_CNF_AID(mgmt) ((mgmt)->u.action.u.self_prot.variable + 2) +#define PLINK_GET_LLID(p) (p + 2) +#define PLINK_GET_PLID(p) (p + 4) + +#define mod_plink_timer(s, t) (mod_timer(&s->mesh->plink_timer, \ + jiffies + msecs_to_jiffies(t))) + +enum plink_event { + PLINK_UNDEFINED, + OPN_ACPT, + OPN_RJCT, + OPN_IGNR, + CNF_ACPT, + CNF_RJCT, + CNF_IGNR, + CLS_ACPT, + CLS_IGNR +}; + +static const char * const mplstates[] = { + [NL80211_PLINK_LISTEN] = "LISTEN", + [NL80211_PLINK_OPN_SNT] = "OPN-SNT", + [NL80211_PLINK_OPN_RCVD] = "OPN-RCVD", + [NL80211_PLINK_CNF_RCVD] = "CNF_RCVD", + [NL80211_PLINK_ESTAB] = "ESTAB", + [NL80211_PLINK_HOLDING] = "HOLDING", + [NL80211_PLINK_BLOCKED] = "BLOCKED" +}; + +static const char * const mplevents[] = { + [PLINK_UNDEFINED] = "NONE", + [OPN_ACPT] = "OPN_ACPT", + [OPN_RJCT] = "OPN_RJCT", + [OPN_IGNR] = "OPN_IGNR", + [CNF_ACPT] = "CNF_ACPT", + [CNF_RJCT] = "CNF_RJCT", + [CNF_IGNR] = "CNF_IGNR", + [CLS_ACPT] = "CLS_ACPT", + [CLS_IGNR] = "CLS_IGNR" +}; + +/* We only need a valid sta if user configured a minimum rssi_threshold. */ +static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta) +{ + s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold; + return rssi_threshold == 0 || + (sta && + (s8)-ewma_signal_read(&sta->deflink.rx_stats_avg.signal) > + rssi_threshold); +} + +/** + * mesh_plink_fsm_restart - restart a mesh peer link finite state machine + * + * @sta: mesh peer link to restart + * + * Locking: this function must be called holding sta->mesh->plink_lock + */ +static inline void mesh_plink_fsm_restart(struct sta_info *sta) +{ + lockdep_assert_held(&sta->mesh->plink_lock); + sta->mesh->plink_state = NL80211_PLINK_LISTEN; + sta->mesh->llid = sta->mesh->plid = sta->mesh->reason = 0; + sta->mesh->plink_retries = 0; +} + +/* + * mesh_set_short_slot_time - enable / disable ERP short slot time. + * + * The standard indirectly mandates mesh STAs to turn off short slot time by + * disallowing advertising this (802.11-2012 8.4.1.4), but that doesn't mean we + * can't be sneaky about it. Enable short slot time if all mesh STAs in the + * MBSS support ERP rates. + * + * Returns BSS_CHANGED_ERP_SLOT or 0 for no change. 
+ */
+static u64 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
+ struct sta_info *sta;
+ u32 erp_rates = 0;
+ u64 changed = 0;
+ int i;
+ bool short_slot = false;
+
+ sband = ieee80211_get_sband(sdata);
+ if (!sband)
+ return changed;
+
+ if (sband->band == NL80211_BAND_5GHZ) {
+ /* (IEEE 802.11-2012 19.4.5) */
+ short_slot = true;
+ goto out;
+ } else if (sband->band != NL80211_BAND_2GHZ) {
+ goto out;
+ }
+
+ for (i = 0; i < sband->n_bitrates; i++)
+ if (sband->bitrates[i].flags & IEEE80211_RATE_ERP_G)
+ erp_rates |= BIT(i);
+
+ if (!erp_rates)
+ goto out;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ if (sdata != sta->sdata ||
+ sta->mesh->plink_state != NL80211_PLINK_ESTAB)
+ continue;
+
+ short_slot = false;
+ if (erp_rates & sta->sta.deflink.supp_rates[sband->band])
+ short_slot = true;
+ else
+ break;
+ }
+ rcu_read_unlock();
+
+out:
+ if (sdata->vif.bss_conf.use_short_slot != short_slot) {
+ sdata->vif.bss_conf.use_short_slot = short_slot;
+ changed = BSS_CHANGED_ERP_SLOT;
+ mpl_dbg(sdata, "mesh_plink %pM: ERP short slot time %d\n",
+ sdata->vif.addr, short_slot);
+ }
+ return changed;
+}
+
+/**
+ * mesh_set_ht_prot_mode - set correct HT protection mode
+ * @sdata: the (mesh) interface to handle
+ *
+ * Section 9.23.3.5 of IEEE 802.11-2012 describes the protection rules for HT
+ * mesh STAs in an MBSS. Three HT protection modes are supported for now: non-HT
+ * mixed mode, 20MHz-protection and no-protection mode. non-HT mixed mode is
+ * selected if any non-HT peers are present in our MBSS. 20MHz-protection mode
+ * is selected if all peers in our 20/40MHz MBSS support HT and at least one
+ * HT20 peer is present. Otherwise no-protection mode is selected.
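+ *
+ * Returns BSS_CHANGED_HT or 0 for no change.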
+ */ +static u64 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + u16 ht_opmode; + bool non_ht_sta = false, ht20_sta = false; + + switch (sdata->vif.bss_conf.chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: + return 0; + default: + break; + } + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (sdata != sta->sdata || + sta->mesh->plink_state != NL80211_PLINK_ESTAB) + continue; + + if (sta->sta.deflink.bandwidth > IEEE80211_STA_RX_BW_20) + continue; + + if (!sta->sta.deflink.ht_cap.ht_supported) { + mpl_dbg(sdata, "nonHT sta (%pM) is present\n", + sta->sta.addr); + non_ht_sta = true; + break; + } + + mpl_dbg(sdata, "HT20 sta (%pM) is present\n", sta->sta.addr); + ht20_sta = true; + } + rcu_read_unlock(); + + if (non_ht_sta) + ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED; + else if (ht20_sta && + sdata->vif.bss_conf.chandef.width > NL80211_CHAN_WIDTH_20) + ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ; + else + ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE; + + if (sdata->vif.bss_conf.ht_operation_mode == ht_opmode) + return 0; + + sdata->vif.bss_conf.ht_operation_mode = ht_opmode; + sdata->u.mesh.mshcfg.ht_opmode = ht_opmode; + mpl_dbg(sdata, "selected new HT protection mode %d\n", ht_opmode); + return BSS_CHANGED_HT; +} + +static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + enum ieee80211_self_protected_actioncode action, + u8 *da, u16 llid, u16 plid, u16 reason) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_tx_info *info; + struct ieee80211_mgmt *mgmt; + bool include_plid = false; + u16 peering_proto = 0; + u8 *pos, ie_len = 4; + u8 ie_len_he_cap, ie_len_eht_cap; + int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.self_prot); + int err = -ENOMEM; + + ie_len_he_cap = ieee80211_ie_len_he_cap(sdata, + NL80211_IFTYPE_MESH_POINT); + ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata, + NL80211_IFTYPE_MESH_POINT); + skb = dev_alloc_skb(local->tx_headroom + + hdr_len + + 2 + /* capability info */ + 2 + /* AID */ + 2 + 8 + /* supported rates */ + 2 + (IEEE80211_MAX_SUPP_RATES - 8) + + 2 + sdata->u.mesh.mesh_id_len + + 2 + sizeof(struct ieee80211_meshconf_ie) + + 2 + sizeof(struct ieee80211_ht_cap) + + 2 + sizeof(struct ieee80211_ht_operation) + + 2 + sizeof(struct ieee80211_vht_cap) + + 2 + sizeof(struct ieee80211_vht_operation) + + ie_len_he_cap + + 2 + 1 + sizeof(struct ieee80211_he_operation) + + sizeof(struct ieee80211_he_6ghz_oper) + + 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa) + + ie_len_eht_cap + + 2 + 1 + offsetof(struct ieee80211_eht_operation, optional) + + offsetof(struct ieee80211_eht_operation_info, optional) + + 2 + 8 + /* peering IE */ + sdata->u.mesh.ie_len); + if (!skb) + return err; + info = IEEE80211_SKB_CB(skb); + skb_reserve(skb, local->tx_headroom); + mgmt = skb_put_zero(skb, hdr_len); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); + mgmt->u.action.category = WLAN_CATEGORY_SELF_PROTECTED; + mgmt->u.action.u.self_prot.action_code = action; + + if (action != WLAN_SP_MESH_PEERING_CLOSE) { + struct ieee80211_supported_band *sband; + enum nl80211_band band; + + sband = ieee80211_get_sband(sdata); + if (!sband) { + err = 
-EINVAL; + goto free; + } + band = sband->band; + + /* capability info */ + pos = skb_put_zero(skb, 2); + if (action == WLAN_SP_MESH_PEERING_CONFIRM) { + /* AID */ + pos = skb_put(skb, 2); + put_unaligned_le16(sta->sta.aid, pos); + } + if (ieee80211_add_srates_ie(sdata, skb, true, band) || + ieee80211_add_ext_srates_ie(sdata, skb, true, band) || + mesh_add_rsn_ie(sdata, skb) || + mesh_add_meshid_ie(sdata, skb) || + mesh_add_meshconf_ie(sdata, skb)) + goto free; + } else { /* WLAN_SP_MESH_PEERING_CLOSE */ + info->flags |= IEEE80211_TX_CTL_NO_ACK; + if (mesh_add_meshid_ie(sdata, skb)) + goto free; + } + + /* Add Mesh Peering Management element */ + switch (action) { + case WLAN_SP_MESH_PEERING_OPEN: + break; + case WLAN_SP_MESH_PEERING_CONFIRM: + ie_len += 2; + include_plid = true; + break; + case WLAN_SP_MESH_PEERING_CLOSE: + if (plid) { + ie_len += 2; + include_plid = true; + } + ie_len += 2; /* reason code */ + break; + default: + err = -EINVAL; + goto free; + } + + if (WARN_ON(skb_tailroom(skb) < 2 + ie_len)) + goto free; + + pos = skb_put(skb, 2 + ie_len); + *pos++ = WLAN_EID_PEER_MGMT; + *pos++ = ie_len; + memcpy(pos, &peering_proto, 2); + pos += 2; + put_unaligned_le16(llid, pos); + pos += 2; + if (include_plid) { + put_unaligned_le16(plid, pos); + pos += 2; + } + if (action == WLAN_SP_MESH_PEERING_CLOSE) { + put_unaligned_le16(reason, pos); + pos += 2; + } + + if (action != WLAN_SP_MESH_PEERING_CLOSE) { + if (mesh_add_ht_cap_ie(sdata, skb) || + mesh_add_ht_oper_ie(sdata, skb) || + mesh_add_vht_cap_ie(sdata, skb) || + mesh_add_vht_oper_ie(sdata, skb) || + mesh_add_he_cap_ie(sdata, skb, ie_len_he_cap) || + mesh_add_he_oper_ie(sdata, skb) || + mesh_add_he_6ghz_cap_ie(sdata, skb) || + mesh_add_eht_cap_ie(sdata, skb, ie_len_eht_cap) || + mesh_add_eht_oper_ie(sdata, skb)) + goto free; + } + + if (mesh_add_vendor_ies(sdata, skb)) + goto free; + + ieee80211_tx_skb(sdata, skb); + return 0; +free: + kfree_skb(skb); + return err; +} + +/** + * __mesh_plink_deactivate - deactivate mesh peer link + * + * @sta: mesh peer link to deactivate + * + * Mesh paths with this peer as next hop should be flushed + * by the caller outside of plink_lock. + * + * Returns beacon changed flag if the beacon content changed. 
+ * + * Locking: the caller must hold sta->mesh->plink_lock + */ +static u64 __mesh_plink_deactivate(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + u64 changed = 0; + + lockdep_assert_held(&sta->mesh->plink_lock); + + if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) + changed = mesh_plink_dec_estab_count(sdata); + sta->mesh->plink_state = NL80211_PLINK_BLOCKED; + + ieee80211_mps_sta_status_update(sta); + changed |= ieee80211_mps_set_sta_local_pm(sta, + NL80211_MESH_POWER_UNKNOWN); + + return changed; +} + +/** + * mesh_plink_deactivate - deactivate mesh peer link + * + * @sta: mesh peer link to deactivate + * + * All mesh paths with this peer as next hop will be flushed + */ +u64 mesh_plink_deactivate(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + u64 changed; + + spin_lock_bh(&sta->mesh->plink_lock); + changed = __mesh_plink_deactivate(sta); + + if (!sdata->u.mesh.user_mpm) { + sta->mesh->reason = WLAN_REASON_MESH_PEER_CANCELED; + mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_CLOSE, + sta->sta.addr, sta->mesh->llid, + sta->mesh->plid, sta->mesh->reason); + } + spin_unlock_bh(&sta->mesh->plink_lock); + if (!sdata->u.mesh.user_mpm) + del_timer_sync(&sta->mesh->plink_timer); + mesh_path_flush_by_nexthop(sta); + + /* make sure no readers can access nexthop sta from here on */ + synchronize_net(); + + return changed; +} + +static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee802_11_elems *elems) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; + u32 rates, basic_rates = 0, changed = 0; + enum ieee80211_sta_rx_bandwidth bw = sta->sta.deflink.bandwidth; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return; + + rates = ieee80211_sta_get_rates(sdata, elems, sband->band, + &basic_rates); + + spin_lock_bh(&sta->mesh->plink_lock); + sta->deflink.rx_stats.last_rx = jiffies; + + /* rates and capabilities don't change during peering */ + if (sta->mesh->plink_state == NL80211_PLINK_ESTAB && + sta->mesh->processed_beacon) + goto out; + sta->mesh->processed_beacon = true; + + if (sta->sta.deflink.supp_rates[sband->band] != rates) + changed |= IEEE80211_RC_SUPP_RATES_CHANGED; + sta->sta.deflink.supp_rates[sband->band] = rates; + + if (ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, + elems->ht_cap_elem, + &sta->deflink)) + changed |= IEEE80211_RC_BW_CHANGED; + + ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, + elems->vht_cap_elem, NULL, + &sta->deflink); + + ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, elems->he_cap, + elems->he_cap_len, + elems->he_6ghz_capa, + &sta->deflink); + + ieee80211_eht_cap_ie_to_sta_eht_cap(sdata, sband, elems->he_cap, + elems->he_cap_len, + elems->eht_cap, elems->eht_cap_len, + &sta->deflink); + + if (bw != sta->sta.deflink.bandwidth) + changed |= IEEE80211_RC_BW_CHANGED; + + /* HT peer is operating 20MHz-only */ + if (elems->ht_operation && + !(elems->ht_operation->ht_param & + IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) { + if (sta->sta.deflink.bandwidth != IEEE80211_STA_RX_BW_20) + changed |= IEEE80211_RC_BW_CHANGED; + sta->sta.deflink.bandwidth = IEEE80211_STA_RX_BW_20; + } + + if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) + rate_control_rate_init(sta); + else + rate_control_rate_update(local, sband, sta, 0, changed); +out: + spin_unlock_bh(&sta->mesh->plink_lock); +} + +static int mesh_allocate_aid(struct ieee80211_sub_if_data *sdata) +{ + struct sta_info *sta; + unsigned long *aid_map; + int aid; + 
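+ /* Collect the AIDs already in use by known stations in a temporary
+ * bitmap and hand out the lowest free one.
+ */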
+ aid_map = bitmap_zalloc(IEEE80211_MAX_AID + 1, GFP_KERNEL); + if (!aid_map) + return -ENOMEM; + + /* reserve aid 0 for mcast indication */ + __set_bit(0, aid_map); + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) + __set_bit(sta->sta.aid, aid_map); + rcu_read_unlock(); + + aid = find_first_zero_bit(aid_map, IEEE80211_MAX_AID + 1); + bitmap_free(aid_map); + + if (aid > IEEE80211_MAX_AID) + return -ENOBUFS; + + return aid; +} + +static struct sta_info * +__mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr) +{ + struct sta_info *sta; + int aid; + + if (sdata->local->num_sta >= MESH_MAX_PLINKS) + return NULL; + + aid = mesh_allocate_aid(sdata); + if (aid < 0) + return NULL; + + sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL); + if (!sta) + return NULL; + + sta->mesh->plink_state = NL80211_PLINK_LISTEN; + sta->sta.wme = true; + sta->sta.aid = aid; + + sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); + sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); + sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED); + + return sta; +} + +static struct sta_info * +mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr, + struct ieee802_11_elems *elems, + struct ieee80211_rx_status *rx_status) +{ + struct sta_info *sta = NULL; + + /* Userspace handles station allocation */ + if (sdata->u.mesh.user_mpm || + sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) { + if (mesh_peer_accepts_plinks(elems) && + mesh_plink_availables(sdata)) { + int sig = 0; + + if (ieee80211_hw_check(&sdata->local->hw, SIGNAL_DBM)) + sig = rx_status->signal; + + cfg80211_notify_new_peer_candidate(sdata->dev, addr, + elems->ie_start, + elems->total_len, + sig, GFP_KERNEL); + } + } else + sta = __mesh_sta_info_alloc(sdata, addr); + + return sta; +} + +/* + * mesh_sta_info_get - return mesh sta info entry for @addr. + * + * @sdata: local meshif + * @addr: peer's address + * @elems: IEs from beacon or mesh peering frame. + * @rx_status: rx status for the frame for signal reporting + * + * Return existing or newly allocated sta_info under RCU read lock. + * (re)initialize with given IEs. + */ +static struct sta_info * +mesh_sta_info_get(struct ieee80211_sub_if_data *sdata, + u8 *addr, struct ieee802_11_elems *elems, + struct ieee80211_rx_status *rx_status) __acquires(RCU) +{ + struct sta_info *sta = NULL; + + rcu_read_lock(); + sta = sta_info_get(sdata, addr); + if (sta) { + mesh_sta_info_init(sdata, sta, elems); + } else { + rcu_read_unlock(); + /* can't run atomic */ + sta = mesh_sta_info_alloc(sdata, addr, elems, rx_status); + if (!sta) { + rcu_read_lock(); + return NULL; + } + + mesh_sta_info_init(sdata, sta, elems); + + if (sta_info_insert_rcu(sta)) + return NULL; + } + + return sta; +} + +/* + * mesh_neighbour_update - update or initialize new mesh neighbor. + * + * @sdata: local meshif + * @addr: peer's address + * @elems: IEs from beacon or mesh peering frame + * @rx_status: rx status for the frame for signal reporting + * + * Initiates peering if appropriate. 
+ */ +void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata, + u8 *hw_addr, + struct ieee802_11_elems *elems, + struct ieee80211_rx_status *rx_status) +{ + struct sta_info *sta; + u64 changed = 0; + + sta = mesh_sta_info_get(sdata, hw_addr, elems, rx_status); + if (!sta) + goto out; + + sta->mesh->connected_to_gate = elems->mesh_config->meshconf_form & + IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE; + + if (mesh_peer_accepts_plinks(elems) && + sta->mesh->plink_state == NL80211_PLINK_LISTEN && + sdata->u.mesh.accepting_plinks && + sdata->u.mesh.mshcfg.auto_open_plinks && + rssi_threshold_check(sdata, sta)) + changed = mesh_plink_open(sta); + + ieee80211_mps_frame_release(sta, elems); +out: + rcu_read_unlock(); + ieee80211_mbss_info_change_notify(sdata, changed); +} + +void mesh_plink_timer(struct timer_list *t) +{ + struct mesh_sta *mesh = from_timer(mesh, t, plink_timer); + struct sta_info *sta; + u16 reason = 0; + struct ieee80211_sub_if_data *sdata; + struct mesh_config *mshcfg; + enum ieee80211_self_protected_actioncode action = 0; + + /* + * This STA is valid because sta_info_destroy() will + * del_timer_sync() this timer after having made sure + * it cannot be readded (by deleting the plink.) + */ + sta = mesh->plink_sta; + + if (sta->sdata->local->quiescing) + return; + + spin_lock_bh(&sta->mesh->plink_lock); + + /* If a timer fires just before a state transition on another CPU, + * we may have already extended the timeout and changed state by the + * time we've acquired the lock and arrived here. In that case, + * skip this timer and wait for the new one. + */ + if (time_before(jiffies, sta->mesh->plink_timer.expires)) { + mpl_dbg(sta->sdata, + "Ignoring timer for %pM in state %s (timer adjusted)", + sta->sta.addr, mplstates[sta->mesh->plink_state]); + spin_unlock_bh(&sta->mesh->plink_lock); + return; + } + + /* del_timer() and handler may race when entering these states */ + if (sta->mesh->plink_state == NL80211_PLINK_LISTEN || + sta->mesh->plink_state == NL80211_PLINK_ESTAB) { + mpl_dbg(sta->sdata, + "Ignoring timer for %pM in state %s (timer deleted)", + sta->sta.addr, mplstates[sta->mesh->plink_state]); + spin_unlock_bh(&sta->mesh->plink_lock); + return; + } + + mpl_dbg(sta->sdata, + "Mesh plink timer for %pM fired on state %s\n", + sta->sta.addr, mplstates[sta->mesh->plink_state]); + sdata = sta->sdata; + mshcfg = &sdata->u.mesh.mshcfg; + + switch (sta->mesh->plink_state) { + case NL80211_PLINK_OPN_RCVD: + case NL80211_PLINK_OPN_SNT: + /* retry timer */ + if (sta->mesh->plink_retries < mshcfg->dot11MeshMaxRetries) { + u32 rand; + mpl_dbg(sta->sdata, + "Mesh plink for %pM (retry, timeout): %d %d\n", + sta->sta.addr, sta->mesh->plink_retries, + sta->mesh->plink_timeout); + get_random_bytes(&rand, sizeof(u32)); + sta->mesh->plink_timeout = sta->mesh->plink_timeout + + rand % sta->mesh->plink_timeout; + ++sta->mesh->plink_retries; + mod_plink_timer(sta, sta->mesh->plink_timeout); + action = WLAN_SP_MESH_PEERING_OPEN; + break; + } + reason = WLAN_REASON_MESH_MAX_RETRIES; + fallthrough; + case NL80211_PLINK_CNF_RCVD: + /* confirm timer */ + if (!reason) + reason = WLAN_REASON_MESH_CONFIRM_TIMEOUT; + sta->mesh->plink_state = NL80211_PLINK_HOLDING; + mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout); + action = WLAN_SP_MESH_PEERING_CLOSE; + break; + case NL80211_PLINK_HOLDING: + /* holding timer */ + del_timer(&sta->mesh->plink_timer); + mesh_plink_fsm_restart(sta); + break; + default: + break; + } + spin_unlock_bh(&sta->mesh->plink_lock); + if (action) + 
mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr, + sta->mesh->llid, sta->mesh->plid, reason); +} + +static inline void mesh_plink_timer_set(struct sta_info *sta, u32 timeout) +{ + sta->mesh->plink_timeout = timeout; + mod_timer(&sta->mesh->plink_timer, jiffies + msecs_to_jiffies(timeout)); +} + +static bool llid_in_use(struct ieee80211_sub_if_data *sdata, + u16 llid) +{ + struct ieee80211_local *local = sdata->local; + bool in_use = false; + struct sta_info *sta; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (sdata != sta->sdata) + continue; + + if (!memcmp(&sta->mesh->llid, &llid, sizeof(llid))) { + in_use = true; + break; + } + } + rcu_read_unlock(); + + return in_use; +} + +static u16 mesh_get_new_llid(struct ieee80211_sub_if_data *sdata) +{ + u16 llid; + + do { + get_random_bytes(&llid, sizeof(llid)); + } while (llid_in_use(sdata, llid)); + + return llid; +} + +u64 mesh_plink_open(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + u64 changed; + + if (!test_sta_flag(sta, WLAN_STA_AUTH)) + return 0; + + spin_lock_bh(&sta->mesh->plink_lock); + sta->mesh->llid = mesh_get_new_llid(sdata); + if (sta->mesh->plink_state != NL80211_PLINK_LISTEN && + sta->mesh->plink_state != NL80211_PLINK_BLOCKED) { + spin_unlock_bh(&sta->mesh->plink_lock); + return 0; + } + sta->mesh->plink_state = NL80211_PLINK_OPN_SNT; + mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout); + spin_unlock_bh(&sta->mesh->plink_lock); + mpl_dbg(sdata, + "Mesh plink: starting establishment with %pM\n", + sta->sta.addr); + + /* set the non-peer mode to active during peering */ + changed = ieee80211_mps_local_status_update(sdata); + + mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_OPEN, + sta->sta.addr, sta->mesh->llid, 0, 0); + return changed; +} + +u64 mesh_plink_block(struct sta_info *sta) +{ + u64 changed; + + spin_lock_bh(&sta->mesh->plink_lock); + changed = __mesh_plink_deactivate(sta); + sta->mesh->plink_state = NL80211_PLINK_BLOCKED; + spin_unlock_bh(&sta->mesh->plink_lock); + mesh_path_flush_by_nexthop(sta); + + return changed; +} + +static void mesh_plink_close(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + enum plink_event event) +{ + struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg; + u16 reason = (event == CLS_ACPT) ? 
+ WLAN_REASON_MESH_CLOSE : WLAN_REASON_MESH_CONFIG; + + sta->mesh->reason = reason; + sta->mesh->plink_state = NL80211_PLINK_HOLDING; + mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout); +} + +static u64 mesh_plink_establish(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta) +{ + struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg; + u64 changed = 0; + + del_timer(&sta->mesh->plink_timer); + sta->mesh->plink_state = NL80211_PLINK_ESTAB; + changed |= mesh_plink_inc_estab_count(sdata); + changed |= mesh_set_ht_prot_mode(sdata); + changed |= mesh_set_short_slot_time(sdata); + mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n", sta->sta.addr); + ieee80211_mps_sta_status_update(sta); + changed |= ieee80211_mps_set_sta_local_pm(sta, mshcfg->power_mode); + return changed; +} + +/** + * mesh_plink_fsm - step @sta MPM based on @event + * + * @sdata: interface + * @sta: mesh neighbor + * @event: peering event + * + * Return: changed MBSS flags + */ +static u64 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, enum plink_event event) +{ + struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg; + enum ieee80211_self_protected_actioncode action = 0; + u64 changed = 0; + bool flush = false; + + mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr, + mplstates[sta->mesh->plink_state], mplevents[event]); + + spin_lock_bh(&sta->mesh->plink_lock); + switch (sta->mesh->plink_state) { + case NL80211_PLINK_LISTEN: + switch (event) { + case CLS_ACPT: + mesh_plink_fsm_restart(sta); + break; + case OPN_ACPT: + sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD; + sta->mesh->llid = mesh_get_new_llid(sdata); + mesh_plink_timer_set(sta, + mshcfg->dot11MeshRetryTimeout); + + /* set the non-peer mode to active during peering */ + changed |= ieee80211_mps_local_status_update(sdata); + action = WLAN_SP_MESH_PEERING_OPEN; + break; + default: + break; + } + break; + case NL80211_PLINK_OPN_SNT: + switch (event) { + case OPN_RJCT: + case CNF_RJCT: + case CLS_ACPT: + mesh_plink_close(sdata, sta, event); + action = WLAN_SP_MESH_PEERING_CLOSE; + break; + case OPN_ACPT: + /* retry timer is left untouched */ + sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD; + action = WLAN_SP_MESH_PEERING_CONFIRM; + break; + case CNF_ACPT: + sta->mesh->plink_state = NL80211_PLINK_CNF_RCVD; + mod_plink_timer(sta, mshcfg->dot11MeshConfirmTimeout); + break; + default: + break; + } + break; + case NL80211_PLINK_OPN_RCVD: + switch (event) { + case OPN_RJCT: + case CNF_RJCT: + case CLS_ACPT: + mesh_plink_close(sdata, sta, event); + action = WLAN_SP_MESH_PEERING_CLOSE; + break; + case OPN_ACPT: + action = WLAN_SP_MESH_PEERING_CONFIRM; + break; + case CNF_ACPT: + changed |= mesh_plink_establish(sdata, sta); + break; + default: + break; + } + break; + case NL80211_PLINK_CNF_RCVD: + switch (event) { + case OPN_RJCT: + case CNF_RJCT: + case CLS_ACPT: + mesh_plink_close(sdata, sta, event); + action = WLAN_SP_MESH_PEERING_CLOSE; + break; + case OPN_ACPT: + changed |= mesh_plink_establish(sdata, sta); + action = WLAN_SP_MESH_PEERING_CONFIRM; + break; + default: + break; + } + break; + case NL80211_PLINK_ESTAB: + switch (event) { + case CLS_ACPT: + changed |= __mesh_plink_deactivate(sta); + changed |= mesh_set_ht_prot_mode(sdata); + changed |= mesh_set_short_slot_time(sdata); + mesh_plink_close(sdata, sta, event); + action = WLAN_SP_MESH_PEERING_CLOSE; + flush = true; + break; + case OPN_ACPT: + action = WLAN_SP_MESH_PEERING_CONFIRM; + break; + default: + break; + } + break; + case NL80211_PLINK_HOLDING: + switch 
(event) {
+ case CLS_ACPT:
+ del_timer(&sta->mesh->plink_timer);
+ mesh_plink_fsm_restart(sta);
+ break;
+ case OPN_ACPT:
+ case CNF_ACPT:
+ case OPN_RJCT:
+ case CNF_RJCT:
+ action = WLAN_SP_MESH_PEERING_CLOSE;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ /* should not get here, PLINK_BLOCKED is dealt with at the
+ * beginning of the function
+ */
+ break;
+ }
+ spin_unlock_bh(&sta->mesh->plink_lock);
+ if (flush)
+ mesh_path_flush_by_nexthop(sta);
+ if (action) {
+ mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
+ sta->mesh->llid, sta->mesh->plid,
+ sta->mesh->reason);
+
+ /* also send confirm in open case */
+ if (action == WLAN_SP_MESH_PEERING_OPEN) {
+ mesh_plink_frame_tx(sdata, sta,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, sta->mesh->llid,
+ sta->mesh->plid, 0);
+ }
+ }
+
+ return changed;
+}
+
+/*
+ * mesh_plink_get_event - get correct MPM event
+ *
+ * @sdata: interface
+ * @sta: peer, leave NULL if processing a frame from a new suitable peer
+ * @elems: peering management IEs
+ * @ftype: frame type
+ * @llid: peer's peer link ID
+ * @plid: peer's local link ID
+ *
+ * Return: new peering event for @sta, but PLINK_UNDEFINED should be treated as
+ * an error.
+ */
+static enum plink_event
+mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
+ struct ieee802_11_elems *elems,
+ enum ieee80211_self_protected_actioncode ftype,
+ u16 llid, u16 plid)
+{
+ enum plink_event event = PLINK_UNDEFINED;
+ u8 ie_len = elems->peering_len;
+ bool matches_local;
+
+ matches_local = (ftype == WLAN_SP_MESH_PEERING_CLOSE ||
+ mesh_matches_local(sdata, elems));
+
+ /* deny open request from non-matching peer */
+ if (!matches_local && !sta) {
+ event = OPN_RJCT;
+ goto out;
+ }
+
+ if (!sta) {
+ if (ftype != WLAN_SP_MESH_PEERING_OPEN) {
+ mpl_dbg(sdata, "Mesh plink: cls or cnf from unknown peer\n");
+ goto out;
+ }
+ /* ftype == WLAN_SP_MESH_PEERING_OPEN */
+ if (!mesh_plink_free_count(sdata)) {
+ mpl_dbg(sdata, "Mesh plink error: no more free plinks\n");
+ goto out;
+ }
+
+ /* new matching peer */
+ event = OPN_ACPT;
+ goto out;
+ } else {
+ if (!test_sta_flag(sta, WLAN_STA_AUTH)) {
+ mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n");
+ goto out;
+ }
+ if (sta->mesh->plink_state == NL80211_PLINK_BLOCKED)
+ goto out;
+ }
+
+ switch (ftype) {
+ case WLAN_SP_MESH_PEERING_OPEN:
+ if (!matches_local)
+ event = OPN_RJCT;
+ else if (!mesh_plink_free_count(sdata) ||
+ (sta->mesh->plid && sta->mesh->plid != plid))
+ event = OPN_IGNR;
+ else
+ event = OPN_ACPT;
+ break;
+ case WLAN_SP_MESH_PEERING_CONFIRM:
+ if (!matches_local)
+ event = CNF_RJCT;
+ else if (!mesh_plink_free_count(sdata) ||
+ sta->mesh->llid != llid ||
+ (sta->mesh->plid && sta->mesh->plid != plid))
+ event = CNF_IGNR;
+ else
+ event = CNF_ACPT;
+ break;
+ case WLAN_SP_MESH_PEERING_CLOSE:
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+ /* Do not check for llid or plid. This does not
+ * follow the standard but since multiple plinks
+ * per sta are not supported, it is necessary in
+ * order to avoid a livelock when MP A sees an
+ * established peer link to MP B but MP B does not
+ * see it. This can be caused by a timeout in
+ * B's peer link establishment or B being
+ * restarted.
+ */ + event = CLS_ACPT; + else if (sta->mesh->plid != plid) + event = CLS_IGNR; + else if (ie_len == 8 && sta->mesh->llid != llid) + event = CLS_IGNR; + else + event = CLS_ACPT; + break; + default: + mpl_dbg(sdata, "Mesh plink: unknown frame subtype\n"); + break; + } + +out: + return event; +} + +static void +mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + struct ieee802_11_elems *elems, + struct ieee80211_rx_status *rx_status) +{ + + struct sta_info *sta; + enum plink_event event; + enum ieee80211_self_protected_actioncode ftype; + u64 changed = 0; + u8 ie_len = elems->peering_len; + u16 plid, llid = 0; + + if (!elems->peering) { + mpl_dbg(sdata, + "Mesh plink: missing necessary peer link ie\n"); + return; + } + + if (elems->rsn_len && + sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) { + mpl_dbg(sdata, + "Mesh plink: can't establish link with secure peer\n"); + return; + } + + ftype = mgmt->u.action.u.self_prot.action_code; + if ((ftype == WLAN_SP_MESH_PEERING_OPEN && ie_len != 4) || + (ftype == WLAN_SP_MESH_PEERING_CONFIRM && ie_len != 6) || + (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len != 6 + && ie_len != 8)) { + mpl_dbg(sdata, + "Mesh plink: incorrect plink ie length %d %d\n", + ftype, ie_len); + return; + } + + if (ftype != WLAN_SP_MESH_PEERING_CLOSE && + (!elems->mesh_id || !elems->mesh_config)) { + mpl_dbg(sdata, "Mesh plink: missing necessary ie\n"); + return; + } + /* Note the lines below are correct, the llid in the frame is the plid + * from the point of view of this host. + */ + plid = get_unaligned_le16(PLINK_GET_LLID(elems->peering)); + if (ftype == WLAN_SP_MESH_PEERING_CONFIRM || + (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8)) + llid = get_unaligned_le16(PLINK_GET_PLID(elems->peering)); + + /* WARNING: Only for sta pointer, is dropped & re-acquired */ + rcu_read_lock(); + + sta = sta_info_get(sdata, mgmt->sa); + + if (ftype == WLAN_SP_MESH_PEERING_OPEN && + !rssi_threshold_check(sdata, sta)) { + mpl_dbg(sdata, "Mesh plink: %pM does not meet rssi threshold\n", + mgmt->sa); + goto unlock_rcu; + } + + /* Now we will figure out the appropriate event... 
*/ + event = mesh_plink_get_event(sdata, sta, elems, ftype, llid, plid); + + if (event == OPN_ACPT) { + rcu_read_unlock(); + /* allocate sta entry if necessary and update info */ + sta = mesh_sta_info_get(sdata, mgmt->sa, elems, rx_status); + if (!sta) { + mpl_dbg(sdata, "Mesh plink: failed to init peer!\n"); + goto unlock_rcu; + } + sta->mesh->plid = plid; + } else if (!sta && event == OPN_RJCT) { + mesh_plink_frame_tx(sdata, NULL, WLAN_SP_MESH_PEERING_CLOSE, + mgmt->sa, 0, plid, + WLAN_REASON_MESH_CONFIG); + goto unlock_rcu; + } else if (!sta || event == PLINK_UNDEFINED) { + /* something went wrong */ + goto unlock_rcu; + } + + if (event == CNF_ACPT) { + /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */ + if (!sta->mesh->plid) + sta->mesh->plid = plid; + + sta->mesh->aid = get_unaligned_le16(PLINK_CNF_AID(mgmt)); + } + + changed |= mesh_plink_fsm(sdata, sta, event); + +unlock_rcu: + rcu_read_unlock(); + + if (changed) + ieee80211_mbss_info_change_notify(sdata, changed); +} + +void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee80211_rx_status *rx_status) +{ + struct ieee802_11_elems *elems; + size_t baselen; + u8 *baseaddr; + + /* need action_code, aux */ + if (len < IEEE80211_MIN_ACTION_SIZE + 3) + return; + + if (sdata->u.mesh.user_mpm) + /* userspace must register for these */ + return; + + if (is_multicast_ether_addr(mgmt->da)) { + mpl_dbg(sdata, + "Mesh plink: ignore frame from multicast address\n"); + return; + } + + baseaddr = mgmt->u.action.u.self_prot.variable; + baselen = (u8 *) mgmt->u.action.u.self_prot.variable - (u8 *) mgmt; + if (mgmt->u.action.u.self_prot.action_code == + WLAN_SP_MESH_PEERING_CONFIRM) { + baseaddr += 4; + baselen += 4; + + if (baselen > len) + return; + } + elems = ieee802_11_parse_elems(baseaddr, len - baselen, true, NULL); + if (elems) { + mesh_process_plink_frame(sdata, mgmt, elems, rx_status); + kfree(elems); + } +} diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c new file mode 100644 index 0000000000..35eacca43e --- /dev/null +++ b/net/mac80211/mesh_ps.c @@ -0,0 +1,608 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2012-2013, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de> + * Copyright 2012-2013, cozybit Inc. 
+ * Copyright (C) 2021 Intel Corporation + * Copyright (C) 2023 Intel Corporation + */ + +#include "mesh.h" +#include "wme.h" + + +/* mesh PS management */ + +/** + * mps_qos_null_get - create pre-addressed QoS Null frame for mesh powersave + * @sta: the station to get the frame for + */ +static struct sk_buff *mps_qos_null_get(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_hdr *nullfunc; /* use 4addr header */ + struct sk_buff *skb; + int size = sizeof(*nullfunc); + __le16 fc; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + size + 2); + if (!skb) + return NULL; + skb_reserve(skb, local->hw.extra_tx_headroom); + + nullfunc = skb_put(skb, size); + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); + ieee80211_fill_mesh_addresses(nullfunc, &fc, sta->sta.addr, + sdata->vif.addr); + nullfunc->frame_control = fc; + nullfunc->duration_id = 0; + nullfunc->seq_ctrl = 0; + /* no address resolution for this frame -> set addr 1 immediately */ + memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); + skb_put_zero(skb, 2); /* append QoS control field */ + ieee80211_mps_set_frame_flags(sdata, sta, nullfunc); + + return skb; +} + +/** + * mps_qos_null_tx - send a QoS Null to indicate link-specific power mode + * @sta: the station to send to + */ +static void mps_qos_null_tx(struct sta_info *sta) +{ + struct sk_buff *skb; + + skb = mps_qos_null_get(sta); + if (!skb) + return; + + mps_dbg(sta->sdata, "announcing peer-specific power mode to %pM\n", + sta->sta.addr); + + /* don't unintentionally start a MPSP */ + if (!test_sta_flag(sta, WLAN_STA_PS_STA)) { + u8 *qc = ieee80211_get_qos_ctl((void *) skb->data); + + qc[0] |= IEEE80211_QOS_CTL_EOSP; + } + + ieee80211_tx_skb(sta->sdata, skb); +} + +/** + * ieee80211_mps_local_status_update - track status of local link-specific PMs + * + * @sdata: local mesh subif + * + * sets the non-peer power mode and triggers the driver PS (re-)configuration + * Return BSS_CHANGED_BEACON if a beacon update is necessary. + */ +u64 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct sta_info *sta; + bool peering = false; + int light_sleep_cnt = 0; + int deep_sleep_cnt = 0; + u64 changed = 0; + enum nl80211_mesh_power_mode nonpeer_pm; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { + if (sdata != sta->sdata) + continue; + + switch (sta->mesh->plink_state) { + case NL80211_PLINK_OPN_SNT: + case NL80211_PLINK_OPN_RCVD: + case NL80211_PLINK_CNF_RCVD: + peering = true; + break; + case NL80211_PLINK_ESTAB: + if (sta->mesh->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP) + light_sleep_cnt++; + else if (sta->mesh->local_pm == NL80211_MESH_POWER_DEEP_SLEEP) + deep_sleep_cnt++; + break; + default: + break; + } + } + rcu_read_unlock(); + + /* + * Set non-peer mode to active during peering/scanning/authentication + * (see IEEE802.11-2012 13.14.8.3). The non-peer mesh power mode is + * deep sleep if the local STA is in light or deep sleep towards at + * least one mesh peer (see 13.14.3.1). Otherwise, set it to the + * user-configured default value. 
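The three-way decision just described reduces to a small pure function over the counters gathered in the station loop. A sketch, with an illustrative enum standing in for nl80211_mesh_power_mode:

#include <stdbool.h>

enum mesh_pm { PM_ACTIVE, PM_LIGHT_SLEEP, PM_DEEP_SLEEP };

static enum mesh_pm pick_nonpeer_pm(bool peering, int light_sleep_cnt,
				    int deep_sleep_cnt, enum mesh_pm user_default)
{
	if (peering)
		return PM_ACTIVE;	/* stay awake while peering (13.14.8.3) */
	if (light_sleep_cnt || deep_sleep_cnt)
		return PM_DEEP_SLEEP;	/* sleeping towards at least one peer (13.14.3.1) */
	return user_default;		/* user-configured default otherwise */
}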
+ */ + if (peering) { + mps_dbg(sdata, "setting non-peer PM to active for peering\n"); + nonpeer_pm = NL80211_MESH_POWER_ACTIVE; + } else if (light_sleep_cnt || deep_sleep_cnt) { + mps_dbg(sdata, "setting non-peer PM to deep sleep\n"); + nonpeer_pm = NL80211_MESH_POWER_DEEP_SLEEP; + } else { + mps_dbg(sdata, "setting non-peer PM to user value\n"); + nonpeer_pm = ifmsh->mshcfg.power_mode; + } + + /* need update if sleep counts move between 0 and non-zero */ + if (ifmsh->nonpeer_pm != nonpeer_pm || + !ifmsh->ps_peers_light_sleep != !light_sleep_cnt || + !ifmsh->ps_peers_deep_sleep != !deep_sleep_cnt) + changed = BSS_CHANGED_BEACON; + + ifmsh->nonpeer_pm = nonpeer_pm; + ifmsh->ps_peers_light_sleep = light_sleep_cnt; + ifmsh->ps_peers_deep_sleep = deep_sleep_cnt; + + return changed; +} + +/** + * ieee80211_mps_set_sta_local_pm - set local PM towards a mesh STA + * + * @sta: mesh STA + * @pm: the power mode to set + * Return BSS_CHANGED_BEACON if a beacon update is in order. + */ +u64 ieee80211_mps_set_sta_local_pm(struct sta_info *sta, + enum nl80211_mesh_power_mode pm) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + + if (sta->mesh->local_pm == pm) + return 0; + + mps_dbg(sdata, "local STA operates in mode %d with %pM\n", + pm, sta->sta.addr); + + sta->mesh->local_pm = pm; + + /* + * announce peer-specific power mode transition + * (see IEEE802.11-2012 13.14.3.2 and 13.14.3.3) + */ + if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) + mps_qos_null_tx(sta); + + return ieee80211_mps_local_status_update(sdata); +} + +/** + * ieee80211_mps_set_frame_flags - set mesh PS flags in FC (and QoS Control) + * + * @sdata: local mesh subif + * @sta: mesh STA + * @hdr: 802.11 frame header + * + * see IEEE802.11-2012 8.2.4.1.7 and 8.2.4.5.11 + * + * NOTE: sta must be given when an individually-addressed QoS frame header + * is handled, for group-addressed and management frames it is not used + */ +void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee80211_hdr *hdr) +{ + enum nl80211_mesh_power_mode pm; + u8 *qc; + + if (WARN_ON(is_unicast_ether_addr(hdr->addr1) && + ieee80211_is_data_qos(hdr->frame_control) && + !sta)) + return; + + if (is_unicast_ether_addr(hdr->addr1) && + ieee80211_is_data_qos(hdr->frame_control) && + sta->mesh->plink_state == NL80211_PLINK_ESTAB) + pm = sta->mesh->local_pm; + else + pm = sdata->u.mesh.nonpeer_pm; + + if (pm == NL80211_MESH_POWER_ACTIVE) + hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_PM); + else + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return; + + qc = ieee80211_get_qos_ctl(hdr); + + if ((is_unicast_ether_addr(hdr->addr1) && + pm == NL80211_MESH_POWER_DEEP_SLEEP) || + (is_multicast_ether_addr(hdr->addr1) && + sdata->u.mesh.ps_peers_deep_sleep > 0)) + qc[1] |= (IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8); + else + qc[1] &= ~(IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8); +} + +/** + * ieee80211_mps_sta_status_update - update buffering status of neighbor STA + * + * @sta: mesh STA + * + * called after change of peering status or non-peer/peer-specific power mode + */ +void ieee80211_mps_sta_status_update(struct sta_info *sta) +{ + enum nl80211_mesh_power_mode pm; + bool do_buffer; + + /* For non-assoc STA, prevent buffering or frame transmission */ + if (sta->sta_state < IEEE80211_STA_ASSOC) + return; + + /* + * use peer-specific power mode if peering is established and the + * peer's power mode is known + */ + if (sta->mesh->plink_state == 
NL80211_PLINK_ESTAB && + sta->mesh->peer_pm != NL80211_MESH_POWER_UNKNOWN) + pm = sta->mesh->peer_pm; + else + pm = sta->mesh->nonpeer_pm; + + do_buffer = (pm != NL80211_MESH_POWER_ACTIVE); + + /* clear the MPSP flags for non-peers or active STA */ + if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) { + clear_sta_flag(sta, WLAN_STA_MPSP_OWNER); + clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT); + } else if (!do_buffer) { + clear_sta_flag(sta, WLAN_STA_MPSP_OWNER); + } + + /* Don't let the same PS state be set twice */ + if (test_sta_flag(sta, WLAN_STA_PS_STA) == do_buffer) + return; + + if (do_buffer) { + set_sta_flag(sta, WLAN_STA_PS_STA); + atomic_inc(&sta->sdata->u.mesh.ps.num_sta_ps); + mps_dbg(sta->sdata, "start PS buffering frames towards %pM\n", + sta->sta.addr); + } else { + ieee80211_sta_ps_deliver_wakeup(sta); + } +} + +static void mps_set_sta_peer_pm(struct sta_info *sta, + struct ieee80211_hdr *hdr) +{ + enum nl80211_mesh_power_mode pm; + u8 *qc = ieee80211_get_qos_ctl(hdr); + + /* + * Test Power Management field of frame control (PW) and + * mesh power save level subfield of QoS control field (PSL) + * + * | PM | PSL| Mesh PM | + * +----+----+---------+ + * | 0 |Rsrv| Active | + * | 1 | 0 | Light | + * | 1 | 1 | Deep | + */ + if (ieee80211_has_pm(hdr->frame_control)) { + if (qc[1] & (IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8)) + pm = NL80211_MESH_POWER_DEEP_SLEEP; + else + pm = NL80211_MESH_POWER_LIGHT_SLEEP; + } else { + pm = NL80211_MESH_POWER_ACTIVE; + } + + if (sta->mesh->peer_pm == pm) + return; + + mps_dbg(sta->sdata, "STA %pM enters mode %d\n", + sta->sta.addr, pm); + + sta->mesh->peer_pm = pm; + + ieee80211_mps_sta_status_update(sta); +} + +static void mps_set_sta_nonpeer_pm(struct sta_info *sta, + struct ieee80211_hdr *hdr) +{ + enum nl80211_mesh_power_mode pm; + + if (ieee80211_has_pm(hdr->frame_control)) + pm = NL80211_MESH_POWER_DEEP_SLEEP; + else + pm = NL80211_MESH_POWER_ACTIVE; + + if (sta->mesh->nonpeer_pm == pm) + return; + + mps_dbg(sta->sdata, "STA %pM sets non-peer mode to %d\n", + sta->sta.addr, pm); + + sta->mesh->nonpeer_pm = pm; + + ieee80211_mps_sta_status_update(sta); +} + +/** + * ieee80211_mps_rx_h_sta_process - frame receive handler for mesh powersave + * + * @sta: STA info that transmitted the frame + * @hdr: IEEE 802.11 (QoS) Header + */ +void ieee80211_mps_rx_h_sta_process(struct sta_info *sta, + struct ieee80211_hdr *hdr) +{ + if (is_unicast_ether_addr(hdr->addr1) && + ieee80211_is_data_qos(hdr->frame_control)) { + /* + * individually addressed QoS Data/Null frames contain + * peer link-specific PS mode towards the local STA + */ + mps_set_sta_peer_pm(sta, hdr); + + /* check for mesh Peer Service Period trigger frames */ + ieee80211_mpsp_trigger_process(ieee80211_get_qos_ctl(hdr), + sta, false, false); + } else { + /* + * can only determine non-peer PS mode + * (see IEEE802.11-2012 8.2.4.1.7) + */ + mps_set_sta_nonpeer_pm(sta, hdr); + } +} + + +/* mesh PS frame release */ + +static void mpsp_trigger_send(struct sta_info *sta, bool rspi, bool eosp) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct sk_buff *skb; + struct ieee80211_hdr *nullfunc; + struct ieee80211_tx_info *info; + u8 *qc; + + skb = mps_qos_null_get(sta); + if (!skb) + return; + + nullfunc = (struct ieee80211_hdr *) skb->data; + if (!eosp) + nullfunc->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + /* + * | RSPI | EOSP | MPSP triggering | + * +------+------+--------------------+ + * | 0 | 0 | local STA is owner | + * | 0 | 1 | no MPSP (MPSP end) | + * | 1 | 0 | 
both STA are owner | + * | 1 | 1 | peer STA is owner | see IEEE802.11-2012 13.14.9.2 + */ + qc = ieee80211_get_qos_ctl(nullfunc); + if (rspi) + qc[1] |= (IEEE80211_QOS_CTL_RSPI >> 8); + if (eosp) + qc[0] |= IEEE80211_QOS_CTL_EOSP; + + info = IEEE80211_SKB_CB(skb); + + info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER | + IEEE80211_TX_CTL_REQ_TX_STATUS; + + mps_dbg(sdata, "sending MPSP trigger%s%s to %pM\n", + rspi ? " RSPI" : "", eosp ? " EOSP" : "", sta->sta.addr); + + ieee80211_tx_skb(sdata, skb); +} + +/** + * mpsp_qos_null_append - append QoS Null frame to MPSP skb queue if needed + * @sta: the station to handle + * @frames: the frame list to append to + * + * To properly end a mesh MPSP the last transmitted frame has to set the EOSP + * flag in the QoS Control field. In case the current tailing frame is not a + * QoS Data frame, append a QoS Null to carry the flag. + */ +static void mpsp_qos_null_append(struct sta_info *sta, + struct sk_buff_head *frames) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct sk_buff *new_skb, *skb = skb_peek_tail(frames); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_tx_info *info; + + if (ieee80211_is_data_qos(hdr->frame_control)) + return; + + new_skb = mps_qos_null_get(sta); + if (!new_skb) + return; + + mps_dbg(sdata, "appending QoS Null in MPSP towards %pM\n", + sta->sta.addr); + /* + * This frame has to be transmitted last. Assign lowest priority to + * make sure it cannot pass other frames when releasing multiple ACs. + */ + new_skb->priority = 1; + skb_set_queue_mapping(new_skb, IEEE80211_AC_BK); + ieee80211_set_qos_hdr(sdata, new_skb); + + info = IEEE80211_SKB_CB(new_skb); + info->control.vif = &sdata->vif; + info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING; + + __skb_queue_tail(frames, new_skb); +} + +/** + * mps_frame_deliver - transmit frames during mesh powersave + * + * @sta: STA info to transmit to + * @n_frames: number of frames to transmit. -1 for all + */ +static void mps_frame_deliver(struct sta_info *sta, int n_frames) +{ + struct ieee80211_local *local = sta->sdata->local; + int ac; + struct sk_buff_head frames; + struct sk_buff *skb; + bool more_data = false; + + skb_queue_head_init(&frames); + + /* collect frame(s) from buffers */ + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + while (n_frames != 0) { + skb = skb_dequeue(&sta->tx_filtered[ac]); + if (!skb) { + skb = skb_dequeue( + &sta->ps_tx_buf[ac]); + if (skb) + local->total_ps_buffered--; + } + if (!skb) + break; + n_frames--; + __skb_queue_tail(&frames, skb); + } + + if (!skb_queue_empty(&sta->tx_filtered[ac]) || + !skb_queue_empty(&sta->ps_tx_buf[ac])) + more_data = true; + } + + /* nothing to send? -> EOSP */ + if (skb_queue_empty(&frames)) { + mpsp_trigger_send(sta, false, true); + return; + } + + /* in a MPSP make sure the last skb is a QoS Data frame */ + if (test_sta_flag(sta, WLAN_STA_MPSP_OWNER)) + mpsp_qos_null_append(sta, &frames); + + mps_dbg(sta->sdata, "sending %d frames to PS STA %pM\n", + skb_queue_len(&frames), sta->sta.addr); + + /* prepare collected frames for transmission */ + skb_queue_walk(&frames, skb) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *) skb->data; + + /* + * Tell TX path to send this frame even though the + * STA may still remain is PS mode after this frame + * exchange. 
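The RSPI/EOSP table in mpsp_trigger_send() above determines who owns the service period these buffered frames are released in. A small decoder of those two bits, with illustrative names only:

#include <stdbool.h>

enum mpsp_owner { MPSP_NONE, MPSP_LOCAL_OWNER, MPSP_PEER_OWNER, MPSP_BOTH_OWNERS };

static enum mpsp_owner mpsp_owner_from_trigger(bool rspi, bool eosp)
{
	if (!rspi && !eosp)
		return MPSP_LOCAL_OWNER;	/* local STA is owner */
	if (!rspi && eosp)
		return MPSP_NONE;		/* no MPSP, or MPSP end */
	if (rspi && !eosp)
		return MPSP_BOTH_OWNERS;	/* both STAs are owners */
	return MPSP_PEER_OWNER;			/* RSPI and EOSP: peer STA is owner */
}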
+ */ + info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; + + if (more_data || !skb_queue_is_last(&frames, skb)) + hdr->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + else + hdr->frame_control &= + cpu_to_le16(~IEEE80211_FCTL_MOREDATA); + + if (skb_queue_is_last(&frames, skb) && + ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qoshdr = ieee80211_get_qos_ctl(hdr); + + /* MPSP trigger frame ends service period */ + *qoshdr |= IEEE80211_QOS_CTL_EOSP; + info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + } + } + + ieee80211_add_pending_skbs(local, &frames); + sta_info_recalc_tim(sta); +} + +/** + * ieee80211_mpsp_trigger_process - track status of mesh Peer Service Periods + * + * @qc: QoS Control field + * @sta: peer to start a MPSP with + * @tx: frame was transmitted by the local STA + * @acked: frame has been transmitted successfully + * + * NOTE: active mode STA may only serve as MPSP owner + */ +void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta, + bool tx, bool acked) +{ + u8 rspi = qc[1] & (IEEE80211_QOS_CTL_RSPI >> 8); + u8 eosp = qc[0] & IEEE80211_QOS_CTL_EOSP; + + if (tx) { + if (rspi && acked) + set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT); + + if (eosp) + clear_sta_flag(sta, WLAN_STA_MPSP_OWNER); + else if (acked && + test_sta_flag(sta, WLAN_STA_PS_STA) && + !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER)) + mps_frame_deliver(sta, -1); + } else { + if (eosp) + clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT); + else if (sta->mesh->local_pm != NL80211_MESH_POWER_ACTIVE) + set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT); + + if (rspi && !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER)) + mps_frame_deliver(sta, -1); + } +} + +/** + * ieee80211_mps_frame_release - release frames buffered due to mesh power save + * + * @sta: mesh STA + * @elems: IEs of beacon or probe response + * + * For peers if we have individually-addressed frames buffered or the peer + * indicates buffered frames, send a corresponding MPSP trigger frame. Since + * we do not evaluate the awake window duration, QoS Nulls are used as MPSP + * trigger frames. If the neighbour STA is not a peer, only send single frames. + */ +void ieee80211_mps_frame_release(struct sta_info *sta, + struct ieee802_11_elems *elems) +{ + int ac, buffer_local = 0; + bool has_buffered = false; + + if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) + has_buffered = ieee80211_check_tim(elems->tim, elems->tim_len, + sta->mesh->aid); + + if (has_buffered) + mps_dbg(sta->sdata, "%pM indicates buffered frames\n", + sta->sta.addr); + + /* only transmit to PS STA with announced, non-zero awake window */ + if (test_sta_flag(sta, WLAN_STA_PS_STA) && + (!elems->awake_window || !get_unaligned_le16(elems->awake_window))) + return; + + if (!test_sta_flag(sta, WLAN_STA_MPSP_OWNER)) + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + buffer_local += skb_queue_len(&sta->ps_tx_buf[ac]) + + skb_queue_len(&sta->tx_filtered[ac]); + + if (!has_buffered && !buffer_local) + return; + + if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) + mpsp_trigger_send(sta, has_buffered, !buffer_local); + else + mps_frame_deliver(sta, 1); +} diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c new file mode 100644 index 0000000000..9e342cc250 --- /dev/null +++ b/net/mac80211/mesh_sync.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2011-2012, Pavel Zubarev <pavel.zubarev@gmail.com> + * Copyright 2011-2012, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de> + * Copyright 2011-2012, cozybit Inc. 
+ * Copyright (C) 2021 Intel Corporation + */ + +#include "ieee80211_i.h" +#include "mesh.h" +#include "driver-ops.h" + +/* This is not in the standard. It represents a tolerable tsf drift below + * which we do no TSF adjustment. + */ +#define TOFFSET_MINIMUM_ADJUSTMENT 10 + +/* This is not in the standard. It is a margin added to the + * Toffset setpoint to mitigate TSF overcorrection + * introduced by TSF adjustment latency. + */ +#define TOFFSET_SET_MARGIN 20 + +/* This is not in the standard. It represents the maximum Toffset jump above + * which we'll invalidate the Toffset setpoint and choose a new setpoint. This + * could be, for instance, in case a neighbor is restarted and its TSF counter + * reset. + */ +#define TOFFSET_MAXIMUM_ADJUSTMENT 800 /* 0.8 ms */ + +struct sync_method { + u8 method; + struct ieee80211_mesh_sync_ops ops; +}; + +/** + * mesh_peer_tbtt_adjusting - check if an mp is currently adjusting its TBTT + * + * @cfg: mesh config element from the mesh peer (or %NULL) + */ +static bool mesh_peer_tbtt_adjusting(const struct ieee80211_meshconf_ie *cfg) +{ + return cfg && + (cfg->meshconf_cap & IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING); +} + +void mesh_sync_adjust_tsf(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + /* sdata->vif.bss_conf.beacon_int in 1024us units, 0.04% */ + u64 beacon_int_fraction = sdata->vif.bss_conf.beacon_int * 1024 / 2500; + u64 tsf; + u64 tsfdelta; + + spin_lock_bh(&ifmsh->sync_offset_lock); + if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) { + msync_dbg(sdata, "TSF : max clockdrift=%lld; adjusting\n", + (long long) ifmsh->sync_offset_clockdrift_max); + tsfdelta = -ifmsh->sync_offset_clockdrift_max; + ifmsh->sync_offset_clockdrift_max = 0; + } else { + msync_dbg(sdata, "TSF : max clockdrift=%lld; adjusting by %llu\n", + (long long) ifmsh->sync_offset_clockdrift_max, + (unsigned long long) beacon_int_fraction); + tsfdelta = -beacon_int_fraction; + ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction; + } + spin_unlock_bh(&ifmsh->sync_offset_lock); + + if (local->ops->offset_tsf) { + drv_offset_tsf(local, sdata, tsfdelta); + } else { + tsf = drv_get_tsf(local, sdata); + if (tsf != -1ULL) + drv_set_tsf(local, sdata, tsf + tsfdelta); + } +} + +static void +mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, u16 stype, + struct ieee80211_mgmt *mgmt, unsigned int len, + const struct ieee80211_meshconf_ie *mesh_cfg, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + u64 t_t, t_r; + + WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET); + + /* standard mentions only beacons */ + if (stype != IEEE80211_STYPE_BEACON) + return; + + /* + * Get time when timestamp field was received. If we don't + * have rx timestamps, then use current tsf as an approximation. + * drv_get_tsf() must be called before entering the rcu-read + * section. 
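The Toffset bookkeeping that follows (13.13.2.2.2) boils down to: the offset to a neighbour is its beacon timestamp minus our receive time, drift is measured against a stored setpoint, and a jump beyond TOFFSET_MAXIMUM_ADJUSTMENT invalidates the setpoint. A userspace sketch of that state machine, with plain integer stand-ins for the kernel fields:

#include <stdbool.h>
#include <stdint.h>

#define MAX_ADJ		800	/* like TOFFSET_MAXIMUM_ADJUSTMENT, in us */
#define SET_MARGIN	20	/* like TOFFSET_SET_MARGIN */

struct neigh_sync {
	bool setpoint_valid;
	int64_t t_offset;
	int64_t t_offset_setpoint;
};

/* returns the drift to feed into the clockdrift_max tracking, 0 if none */
static int64_t update_toffset(struct neigh_sync *n, uint64_t t_t, uint64_t t_r)
{
	int64_t drift;

	n->t_offset = (int64_t)(t_t - t_r);

	if (!n->setpoint_valid) {
		/* first usable sample: learn a setpoint with a margin */
		n->t_offset_setpoint = n->t_offset - SET_MARGIN;
		n->setpoint_valid = true;
		return 0;
	}

	drift = n->t_offset_setpoint - n->t_offset;
	if (drift > MAX_ADJ || drift < -MAX_ADJ) {
		/* neighbour probably restarted; forget the setpoint */
		n->setpoint_valid = false;
		return 0;
	}
	return drift;
}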
+ */ + if (ieee80211_have_rx_timestamp(rx_status)) + t_r = ieee80211_calculate_rx_timestamp(local, rx_status, + len + FCS_LEN, 24); + else + t_r = drv_get_tsf(local, sdata); + + rcu_read_lock(); + sta = sta_info_get(sdata, mgmt->sa); + if (!sta) + goto no_sync; + + /* check offset sync conditions (13.13.2.2.1) + * + * TODO also sync to + * dot11MeshNbrOffsetMaxNeighbor non-peer non-MBSS neighbors + */ + + if (mesh_peer_tbtt_adjusting(mesh_cfg)) { + msync_dbg(sdata, "STA %pM : is adjusting TBTT\n", + sta->sta.addr); + goto no_sync; + } + + /* Timing offset calculation (see 13.13.2.2.2) */ + t_t = le64_to_cpu(mgmt->u.beacon.timestamp); + sta->mesh->t_offset = t_t - t_r; + + if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { + s64 t_clockdrift = sta->mesh->t_offset_setpoint - sta->mesh->t_offset; + msync_dbg(sdata, + "STA %pM : t_offset=%lld, t_offset_setpoint=%lld, t_clockdrift=%lld\n", + sta->sta.addr, (long long) sta->mesh->t_offset, + (long long) sta->mesh->t_offset_setpoint, + (long long) t_clockdrift); + + if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT || + t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) { + msync_dbg(sdata, + "STA %pM : t_clockdrift=%lld too large, setpoint reset\n", + sta->sta.addr, + (long long) t_clockdrift); + clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); + goto no_sync; + } + + spin_lock_bh(&ifmsh->sync_offset_lock); + if (t_clockdrift > ifmsh->sync_offset_clockdrift_max) + ifmsh->sync_offset_clockdrift_max = t_clockdrift; + spin_unlock_bh(&ifmsh->sync_offset_lock); + } else { + sta->mesh->t_offset_setpoint = sta->mesh->t_offset - TOFFSET_SET_MARGIN; + set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); + msync_dbg(sdata, + "STA %pM : offset was invalid, t_offset=%lld\n", + sta->sta.addr, + (long long) sta->mesh->t_offset); + } + +no_sync: + rcu_read_unlock(); +} + +static void mesh_sync_offset_adjust_tsf(struct ieee80211_sub_if_data *sdata, + struct beacon_data *beacon) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET); + WARN_ON(!rcu_read_lock_held()); + + spin_lock_bh(&ifmsh->sync_offset_lock); + + if (ifmsh->sync_offset_clockdrift_max > TOFFSET_MINIMUM_ADJUSTMENT) { + /* Since ajusting the tsf here would + * require a possibly blocking call + * to the driver tsf setter, we punt + * the tsf adjustment to the mesh tasklet + */ + msync_dbg(sdata, + "TSF : kicking off TSF adjustment with clockdrift_max=%lld\n", + ifmsh->sync_offset_clockdrift_max); + set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags); + } else { + msync_dbg(sdata, + "TSF : max clockdrift=%lld; too small to adjust\n", + (long long)ifmsh->sync_offset_clockdrift_max); + ifmsh->sync_offset_clockdrift_max = 0; + } + spin_unlock_bh(&ifmsh->sync_offset_lock); +} + +static const struct sync_method sync_methods[] = { + { + .method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, + .ops = { + .rx_bcn_presp = &mesh_sync_offset_rx_bcn_presp, + .adjust_tsf = &mesh_sync_offset_adjust_tsf, + } + }, +}; + +const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method) +{ + int i; + + for (i = 0 ; i < ARRAY_SIZE(sync_methods); ++i) { + if (sync_methods[i].method == method) + return &sync_methods[i].ops; + } + return NULL; +} diff --git a/net/mac80211/michael.c b/net/mac80211/michael.c new file mode 100644 index 0000000000..a57502d9ff --- /dev/null +++ b/net/mac80211/michael.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Michael MIC implementation - optimized for TKIP MIC operations + * Copyright 2002-2003, Instant802 
Networks, Inc. + */ +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/ieee80211.h> +#include <asm/unaligned.h> + +#include "michael.h" + +static void michael_block(struct michael_mic_ctx *mctx, u32 val) +{ + mctx->l ^= val; + mctx->r ^= rol32(mctx->l, 17); + mctx->l += mctx->r; + mctx->r ^= ((mctx->l & 0xff00ff00) >> 8) | + ((mctx->l & 0x00ff00ff) << 8); + mctx->l += mctx->r; + mctx->r ^= rol32(mctx->l, 3); + mctx->l += mctx->r; + mctx->r ^= ror32(mctx->l, 2); + mctx->l += mctx->r; +} + +static void michael_mic_hdr(struct michael_mic_ctx *mctx, const u8 *key, + struct ieee80211_hdr *hdr) +{ + u8 *da, *sa, tid; + + da = ieee80211_get_DA(hdr); + sa = ieee80211_get_SA(hdr); + if (ieee80211_is_data_qos(hdr->frame_control)) + tid = ieee80211_get_tid(hdr); + else + tid = 0; + + mctx->l = get_unaligned_le32(key); + mctx->r = get_unaligned_le32(key + 4); + + /* + * A pseudo header (DA, SA, Priority, 0, 0, 0) is used in Michael MIC + * calculation, but it is _not_ transmitted + */ + michael_block(mctx, get_unaligned_le32(da)); + michael_block(mctx, get_unaligned_le16(&da[4]) | + (get_unaligned_le16(sa) << 16)); + michael_block(mctx, get_unaligned_le32(&sa[2])); + michael_block(mctx, tid); +} + +void michael_mic(const u8 *key, struct ieee80211_hdr *hdr, + const u8 *data, size_t data_len, u8 *mic) +{ + u32 val; + size_t block, blocks, left; + struct michael_mic_ctx mctx; + + michael_mic_hdr(&mctx, key, hdr); + + /* Real data */ + blocks = data_len / 4; + left = data_len % 4; + + for (block = 0; block < blocks; block++) + michael_block(&mctx, get_unaligned_le32(&data[block * 4])); + + /* Partial block of 0..3 bytes and padding: 0x5a + 4..7 zeros to make + * total length a multiple of 4. */ + val = 0x5a; + while (left > 0) { + val <<= 8; + left--; + val |= data[blocks * 4 + left]; + } + + michael_block(&mctx, val); + michael_block(&mctx, 0); + + put_unaligned_le32(mctx.l, mic); + put_unaligned_le32(mctx.r, mic + 4); +} diff --git a/net/mac80211/michael.h b/net/mac80211/michael.h new file mode 100644 index 0000000000..a7fdb8e846 --- /dev/null +++ b/net/mac80211/michael.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Michael MIC implementation - optimized for TKIP MIC operations + * Copyright 2002-2003, Instant802 Networks, Inc. + */ + +#ifndef MICHAEL_H +#define MICHAEL_H + +#include <linux/types.h> +#include <linux/ieee80211.h> + +#define MICHAEL_MIC_LEN 8 + +struct michael_mic_ctx { + u32 l, r; +}; + +void michael_mic(const u8 *key, struct ieee80211_hdr *hdr, + const u8 *data, size_t data_len, u8 *mic); + +#endif /* MICHAEL_H */ diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c new file mode 100644 index 0000000000..73f8df03d1 --- /dev/null +++ b/net/mac80211/mlme.c @@ -0,0 +1,7956 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * BSS client mode implementation + * Copyright 2003-2008, Jouni Malinen <j@w1.fi> + * Copyright 2004, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. 
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2007, Michael Wu <flamingice@sourmilk.net> + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 - 2023 Intel Corporation + */ + +#include <linux/delay.h> +#include <linux/fips.h> +#include <linux/if_ether.h> +#include <linux/skbuff.h> +#include <linux/if_arp.h> +#include <linux/etherdevice.h> +#include <linux/moduleparam.h> +#include <linux/rtnetlink.h> +#include <linux/crc32.h> +#include <linux/slab.h> +#include <linux/export.h> +#include <net/mac80211.h> +#include <asm/unaligned.h> + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" +#include "led.h" +#include "fils_aead.h" + +#define IEEE80211_AUTH_TIMEOUT (HZ / 5) +#define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2) +#define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) +#define IEEE80211_AUTH_TIMEOUT_SAE (HZ * 2) +#define IEEE80211_AUTH_MAX_TRIES 3 +#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) +#define IEEE80211_AUTH_WAIT_SAE_RETRY (HZ * 2) +#define IEEE80211_ASSOC_TIMEOUT (HZ / 5) +#define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2) +#define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) +#define IEEE80211_ASSOC_MAX_TRIES 3 + +static int max_nullfunc_tries = 2; +module_param(max_nullfunc_tries, int, 0644); +MODULE_PARM_DESC(max_nullfunc_tries, + "Maximum nullfunc tx tries before disconnecting (reason 4)."); + +static int max_probe_tries = 5; +module_param(max_probe_tries, int, 0644); +MODULE_PARM_DESC(max_probe_tries, + "Maximum probe tries before disconnecting (reason 4)."); + +/* + * Beacon loss timeout is calculated as N frames times the + * advertised beacon interval. This may need to be somewhat + * higher than what hardware might detect to account for + * delays in the host processing frames. But since we also + * probe on beacon miss before declaring the connection lost + * default to what we want. + */ +static int beacon_loss_count = 7; +module_param(beacon_loss_count, int, 0644); +MODULE_PARM_DESC(beacon_loss_count, + "Number of beacon intervals before we decide beacon was lost."); + +/* + * Time the connection can be idle before we probe + * it to see if we can still talk to the AP. + */ +#define IEEE80211_CONNECTION_IDLE_TIME (30 * HZ) +/* + * Time we wait for a probe response after sending + * a probe request because of beacon loss or for + * checking the connection still works. + */ +static int probe_wait_ms = 500; +module_param(probe_wait_ms, int, 0644); +MODULE_PARM_DESC(probe_wait_ms, + "Maximum time(ms) to wait for probe response" + " before disconnecting (reason 4)."); + +/* + * How many Beacon frames need to have been used in average signal strength + * before starting to indicate signal change events. + */ +#define IEEE80211_SIGNAL_AVE_MIN_COUNT 4 + +/* + * Extract from the given disabled subchannel bitmap (raw format + * from the EHT Operation Element) the bits for the subchannel + * we're using right now. 
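A worked example of the extraction that follows, with illustrative frequencies: if the AP runs a 160 MHz BSS with center_freq1 = 6185 MHz, its first 20 MHz subchannel starts at 6105 MHz; a local 80 MHz connection with center_freq1 = 6225 MHz starts at 6185 MHz, so shift = (6185 - 6105) / 20 = 4 and mask = BIT(80 / 20) - 1 = 0xf, i.e. the local subchannels map to bits 4..7 of the AP's bitmap. The same arithmetic, isolated:

#include <stdint.h>

/* with the example numbers above: extract_bits(bmap, 6105, 6185, 80)
 * is (bmap >> 4) & 0xf */
static uint16_t extract_bits(uint16_t ap_bitmap, uint32_t ap_start_freq,
			     uint32_t local_start_freq, uint32_t local_bw)
{
	unsigned int shift = (local_start_freq - ap_start_freq) / 20;
	uint16_t mask = (1 << (local_bw / 20)) - 1;

	return (ap_bitmap >> shift) & mask;
}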
+ */ +static u16 +ieee80211_extract_dis_subch_bmap(const struct ieee80211_eht_operation *eht_oper, + struct cfg80211_chan_def *chandef, u16 bitmap) +{ + struct ieee80211_eht_operation_info *info = (void *)eht_oper->optional; + struct cfg80211_chan_def ap_chandef = *chandef; + u32 ap_center_freq, local_center_freq; + u32 ap_bw, local_bw; + int ap_start_freq, local_start_freq; + u16 shift, mask; + + if (!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) || + !(eht_oper->params & + IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)) + return 0; + + /* set 160/320 supported to get the full AP definition */ + ieee80211_chandef_eht_oper(eht_oper, true, true, &ap_chandef); + ap_center_freq = ap_chandef.center_freq1; + ap_bw = 20 * BIT(u8_get_bits(info->control, + IEEE80211_EHT_OPER_CHAN_WIDTH)); + ap_start_freq = ap_center_freq - ap_bw / 2; + local_center_freq = chandef->center_freq1; + local_bw = 20 * BIT(ieee80211_chan_width_to_rx_bw(chandef->width)); + local_start_freq = local_center_freq - local_bw / 2; + shift = (local_start_freq - ap_start_freq) / 20; + mask = BIT(local_bw / 20) - 1; + + return (bitmap >> shift) & mask; +} + +/* + * Handle the puncturing bitmap, possibly downgrading bandwidth to get a + * valid bitmap. + */ +static void +ieee80211_handle_puncturing_bitmap(struct ieee80211_link_data *link, + const struct ieee80211_eht_operation *eht_oper, + u16 bitmap, u64 *changed) +{ + struct cfg80211_chan_def *chandef = &link->conf->chandef; + u16 extracted; + u64 _changed = 0; + + if (!changed) + changed = &_changed; + + while (chandef->width > NL80211_CHAN_WIDTH_40) { + extracted = + ieee80211_extract_dis_subch_bmap(eht_oper, chandef, + bitmap); + + if (cfg80211_valid_disable_subchannel_bitmap(&bitmap, + chandef)) + break; + link->u.mgd.conn_flags |= + ieee80211_chandef_downgrade(chandef); + *changed |= BSS_CHANGED_BANDWIDTH; + } + + if (chandef->width <= NL80211_CHAN_WIDTH_40) + extracted = 0; + + if (link->conf->eht_puncturing != extracted) { + link->conf->eht_puncturing = extracted; + *changed |= BSS_CHANGED_EHT_PUNCTURING; + } +} + +/* + * We can have multiple work items (and connection probing) + * scheduling this timer, but we need to take care to only + * reschedule it when it should fire _earlier_ than it was + * asked for before, or if it's not pending right now. This + * function ensures that. Note that it then is required to + * run this function for all timeouts after the first one + * has happened -- the work that runs from this timer will + * do that. 
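The rule spelled out above fits in a few lines: rearm only when nothing is pending or the new deadline is earlier than the pending one. A plain userspace sketch (not the kernel timer API):

#include <stdbool.h>

struct simple_timer {
	bool pending;
	unsigned long expires;	/* absolute deadline, e.g. in jiffies */
};

static void rearm_if_earlier(struct simple_timer *t, unsigned long timeout)
{
	/* wraparound-safe "timeout before expires", like time_before() */
	if (!t->pending || (long)(timeout - t->expires) < 0) {
		t->expires = timeout;
		t->pending = true;
	}
}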
+ */ +static void run_again(struct ieee80211_sub_if_data *sdata, + unsigned long timeout) +{ + sdata_assert_lock(sdata); + + if (!timer_pending(&sdata->u.mgd.timer) || + time_before(timeout, sdata->u.mgd.timer.expires)) + mod_timer(&sdata->u.mgd.timer, timeout); +} + +void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata) +{ + if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER) + return; + + if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) + return; + + mod_timer(&sdata->u.mgd.bcn_mon_timer, + round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout)); +} + +void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + if (unlikely(!ifmgd->associated)) + return; + + if (ifmgd->probe_send_count) + ifmgd->probe_send_count = 0; + + if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) + return; + + mod_timer(&ifmgd->conn_mon_timer, + round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); +} + +static int ecw2cw(int ecw) +{ + return (1 << ecw) - 1; +} + +static ieee80211_conn_flags_t +ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link, + ieee80211_conn_flags_t conn_flags, + struct ieee80211_supported_band *sband, + struct ieee80211_channel *channel, + u32 vht_cap_info, + const struct ieee80211_ht_operation *ht_oper, + const struct ieee80211_vht_operation *vht_oper, + const struct ieee80211_he_operation *he_oper, + const struct ieee80211_eht_operation *eht_oper, + const struct ieee80211_s1g_oper_ie *s1g_oper, + struct cfg80211_chan_def *chandef, bool tracking) +{ + struct cfg80211_chan_def vht_chandef; + struct ieee80211_sta_ht_cap sta_ht_cap; + ieee80211_conn_flags_t ret; + u32 ht_cfreq; + + memset(chandef, 0, sizeof(struct cfg80211_chan_def)); + chandef->chan = channel; + chandef->width = NL80211_CHAN_WIDTH_20_NOHT; + chandef->center_freq1 = channel->center_freq; + chandef->freq1_offset = channel->freq_offset; + + if (channel->band == NL80211_BAND_6GHZ) { + if (!ieee80211_chandef_he_6ghz_oper(sdata, he_oper, eht_oper, + chandef)) { + mlme_dbg(sdata, + "bad 6 GHz operation, disabling HT/VHT/HE/EHT\n"); + ret = IEEE80211_CONN_DISABLE_HT | + IEEE80211_CONN_DISABLE_VHT | + IEEE80211_CONN_DISABLE_HE | + IEEE80211_CONN_DISABLE_EHT; + } else { + ret = 0; + } + vht_chandef = *chandef; + goto out; + } else if (sband->band == NL80211_BAND_S1GHZ) { + if (!ieee80211_chandef_s1g_oper(s1g_oper, chandef)) { + sdata_info(sdata, + "Missing S1G Operation Element? 
Trying operating == primary\n"); + chandef->width = ieee80211_s1g_channel_width(channel); + } + + ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_40MHZ | + IEEE80211_CONN_DISABLE_VHT | + IEEE80211_CONN_DISABLE_80P80MHZ | + IEEE80211_CONN_DISABLE_160MHZ; + goto out; + } + + memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap)); + ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap); + + if (!ht_oper || !sta_ht_cap.ht_supported) { + mlme_dbg(sdata, "HT operation missing / HT not supported\n"); + ret = IEEE80211_CONN_DISABLE_HT | + IEEE80211_CONN_DISABLE_VHT | + IEEE80211_CONN_DISABLE_HE | + IEEE80211_CONN_DISABLE_EHT; + goto out; + } + + chandef->width = NL80211_CHAN_WIDTH_20; + + ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, + channel->band); + /* check that channel matches the right operating channel */ + if (!tracking && channel->center_freq != ht_cfreq) { + /* + * It's possible that some APs are confused here; + * Netgear WNDR3700 sometimes reports 4 higher than + * the actual channel in association responses, but + * since we look at probe response/beacon data here + * it should be OK. + */ + sdata_info(sdata, + "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n", + channel->center_freq, ht_cfreq, + ht_oper->primary_chan, channel->band); + ret = IEEE80211_CONN_DISABLE_HT | + IEEE80211_CONN_DISABLE_VHT | + IEEE80211_CONN_DISABLE_HE | + IEEE80211_CONN_DISABLE_EHT; + goto out; + } + + /* check 40 MHz support, if we have it */ + if (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) { + ieee80211_chandef_ht_oper(ht_oper, chandef); + } else { + mlme_dbg(sdata, "40 MHz not supported\n"); + /* 40 MHz (and 80 MHz) must be supported for VHT */ + ret = IEEE80211_CONN_DISABLE_VHT; + /* also mark 40 MHz disabled */ + ret |= IEEE80211_CONN_DISABLE_40MHZ; + goto out; + } + + if (!vht_oper || !sband->vht_cap.vht_supported) { + mlme_dbg(sdata, "VHT operation missing / VHT not supported\n"); + ret = IEEE80211_CONN_DISABLE_VHT; + goto out; + } + + vht_chandef = *chandef; + if (!(conn_flags & IEEE80211_CONN_DISABLE_HE) && + he_oper && + (le32_to_cpu(he_oper->he_oper_params) & + IEEE80211_HE_OPERATION_VHT_OPER_INFO)) { + struct ieee80211_vht_operation he_oper_vht_cap; + + /* + * Set only first 3 bytes (other 2 aren't used in + * ieee80211_chandef_vht_oper() anyway) + */ + memcpy(&he_oper_vht_cap, he_oper->optional, 3); + he_oper_vht_cap.basic_mcs_set = cpu_to_le16(0); + + if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info, + &he_oper_vht_cap, ht_oper, + &vht_chandef)) { + if (!(conn_flags & IEEE80211_CONN_DISABLE_HE)) + sdata_info(sdata, + "HE AP VHT information is invalid, disabling HE\n"); + ret = IEEE80211_CONN_DISABLE_HE | IEEE80211_CONN_DISABLE_EHT; + goto out; + } + } else if (!ieee80211_chandef_vht_oper(&sdata->local->hw, + vht_cap_info, + vht_oper, ht_oper, + &vht_chandef)) { + if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT)) + sdata_info(sdata, + "AP VHT information is invalid, disabling VHT\n"); + ret = IEEE80211_CONN_DISABLE_VHT; + goto out; + } + + if (!cfg80211_chandef_valid(&vht_chandef)) { + if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT)) + sdata_info(sdata, + "AP VHT information is invalid, disabling VHT\n"); + ret = IEEE80211_CONN_DISABLE_VHT; + goto out; + } + + if (cfg80211_chandef_identical(chandef, &vht_chandef)) { + ret = 0; + goto out; + } + + if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) { + if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT)) + sdata_info(sdata, + "AP VHT information 
doesn't match HT, disabling VHT\n"); + ret = IEEE80211_CONN_DISABLE_VHT; + goto out; + } + + *chandef = vht_chandef; + + /* + * handle the case that the EHT operation indicates that it holds EHT + * operation information (in case that the channel width differs from + * the channel width reported in HT/VHT/HE). + */ + if (eht_oper && (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT)) { + struct cfg80211_chan_def eht_chandef = *chandef; + + ieee80211_chandef_eht_oper(eht_oper, + eht_chandef.width == + NL80211_CHAN_WIDTH_160, + false, &eht_chandef); + + if (!cfg80211_chandef_valid(&eht_chandef)) { + if (!(conn_flags & IEEE80211_CONN_DISABLE_EHT)) + sdata_info(sdata, + "AP EHT information is invalid, disabling EHT\n"); + ret = IEEE80211_CONN_DISABLE_EHT; + goto out; + } + + if (!cfg80211_chandef_compatible(chandef, &eht_chandef)) { + if (!(conn_flags & IEEE80211_CONN_DISABLE_EHT)) + sdata_info(sdata, + "AP EHT information is incompatible, disabling EHT\n"); + ret = IEEE80211_CONN_DISABLE_EHT; + goto out; + } + + *chandef = eht_chandef; + } + + ret = 0; + +out: + /* + * When tracking the current AP, don't do any further checks if the + * new chandef is identical to the one we're currently using for the + * connection. This keeps us from playing ping-pong with regulatory, + * without it the following can happen (for example): + * - connect to an AP with 80 MHz, world regdom allows 80 MHz + * - AP advertises regdom US + * - CRDA loads regdom US with 80 MHz prohibited (old database) + * - the code below detects an unsupported channel, downgrades, and + * we disconnect from the AP in the caller + * - disconnect causes CRDA to reload world regdomain and the game + * starts anew. + * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881) + * + * It seems possible that there are still scenarios with CSA or real + * bandwidth changes where a this could happen, but those cases are + * less common and wouldn't completely prevent using the AP. + */ + if (tracking && + cfg80211_chandef_identical(chandef, &link->conf->chandef)) + return ret; + + /* don't print the message below for VHT mismatch if VHT is disabled */ + if (ret & IEEE80211_CONN_DISABLE_VHT) + vht_chandef = *chandef; + + /* + * Ignore the DISABLED flag when we're already connected and only + * tracking the APs beacon for bandwidth changes - otherwise we + * might get disconnected here if we connect to an AP, update our + * regulatory information based on the AP's country IE and the + * information we have is wrong/outdated and disables the channel + * that we're actually using for the connection to the AP. + */ + while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, + tracking ? 
0 : + IEEE80211_CHAN_DISABLED)) { + if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { + ret = IEEE80211_CONN_DISABLE_HT | + IEEE80211_CONN_DISABLE_VHT | + IEEE80211_CONN_DISABLE_HE | + IEEE80211_CONN_DISABLE_EHT; + break; + } + + ret |= ieee80211_chandef_downgrade(chandef); + } + + if (!he_oper || !cfg80211_chandef_usable(sdata->wdev.wiphy, chandef, + IEEE80211_CHAN_NO_HE)) + ret |= IEEE80211_CONN_DISABLE_HE | IEEE80211_CONN_DISABLE_EHT; + + if (!eht_oper || !cfg80211_chandef_usable(sdata->wdev.wiphy, chandef, + IEEE80211_CHAN_NO_EHT)) + ret |= IEEE80211_CONN_DISABLE_EHT; + + if (chandef->width != vht_chandef.width && !tracking) + sdata_info(sdata, + "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n"); + + WARN_ON_ONCE(!cfg80211_chandef_valid(chandef)); + return ret; +} + +static int ieee80211_config_bw(struct ieee80211_link_data *link, + const struct ieee80211_ht_cap *ht_cap, + const struct ieee80211_vht_cap *vht_cap, + const struct ieee80211_ht_operation *ht_oper, + const struct ieee80211_vht_operation *vht_oper, + const struct ieee80211_he_operation *he_oper, + const struct ieee80211_eht_operation *eht_oper, + const struct ieee80211_s1g_oper_ie *s1g_oper, + const u8 *bssid, u64 *changed) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_channel *chan = link->conf->chandef.chan; + struct ieee80211_supported_band *sband = + local->hw.wiphy->bands[chan->band]; + struct cfg80211_chan_def chandef; + u16 ht_opmode; + ieee80211_conn_flags_t flags; + u32 vht_cap_info = 0; + int ret; + + /* if HT was/is disabled, don't track any bandwidth changes */ + if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT || !ht_oper) + return 0; + + /* don't check VHT if we associated as non-VHT station */ + if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT) + vht_oper = NULL; + + /* don't check HE if we associated as non-HE station */ + if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE || + !ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) { + he_oper = NULL; + eht_oper = NULL; + } + + /* don't check EHT if we associated as non-EHT station */ + if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT || + !ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif)) + eht_oper = NULL; + + /* + * if bss configuration changed store the new one - + * this may be applicable even if channel is identical + */ + ht_opmode = le16_to_cpu(ht_oper->operation_mode); + if (link->conf->ht_operation_mode != ht_opmode) { + *changed |= BSS_CHANGED_HT; + link->conf->ht_operation_mode = ht_opmode; + } + + if (vht_cap) + vht_cap_info = le32_to_cpu(vht_cap->vht_cap_info); + + /* calculate new channel (type) based on HT/VHT/HE operation IEs */ + flags = ieee80211_determine_chantype(sdata, link, + link->u.mgd.conn_flags, + sband, chan, vht_cap_info, + ht_oper, vht_oper, + he_oper, eht_oper, + s1g_oper, &chandef, true); + + /* + * Downgrade the new channel if we associated with restricted + * capabilities. For example, if we associated as a 20 MHz STA + * to a 40 MHz AP (due to regulatory, capabilities or config + * reasons) then switching to a 40 MHz channel now won't do us + * any good -- we couldn't use it with the AP. 
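Conceptually, each downgrade step narrows the channel one notch and reports which capability had to be dropped to get there. The sketch below illustrates the idea with simplified widths and flags; the real ieee80211_chandef_downgrade() covers more widths (80+80, 320 MHz) and sets the precise IEEE80211_CONN_DISABLE_* flags per step:

enum bw { BW_20_NOHT, BW_20, BW_40, BW_80, BW_160 };
enum {
	DIS_40MHZ  = 1 << 0,
	DIS_VHT    = 1 << 1,
	DIS_160MHZ = 1 << 2,
	DIS_HT     = 1 << 3,
};

/* narrow one step, return the capability bit that had to be disabled */
static unsigned int downgrade_once(enum bw *width)
{
	switch (*width) {
	case BW_160:
		*width = BW_80;
		return DIS_160MHZ;
	case BW_80:
		*width = BW_40;
		return DIS_VHT;
	case BW_40:
		*width = BW_20;
		return DIS_40MHZ;
	default:
		*width = BW_20_NOHT;
		return DIS_HT;
	}
}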
+ */ + if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ && + chandef.width == NL80211_CHAN_WIDTH_80P80) + flags |= ieee80211_chandef_downgrade(&chandef); + if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_160MHZ && + chandef.width == NL80211_CHAN_WIDTH_160) + flags |= ieee80211_chandef_downgrade(&chandef); + if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_40MHZ && + chandef.width > NL80211_CHAN_WIDTH_20) + flags |= ieee80211_chandef_downgrade(&chandef); + + if (cfg80211_chandef_identical(&chandef, &link->conf->chandef)) + return 0; + + link_info(link, + "AP %pM changed bandwidth, new config is %d.%03d MHz, width %d (%d.%03d/%d MHz)\n", + link->u.mgd.bssid, chandef.chan->center_freq, + chandef.chan->freq_offset, chandef.width, + chandef.center_freq1, chandef.freq1_offset, + chandef.center_freq2); + + if (flags != (link->u.mgd.conn_flags & + (IEEE80211_CONN_DISABLE_HT | + IEEE80211_CONN_DISABLE_VHT | + IEEE80211_CONN_DISABLE_HE | + IEEE80211_CONN_DISABLE_EHT | + IEEE80211_CONN_DISABLE_40MHZ | + IEEE80211_CONN_DISABLE_80P80MHZ | + IEEE80211_CONN_DISABLE_160MHZ | + IEEE80211_CONN_DISABLE_320MHZ)) || + !cfg80211_chandef_valid(&chandef)) { + sdata_info(sdata, + "AP %pM changed caps/bw in a way we can't support (0x%x/0x%x) - disconnect\n", + link->u.mgd.bssid, flags, ifmgd->flags); + return -EINVAL; + } + + ret = ieee80211_link_change_bandwidth(link, &chandef, changed); + + if (ret) { + sdata_info(sdata, + "AP %pM changed bandwidth to incompatible one - disconnect\n", + link->u.mgd.bssid); + return ret; + } + + return 0; +} + +/* frame sending functions */ + +static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u8 ap_ht_param, + struct ieee80211_supported_band *sband, + struct ieee80211_channel *channel, + enum ieee80211_smps_mode smps, + ieee80211_conn_flags_t conn_flags) +{ + u8 *pos; + u32 flags = channel->flags; + u16 cap; + struct ieee80211_sta_ht_cap ht_cap; + + BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap)); + + memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); + ieee80211_apply_htcap_overrides(sdata, &ht_cap); + + /* determine capability flags */ + cap = ht_cap.cap; + + switch (ap_ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + if (flags & IEEE80211_CHAN_NO_HT40PLUS) { + cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + cap &= ~IEEE80211_HT_CAP_SGI_40; + } + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + if (flags & IEEE80211_CHAN_NO_HT40MINUS) { + cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + cap &= ~IEEE80211_HT_CAP_SGI_40; + } + break; + } + + /* + * If 40 MHz was disabled associate as though we weren't + * capable of 40 MHz -- some broken APs will never fall + * back to trying to transmit in 20 MHz. 
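The SM power save field built a few lines below uses the standard two-bit HT capability encoding (0 = static, 1 = dynamic, 3 = disabled) shifted into place. An isolated sketch, with illustrative names standing in for the mac80211/ieee80211.h constants:

enum smps_mode { SMPS_OFF, SMPS_STATIC, SMPS_DYNAMIC };

#define SM_PS_SHIFT	2	/* SM PS field sits in bits 2..3 of the HT cap info */

static unsigned int smps_to_sm_ps_bits(enum smps_mode smps)
{
	switch (smps) {
	case SMPS_STATIC:
		return 0 << SM_PS_SHIFT;	/* static SM power save */
	case SMPS_DYNAMIC:
		return 1 << SM_PS_SHIFT;	/* dynamic SM power save */
	case SMPS_OFF:
	default:
		return 3 << SM_PS_SHIFT;	/* SM power save disabled */
	}
}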
+ */ + if (conn_flags & IEEE80211_CONN_DISABLE_40MHZ) { + cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + cap &= ~IEEE80211_HT_CAP_SGI_40; + } + + /* set SM PS mode properly */ + cap &= ~IEEE80211_HT_CAP_SM_PS; + switch (smps) { + case IEEE80211_SMPS_AUTOMATIC: + case IEEE80211_SMPS_NUM_MODES: + WARN_ON(1); + fallthrough; + case IEEE80211_SMPS_OFF: + cap |= WLAN_HT_CAP_SM_PS_DISABLED << + IEEE80211_HT_CAP_SM_PS_SHIFT; + break; + case IEEE80211_SMPS_STATIC: + cap |= WLAN_HT_CAP_SM_PS_STATIC << + IEEE80211_HT_CAP_SM_PS_SHIFT; + break; + case IEEE80211_SMPS_DYNAMIC: + cap |= WLAN_HT_CAP_SM_PS_DYNAMIC << + IEEE80211_HT_CAP_SM_PS_SHIFT; + break; + } + + /* reserve and fill IE */ + pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); + ieee80211_ie_build_ht_cap(pos, &ht_cap, cap); +} + +/* This function determines vht capability flags for the association + * and builds the IE. + * Note - the function returns true to own the MU-MIMO capability + */ +static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, + struct ieee80211_supported_band *sband, + struct ieee80211_vht_cap *ap_vht_cap, + ieee80211_conn_flags_t conn_flags) +{ + struct ieee80211_local *local = sdata->local; + u8 *pos; + u32 cap; + struct ieee80211_sta_vht_cap vht_cap; + u32 mask, ap_bf_sts, our_bf_sts; + bool mu_mimo_owner = false; + + BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap)); + + memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap)); + ieee80211_apply_vhtcap_overrides(sdata, &vht_cap); + + /* determine capability flags */ + cap = vht_cap.cap; + + if (conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ) { + u32 bw = cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; + + cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; + if (bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ || + bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) + cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; + } + + if (conn_flags & IEEE80211_CONN_DISABLE_160MHZ) { + cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160; + cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; + } + + /* + * Some APs apparently get confused if our capabilities are better + * than theirs, so restrict what we advertise in the assoc request. + */ + if (!(ap_vht_cap->vht_cap_info & + cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE))) + cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); + else if (!(ap_vht_cap->vht_cap_info & + cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) + cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + + /* + * If some other vif is using the MU-MIMO capability we cannot associate + * using MU-MIMO - this will lead to contradictions in the group-id + * mechanism. + * Ownership is defined since association request, in order to avoid + * simultaneous associations with MU-MIMO. 
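The beamformee STS limiting done a little further down in this function is an instance of a generic pattern: extract the same sub-field from our capabilities and the AP's, and keep the smaller value so we never advertise more than the AP can use. A stand-alone sketch of that clamp:

#include <stdint.h>

static uint32_t clamp_cap_subfield(uint32_t our_cap, uint32_t ap_cap,
				   uint32_t mask)
{
	uint32_t ours = our_cap & mask;
	uint32_t theirs = ap_cap & mask;

	/* both values share the same shift, so comparing masked words works */
	if (theirs < ours) {
		our_cap &= ~mask;
		our_cap |= theirs;
	}
	return our_cap;
}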
+ */ + if (cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) { + bool disable_mu_mimo = false; + struct ieee80211_sub_if_data *other; + + list_for_each_entry_rcu(other, &local->interfaces, list) { + if (other->vif.bss_conf.mu_mimo_owner) { + disable_mu_mimo = true; + break; + } + } + if (disable_mu_mimo) + cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + else + mu_mimo_owner = true; + } + + mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; + + ap_bf_sts = le32_to_cpu(ap_vht_cap->vht_cap_info) & mask; + our_bf_sts = cap & mask; + + if (ap_bf_sts < our_bf_sts) { + cap &= ~mask; + cap |= ap_bf_sts; + } + + /* reserve and fill IE */ + pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2); + ieee80211_ie_build_vht_cap(pos, &vht_cap, cap); + + return mu_mimo_owner; +} + +/* This function determines HE capability flags for the association + * and builds the IE. + */ +static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, + struct ieee80211_supported_band *sband, + enum ieee80211_smps_mode smps_mode, + ieee80211_conn_flags_t conn_flags) +{ + u8 *pos, *pre_he_pos; + const struct ieee80211_sta_he_cap *he_cap; + u8 he_cap_size; + + he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); + if (WARN_ON(!he_cap)) + return; + + /* get a max size estimate */ + he_cap_size = + 2 + 1 + sizeof(he_cap->he_cap_elem) + + ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) + + ieee80211_he_ppe_size(he_cap->ppe_thres[0], + he_cap->he_cap_elem.phy_cap_info); + pos = skb_put(skb, he_cap_size); + pre_he_pos = pos; + pos = ieee80211_ie_build_he_cap(conn_flags, + pos, he_cap, pos + he_cap_size); + /* trim excess if any */ + skb_trim(skb, skb->len - (pre_he_pos + he_cap_size - pos)); + + ieee80211_ie_build_he_6ghz_cap(sdata, smps_mode, skb); +} + +static void ieee80211_add_eht_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, + struct ieee80211_supported_band *sband) +{ + u8 *pos; + const struct ieee80211_sta_he_cap *he_cap; + const struct ieee80211_sta_eht_cap *eht_cap; + u8 eht_cap_size; + + he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); + eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif); + + /* + * EHT capabilities element is only added if the HE capabilities element + * was added so assume that 'he_cap' is valid and don't check it. + */ + if (WARN_ON(!he_cap || !eht_cap)) + return; + + eht_cap_size = + 2 + 1 + sizeof(eht_cap->eht_cap_elem) + + ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem, + &eht_cap->eht_cap_elem, + false) + + ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0], + eht_cap->eht_cap_elem.phy_cap_info); + pos = skb_put(skb, eht_cap_size); + ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + eht_cap_size, + false); +} + +static void ieee80211_assoc_add_rates(struct sk_buff *skb, + enum nl80211_chan_width width, + struct ieee80211_supported_band *sband, + struct ieee80211_mgd_assoc_data *assoc_data) +{ + unsigned int shift = ieee80211_chanwidth_get_shift(width); + unsigned int rates_len, supp_rates_len; + u32 rates = 0; + int i, count; + u8 *pos; + + if (assoc_data->supp_rates_len) { + /* + * Get all rates supported by the device and the AP as + * some APs don't like getting a superset of their rates + * in the association request (e.g. D-Link DAP 1353 in + * b-only mode)... 
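The encoding used below converts mac80211's internal bitrate unit (100 kbps) into the 500 kbps unit carried by the (Extended) Supported Rates elements, and splits the list after the first eight entries. A simplified sketch; the 1 << shift scaling for 5/10 MHz channels and the basic-rate flag are left out:

#include <stddef.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* returns how many rates spilled into the Extended Supported Rates element */
static size_t encode_supp_rates(const uint16_t *rates_100kbps, size_t n,
				uint8_t *supp, uint8_t *ext_supp)
{
	size_t i, n_ext = 0;

	for (i = 0; i < n; i++) {
		/* e.g. 54 Mbit/s: DIV_ROUND_UP(540, 5) == 108 (0x6c) */
		uint8_t r = DIV_ROUND_UP(rates_100kbps[i], 5);

		if (i < 8)
			supp[i] = r;
		else
			ext_supp[n_ext++] = r;
	}
	return n_ext;
}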
+ */ + rates_len = ieee80211_parse_bitrates(width, sband, + assoc_data->supp_rates, + assoc_data->supp_rates_len, + &rates); + } else { + /* + * In case AP not provide any supported rates information + * before association, we send information element(s) with + * all rates that we support. + */ + rates_len = sband->n_bitrates; + for (i = 0; i < sband->n_bitrates; i++) + rates |= BIT(i); + } + + supp_rates_len = rates_len; + if (supp_rates_len > 8) + supp_rates_len = 8; + + pos = skb_put(skb, supp_rates_len + 2); + *pos++ = WLAN_EID_SUPP_RATES; + *pos++ = supp_rates_len; + + count = 0; + for (i = 0; i < sband->n_bitrates; i++) { + if (BIT(i) & rates) { + int rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, + 5 * (1 << shift)); + *pos++ = (u8)rate; + if (++count == 8) + break; + } + } + + if (rates_len > count) { + pos = skb_put(skb, rates_len - count + 2); + *pos++ = WLAN_EID_EXT_SUPP_RATES; + *pos++ = rates_len - count; + + for (i++; i < sband->n_bitrates; i++) { + if (BIT(i) & rates) { + int rate; + + rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, + 5 * (1 << shift)); + *pos++ = (u8)rate; + } + } + } +} + +static size_t ieee80211_add_before_ht_elems(struct sk_buff *skb, + const u8 *elems, + size_t elems_len, + size_t offset) +{ + size_t noffset; + + static const u8 before_ht[] = { + WLAN_EID_SSID, + WLAN_EID_SUPP_RATES, + WLAN_EID_EXT_SUPP_RATES, + WLAN_EID_PWR_CAPABILITY, + WLAN_EID_SUPPORTED_CHANNELS, + WLAN_EID_RSN, + WLAN_EID_QOS_CAPA, + WLAN_EID_RRM_ENABLED_CAPABILITIES, + WLAN_EID_MOBILITY_DOMAIN, + WLAN_EID_FAST_BSS_TRANSITION, /* reassoc only */ + WLAN_EID_RIC_DATA, /* reassoc only */ + WLAN_EID_SUPPORTED_REGULATORY_CLASSES, + }; + static const u8 after_ric[] = { + WLAN_EID_SUPPORTED_REGULATORY_CLASSES, + WLAN_EID_HT_CAPABILITY, + WLAN_EID_BSS_COEX_2040, + /* luckily this is almost always there */ + WLAN_EID_EXT_CAPABILITY, + WLAN_EID_QOS_TRAFFIC_CAPA, + WLAN_EID_TIM_BCAST_REQ, + WLAN_EID_INTERWORKING, + /* 60 GHz (Multi-band, DMG, MMS) can't happen */ + WLAN_EID_VHT_CAPABILITY, + WLAN_EID_OPMODE_NOTIF, + }; + + if (!elems_len) + return offset; + + noffset = ieee80211_ie_split_ric(elems, elems_len, + before_ht, + ARRAY_SIZE(before_ht), + after_ric, + ARRAY_SIZE(after_ric), + offset); + skb_put_data(skb, elems + offset, noffset - offset); + + return noffset; +} + +static size_t ieee80211_add_before_vht_elems(struct sk_buff *skb, + const u8 *elems, + size_t elems_len, + size_t offset) +{ + static const u8 before_vht[] = { + /* + * no need to list the ones split off before HT + * or generated here + */ + WLAN_EID_BSS_COEX_2040, + WLAN_EID_EXT_CAPABILITY, + WLAN_EID_QOS_TRAFFIC_CAPA, + WLAN_EID_TIM_BCAST_REQ, + WLAN_EID_INTERWORKING, + /* 60 GHz (Multi-band, DMG, MMS) can't happen */ + }; + size_t noffset; + + if (!elems_len) + return offset; + + /* RIC already taken care of in ieee80211_add_before_ht_elems() */ + noffset = ieee80211_ie_split(elems, elems_len, + before_vht, ARRAY_SIZE(before_vht), + offset); + skb_put_data(skb, elems + offset, noffset - offset); + + return noffset; +} + +static size_t ieee80211_add_before_he_elems(struct sk_buff *skb, + const u8 *elems, + size_t elems_len, + size_t offset) +{ + static const u8 before_he[] = { + /* + * no need to list the ones split off before VHT + * or generated here + */ + WLAN_EID_OPMODE_NOTIF, + WLAN_EID_EXTENSION, WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE, + /* 11ai elements */ + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_SESSION, + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_PUBLIC_KEY, + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_KEY_CONFIRM, + 
WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_HLP_CONTAINER, + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN, + /* TODO: add 11ah/11aj/11ak elements */ + }; + size_t noffset; + + if (!elems_len) + return offset; + + /* RIC already taken care of in ieee80211_add_before_ht_elems() */ + noffset = ieee80211_ie_split(elems, elems_len, + before_he, ARRAY_SIZE(before_he), + offset); + skb_put_data(skb, elems + offset, noffset - offset); + + return noffset; +} + +#define PRESENT_ELEMS_MAX 8 +#define PRESENT_ELEM_EXT_OFFS 0x100 + +static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u16 capab, + const struct element *ext_capa, + const u16 *present_elems); + +static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u16 *capab, + const struct element *ext_capa, + const u8 *extra_elems, + size_t extra_elems_len, + unsigned int link_id, + struct ieee80211_link_data *link, + u16 *present_elems) +{ + enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; + struct cfg80211_bss *cbss = assoc_data->link[link_id].bss; + struct ieee80211_channel *chan = cbss->channel; + const struct ieee80211_sband_iftype_data *iftd; + struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; + enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20; + struct ieee80211_chanctx_conf *chanctx_conf; + enum ieee80211_smps_mode smps_mode; + u16 orig_capab = *capab; + size_t offset = 0; + int present_elems_len = 0; + u8 *pos; + int i; + +#define ADD_PRESENT_ELEM(id) do { \ + /* need a last for termination - we use 0 == SSID */ \ + if (!WARN_ON(present_elems_len >= PRESENT_ELEMS_MAX - 1)) \ + present_elems[present_elems_len++] = (id); \ +} while (0) +#define ADD_PRESENT_EXT_ELEM(id) ADD_PRESENT_ELEM(PRESENT_ELEM_EXT_OFFS | (id)) + + if (link) + smps_mode = link->smps_mode; + else if (sdata->u.mgd.powersave) + smps_mode = IEEE80211_SMPS_DYNAMIC; + else + smps_mode = IEEE80211_SMPS_OFF; + + if (link) { + /* + * 5/10 MHz scenarios are only viable without MLO, in which + * case this pointer should be used ... All of this is a bit + * unclear though, not sure this even works at all. + */ + rcu_read_lock(); + chanctx_conf = rcu_dereference(link->conf->chanctx_conf); + if (chanctx_conf) + width = chanctx_conf->def.width; + rcu_read_unlock(); + } + + sband = local->hw.wiphy->bands[chan->band]; + iftd = ieee80211_get_sband_iftype_data(sband, iftype); + + if (sband->band == NL80211_BAND_2GHZ) { + *capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME; + *capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; + } + + if ((cbss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT) && + ieee80211_hw_check(&local->hw, SPECTRUM_MGMT)) + *capab |= WLAN_CAPABILITY_SPECTRUM_MGMT; + + if (sband->band != NL80211_BAND_S1GHZ) + ieee80211_assoc_add_rates(skb, width, sband, assoc_data); + + if (*capab & WLAN_CAPABILITY_SPECTRUM_MGMT || + *capab & WLAN_CAPABILITY_RADIO_MEASURE) { + struct cfg80211_chan_def chandef = { + .width = width, + .chan = chan, + }; + + pos = skb_put(skb, 4); + *pos++ = WLAN_EID_PWR_CAPABILITY; + *pos++ = 2; + *pos++ = 0; /* min tx power */ + /* max tx power */ + *pos++ = ieee80211_chandef_max_power(&chandef); + ADD_PRESENT_ELEM(WLAN_EID_PWR_CAPABILITY); + } + + /* + * Per spec, we shouldn't include the list of channels if we advertise + * support for extended channel switching, but we've always done that; + * (for now?) 
apply this restriction only on the (new) 6 GHz band. + */ + if (*capab & WLAN_CAPABILITY_SPECTRUM_MGMT && + (sband->band != NL80211_BAND_6GHZ || + !ext_capa || ext_capa->datalen < 1 || + !(ext_capa->data[0] & WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING))) { + /* TODO: get this in reg domain format */ + pos = skb_put(skb, 2 * sband->n_channels + 2); + *pos++ = WLAN_EID_SUPPORTED_CHANNELS; + *pos++ = 2 * sband->n_channels; + for (i = 0; i < sband->n_channels; i++) { + int cf = sband->channels[i].center_freq; + + *pos++ = ieee80211_frequency_to_channel(cf); + *pos++ = 1; /* one channel in the subband*/ + } + ADD_PRESENT_ELEM(WLAN_EID_SUPPORTED_CHANNELS); + } + + /* if present, add any custom IEs that go before HT */ + offset = ieee80211_add_before_ht_elems(skb, extra_elems, + extra_elems_len, + offset); + + if (sband->band != NL80211_BAND_6GHZ && + !(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HT)) { + ieee80211_add_ht_ie(sdata, skb, + assoc_data->link[link_id].ap_ht_param, + sband, chan, smps_mode, + assoc_data->link[link_id].conn_flags); + ADD_PRESENT_ELEM(WLAN_EID_HT_CAPABILITY); + } + + /* if present, add any custom IEs that go before VHT */ + offset = ieee80211_add_before_vht_elems(skb, extra_elems, + extra_elems_len, + offset); + + if (sband->band != NL80211_BAND_6GHZ && + !(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_VHT)) { + bool mu_mimo_owner = + ieee80211_add_vht_ie(sdata, skb, sband, + &assoc_data->link[link_id].ap_vht_cap, + assoc_data->link[link_id].conn_flags); + + if (link) + link->conf->mu_mimo_owner = mu_mimo_owner; + ADD_PRESENT_ELEM(WLAN_EID_VHT_CAPABILITY); + } + + /* + * If AP doesn't support HT, mark HE and EHT as disabled. + * If on the 5GHz band, make sure it supports VHT. + */ + if (assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HT || + (sband->band == NL80211_BAND_5GHZ && + assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_VHT)) + assoc_data->link[link_id].conn_flags |= + IEEE80211_CONN_DISABLE_HE | + IEEE80211_CONN_DISABLE_EHT; + + /* if present, add any custom IEs that go before HE */ + offset = ieee80211_add_before_he_elems(skb, extra_elems, + extra_elems_len, + offset); + + if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HE)) { + ieee80211_add_he_ie(sdata, skb, sband, smps_mode, + assoc_data->link[link_id].conn_flags); + ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_HE_CAPABILITY); + } + + /* + * careful - need to know about all the present elems before + * calling ieee80211_assoc_add_ml_elem(), so add this one if + * we're going to put it after the ML element + */ + if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_EHT)) + ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_EHT_CAPABILITY); + + if (link_id == assoc_data->assoc_link_id) + ieee80211_assoc_add_ml_elem(sdata, skb, orig_capab, ext_capa, + present_elems); + + /* crash if somebody gets it wrong */ + present_elems = NULL; + + if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_EHT)) + ieee80211_add_eht_ie(sdata, skb, sband); + + if (sband->band == NL80211_BAND_S1GHZ) { + ieee80211_add_aid_request_ie(sdata, skb); + ieee80211_add_s1g_capab_ie(sdata, &sband->s1g_cap, skb); + } + + if (iftd && iftd->vendor_elems.data && iftd->vendor_elems.len) + skb_put_data(skb, iftd->vendor_elems.data, iftd->vendor_elems.len); + + if (link) + link->u.mgd.conn_flags = assoc_data->link[link_id].conn_flags; + + return offset; +} + +static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb, + const u16 *outer, + const u16 *inner) +{ + 
unsigned int skb_len = skb->len; + bool at_extension = false; + bool added = false; + int i, j; + u8 *len, *list_len = NULL; + + skb_put_u8(skb, WLAN_EID_EXTENSION); + len = skb_put(skb, 1); + skb_put_u8(skb, WLAN_EID_EXT_NON_INHERITANCE); + + for (i = 0; i < PRESENT_ELEMS_MAX && outer[i]; i++) { + u16 elem = outer[i]; + bool have_inner = false; + + /* should at least be sorted in the sense of normal -> ext */ + WARN_ON(at_extension && elem < PRESENT_ELEM_EXT_OFFS); + + /* switch to extension list */ + if (!at_extension && elem >= PRESENT_ELEM_EXT_OFFS) { + at_extension = true; + if (!list_len) + skb_put_u8(skb, 0); + list_len = NULL; + } + + for (j = 0; j < PRESENT_ELEMS_MAX && inner[j]; j++) { + if (elem == inner[j]) { + have_inner = true; + break; + } + } + + if (have_inner) + continue; + + if (!list_len) { + list_len = skb_put(skb, 1); + *list_len = 0; + } + *list_len += 1; + skb_put_u8(skb, (u8)elem); + added = true; + } + + /* if we added a list but no extension list, make a zero-len one */ + if (added && (!at_extension || !list_len)) + skb_put_u8(skb, 0); + + /* if nothing added remove extension element completely */ + if (!added) + skb_trim(skb, skb_len); + else + *len = skb->len - skb_len - 2; +} + +static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u16 capab, + const struct element *ext_capa, + const u16 *outer_present_elems) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; + struct ieee80211_multi_link_elem *ml_elem; + struct ieee80211_mle_basic_common_info *common; + const struct wiphy_iftype_ext_capab *ift_ext_capa; + __le16 eml_capa = 0, mld_capa_ops = 0; + unsigned int link_id; + u8 *ml_elem_len; + void *capab_pos; + + if (!ieee80211_vif_is_mld(&sdata->vif)) + return; + + ift_ext_capa = cfg80211_get_iftype_ext_capa(local->hw.wiphy, + ieee80211_vif_type_p2p(&sdata->vif)); + if (ift_ext_capa) { + eml_capa = cpu_to_le16(ift_ext_capa->eml_capabilities); + mld_capa_ops = cpu_to_le16(ift_ext_capa->mld_capa_and_ops); + } + + skb_put_u8(skb, WLAN_EID_EXTENSION); + ml_elem_len = skb_put(skb, 1); + skb_put_u8(skb, WLAN_EID_EXT_EHT_MULTI_LINK); + ml_elem = skb_put(skb, sizeof(*ml_elem)); + ml_elem->control = + cpu_to_le16(IEEE80211_ML_CONTROL_TYPE_BASIC | + IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP); + common = skb_put(skb, sizeof(*common)); + common->len = sizeof(*common) + + 2; /* MLD capa/ops */ + memcpy(common->mld_mac_addr, sdata->vif.addr, ETH_ALEN); + + /* add EML_CAPA only if needed, see Draft P802.11be_D2.1, 35.3.17 */ + if (eml_capa & + cpu_to_le16((IEEE80211_EML_CAP_EMLSR_SUPP | + IEEE80211_EML_CAP_EMLMR_SUPPORT))) { + common->len += 2; /* EML capabilities */ + ml_elem->control |= + cpu_to_le16(IEEE80211_MLC_BASIC_PRES_EML_CAPA); + skb_put_data(skb, &eml_capa, sizeof(eml_capa)); + } + /* need indication from userspace to support this */ + mld_capa_ops &= ~cpu_to_le16(IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP); + skb_put_data(skb, &mld_capa_ops, sizeof(mld_capa_ops)); + + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { + u16 link_present_elems[PRESENT_ELEMS_MAX] = {}; + const u8 *extra_elems; + size_t extra_elems_len; + size_t extra_used; + u8 *subelem_len = NULL; + __le16 ctrl; + + if (!assoc_data->link[link_id].bss || + link_id == assoc_data->assoc_link_id) + continue; + + extra_elems = assoc_data->link[link_id].elems; + extra_elems_len = assoc_data->link[link_id].elems_len; + + 
skb_put_u8(skb, IEEE80211_MLE_SUBELEM_PER_STA_PROFILE); + subelem_len = skb_put(skb, 1); + + ctrl = cpu_to_le16(link_id | + IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE | + IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT); + skb_put_data(skb, &ctrl, sizeof(ctrl)); + skb_put_u8(skb, 1 + ETH_ALEN); /* STA Info Length */ + skb_put_data(skb, assoc_data->link[link_id].addr, + ETH_ALEN); + /* + * Now add the contents of the (re)association request, + * but the "listen interval" and "current AP address" + * (if applicable) are skipped. So we only have + * the capability field (remember the position and fill + * later), followed by the elements added below by + * calling ieee80211_assoc_link_elems(). + */ + capab_pos = skb_put(skb, 2); + + extra_used = ieee80211_assoc_link_elems(sdata, skb, &capab, + ext_capa, + extra_elems, + extra_elems_len, + link_id, NULL, + link_present_elems); + if (extra_elems) + skb_put_data(skb, extra_elems + extra_used, + extra_elems_len - extra_used); + + put_unaligned_le16(capab, capab_pos); + + ieee80211_add_non_inheritance_elem(skb, outer_present_elems, + link_present_elems); + + ieee80211_fragment_element(skb, subelem_len, + IEEE80211_MLE_SUBELEM_FRAGMENT); + } + + ieee80211_fragment_element(skb, ml_elem_len, WLAN_EID_FRAGMENT); +} + +static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; + struct ieee80211_link_data *link; + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt; + u8 *pos, qos_info, *ie_start; + size_t offset, noffset; + u16 capab = WLAN_CAPABILITY_ESS, link_capab; + __le16 listen_int; + struct element *ext_capa = NULL; + enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif); + struct ieee80211_prep_tx_info info = {}; + unsigned int link_id, n_links = 0; + u16 present_elems[PRESENT_ELEMS_MAX] = {}; + void *capab_pos; + size_t size; + int ret; + + /* we know it's writable, cast away the const */ + if (assoc_data->ie_len) + ext_capa = (void *)cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, + assoc_data->ie, + assoc_data->ie_len); + + sdata_assert_lock(sdata); + + size = local->hw.extra_tx_headroom + + sizeof(*mgmt) + /* bit too much but doesn't matter */ + 2 + assoc_data->ssid_len + /* SSID */ + assoc_data->ie_len + /* extra IEs */ + (assoc_data->fils_kek_len ? 
16 /* AES-SIV */ : 0) + + 9; /* WMM */ + + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { + struct cfg80211_bss *cbss = assoc_data->link[link_id].bss; + const struct ieee80211_sband_iftype_data *iftd; + struct ieee80211_supported_band *sband; + + if (!cbss) + continue; + + sband = local->hw.wiphy->bands[cbss->channel->band]; + + n_links++; + /* add STA profile elements length */ + size += assoc_data->link[link_id].elems_len; + /* and supported rates length */ + size += 4 + sband->n_bitrates; + /* supported channels */ + size += 2 + 2 * sband->n_channels; + + iftd = ieee80211_get_sband_iftype_data(sband, iftype); + if (iftd) + size += iftd->vendor_elems.len; + + /* power capability */ + size += 4; + + /* HT, VHT, HE, EHT */ + size += 2 + sizeof(struct ieee80211_ht_cap); + size += 2 + sizeof(struct ieee80211_vht_cap); + size += 2 + 1 + sizeof(struct ieee80211_he_cap_elem) + + sizeof(struct ieee80211_he_mcs_nss_supp) + + IEEE80211_HE_PPE_THRES_MAX_LEN; + + if (sband->band == NL80211_BAND_6GHZ) + size += 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa); + + size += 2 + 1 + sizeof(struct ieee80211_eht_cap_elem) + + sizeof(struct ieee80211_eht_mcs_nss_supp) + + IEEE80211_EHT_PPE_THRES_MAX_LEN; + + /* non-inheritance element */ + size += 2 + 2 + PRESENT_ELEMS_MAX; + + /* should be the same across all BSSes */ + if (cbss->capability & WLAN_CAPABILITY_PRIVACY) + capab |= WLAN_CAPABILITY_PRIVACY; + } + + if (ieee80211_vif_is_mld(&sdata->vif)) { + /* consider the multi-link element with STA profile */ + size += sizeof(struct ieee80211_multi_link_elem); + /* max common info field in basic multi-link element */ + size += sizeof(struct ieee80211_mle_basic_common_info) + + 2 + /* capa & op */ + 2; /* EML capa */ + + /* + * The capability elements were already considered above; + * note this over-estimates a bit because there's no + * STA profile for the assoc link. + */ + size += (n_links - 1) * + (1 + 1 + /* subelement ID/length */ + 2 + /* STA control */ + 1 + ETH_ALEN + 2 /* STA Info field */); + } + + link = sdata_dereference(sdata->link[assoc_data->assoc_link_id], sdata); + if (WARN_ON(!link)) + return -EINVAL; + + if (WARN_ON(!assoc_data->link[assoc_data->assoc_link_id].bss)) + return -EINVAL; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, local->hw.extra_tx_headroom); + + if (ifmgd->flags & IEEE80211_STA_ENABLE_RRM) + capab |= WLAN_CAPABILITY_RADIO_MEASURE; + + /* Set MBSSID support for HE AP if needed */ + if (ieee80211_hw_check(&local->hw, SUPPORTS_ONLY_HE_MULTI_BSSID) && + !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) && + ext_capa && ext_capa->datalen >= 3) + ext_capa->data[2] |= WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT; + + mgmt = skb_put_zero(skb, 24); + memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); + + listen_int = cpu_to_le16(assoc_data->s1g ? 
+ ieee80211_encode_usf(local->hw.conf.listen_interval) : + local->hw.conf.listen_interval); + if (!is_zero_ether_addr(assoc_data->prev_ap_addr)) { + skb_put(skb, 10); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_REASSOC_REQ); + capab_pos = &mgmt->u.reassoc_req.capab_info; + mgmt->u.reassoc_req.listen_interval = listen_int; + memcpy(mgmt->u.reassoc_req.current_ap, + assoc_data->prev_ap_addr, ETH_ALEN); + info.subtype = IEEE80211_STYPE_REASSOC_REQ; + } else { + skb_put(skb, 4); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ASSOC_REQ); + capab_pos = &mgmt->u.assoc_req.capab_info; + mgmt->u.assoc_req.listen_interval = listen_int; + info.subtype = IEEE80211_STYPE_ASSOC_REQ; + } + + /* SSID */ + pos = skb_put(skb, 2 + assoc_data->ssid_len); + ie_start = pos; + *pos++ = WLAN_EID_SSID; + *pos++ = assoc_data->ssid_len; + memcpy(pos, assoc_data->ssid, assoc_data->ssid_len); + + /* add the elements for the assoc (main) link */ + link_capab = capab; + offset = ieee80211_assoc_link_elems(sdata, skb, &link_capab, + ext_capa, + assoc_data->ie, + assoc_data->ie_len, + assoc_data->assoc_link_id, link, + present_elems); + put_unaligned_le16(link_capab, capab_pos); + + /* if present, add any custom non-vendor IEs */ + if (assoc_data->ie_len) { + noffset = ieee80211_ie_split_vendor(assoc_data->ie, + assoc_data->ie_len, + offset); + skb_put_data(skb, assoc_data->ie + offset, noffset - offset); + offset = noffset; + } + + if (assoc_data->wmm) { + if (assoc_data->uapsd) { + qos_info = ifmgd->uapsd_queues; + qos_info |= (ifmgd->uapsd_max_sp_len << + IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT); + } else { + qos_info = 0; + } + + pos = ieee80211_add_wmm_info_ie(skb_put(skb, 9), qos_info); + } + + /* add any remaining custom (i.e. 
vendor specific here) IEs */ + if (assoc_data->ie_len) { + noffset = assoc_data->ie_len; + skb_put_data(skb, assoc_data->ie + offset, noffset - offset); + } + + if (assoc_data->fils_kek_len) { + ret = fils_encrypt_assoc_req(skb, assoc_data); + if (ret < 0) { + dev_kfree_skb(skb); + return ret; + } + } + + pos = skb_tail_pointer(skb); + kfree(ifmgd->assoc_req_ies); + ifmgd->assoc_req_ies = kmemdup(ie_start, pos - ie_start, GFP_ATOMIC); + if (!ifmgd->assoc_req_ies) { + dev_kfree_skb(skb); + return -ENOMEM; + } + + ifmgd->assoc_req_ies_len = pos - ie_start; + + drv_mgd_prepare_tx(local, sdata, &info); + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS | + IEEE80211_TX_INTFL_MLME_CONN_TX; + ieee80211_tx_skb(sdata, skb); + + return 0; +} + +void ieee80211_send_pspoll(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_pspoll *pspoll; + struct sk_buff *skb; + + skb = ieee80211_pspoll_get(&local->hw, &sdata->vif); + if (!skb) + return; + + pspoll = (struct ieee80211_pspoll *) skb->data; + pspoll->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + ieee80211_tx_skb(sdata, skb); +} + +void ieee80211_send_nullfunc(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + bool powersave) +{ + struct sk_buff *skb; + struct ieee80211_hdr_3addr *nullfunc; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, -1, + !ieee80211_hw_check(&local->hw, + DOESNT_SUPPORT_QOS_NDP)); + if (!skb) + return; + + nullfunc = (struct ieee80211_hdr_3addr *) skb->data; + if (powersave) + nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | + IEEE80211_TX_INTFL_OFFCHAN_TX_OK; + + if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + + if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE; + + ieee80211_tx_skb(sdata, skb); +} + +void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + struct sk_buff *skb; + struct ieee80211_hdr *nullfunc; + __le16 fc; + + if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) + return; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30); + if (!skb) + return; + + skb_reserve(skb, local->hw.extra_tx_headroom); + + nullfunc = skb_put_zero(skb, 30); + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | + IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); + nullfunc->frame_control = fc; + memcpy(nullfunc->addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN); + memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); + memcpy(nullfunc->addr3, sdata->deflink.u.mgd.bssid, ETH_ALEN); + memcpy(nullfunc->addr4, sdata->vif.addr, ETH_ALEN); + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE; + ieee80211_tx_skb(sdata, skb); +} + +/* spectrum management related things */ +static void ieee80211_chswitch_work(struct wiphy *wiphy, + struct wiphy_work *work) +{ + struct ieee80211_link_data *link = + container_of(work, struct ieee80211_link_data, + u.mgd.chswitch_work.work); + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local 
*local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + int ret; + + if (!ieee80211_sdata_running(sdata)) + return; + + sdata_lock(sdata); + mutex_lock(&local->mtx); + mutex_lock(&local->chanctx_mtx); + + if (!ifmgd->associated) + goto out; + + if (!link->conf->csa_active) + goto out; + + /* + * using reservation isn't immediate as it may be deferred until later + * with multi-vif. once reservation is complete it will re-schedule the + * work with no reserved_chanctx so verify chandef to check if it + * completed successfully + */ + + if (link->reserved_chanctx) { + /* + * with multi-vif csa driver may call ieee80211_csa_finish() + * many times while waiting for other interfaces to use their + * reservations + */ + if (link->reserved_ready) + goto out; + + ret = ieee80211_link_use_reserved_context(link); + if (ret) { + sdata_info(sdata, + "failed to use reserved channel context, disconnecting (err=%d)\n", + ret); + wiphy_work_queue(sdata->local->hw.wiphy, + &ifmgd->csa_connection_drop_work); + goto out; + } + + goto out; + } + + if (!cfg80211_chandef_identical(&link->conf->chandef, + &link->csa_chandef)) { + sdata_info(sdata, + "failed to finalize channel switch, disconnecting\n"); + wiphy_work_queue(sdata->local->hw.wiphy, + &ifmgd->csa_connection_drop_work); + goto out; + } + + link->u.mgd.csa_waiting_bcn = true; + + ieee80211_sta_reset_beacon_monitor(sdata); + ieee80211_sta_reset_conn_monitor(sdata); + +out: + mutex_unlock(&local->chanctx_mtx); + mutex_unlock(&local->mtx); + sdata_unlock(sdata); +} + +static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + int ret; + + sdata_assert_lock(sdata); + + WARN_ON(!link->conf->csa_active); + + if (link->csa_block_tx) { + ieee80211_wake_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_CSA); + link->csa_block_tx = false; + } + + link->conf->csa_active = false; + link->u.mgd.csa_waiting_bcn = false; + /* + * If the CSA IE is still present on the beacon after the switch, + * we need to consider it as a new CSA (possibly to self). 
+ */ + link->u.mgd.beacon_crc_valid = false; + + ret = drv_post_channel_switch(sdata); + if (ret) { + sdata_info(sdata, + "driver post channel switch failed, disconnecting\n"); + wiphy_work_queue(sdata->local->hw.wiphy, + &ifmgd->csa_connection_drop_work); + return; + } + + cfg80211_ch_switch_notify(sdata->dev, &link->reserved_chandef, 0, 0); +} + +void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif))) + success = false; + + trace_api_chswitch_done(sdata, success); + if (!success) { + sdata_info(sdata, + "driver channel switch failed, disconnecting\n"); + wiphy_work_queue(sdata->local->hw.wiphy, + &ifmgd->csa_connection_drop_work); + } else { + wiphy_delayed_work_queue(sdata->local->hw.wiphy, + &sdata->deflink.u.mgd.chswitch_work, + 0); + } +} +EXPORT_SYMBOL(ieee80211_chswitch_done); + +static void +ieee80211_sta_abort_chanswitch(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + + if (!local->ops->abort_channel_switch) + return; + + mutex_lock(&local->mtx); + + mutex_lock(&local->chanctx_mtx); + ieee80211_link_unreserve_chanctx(link); + mutex_unlock(&local->chanctx_mtx); + + if (link->csa_block_tx) + ieee80211_wake_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_CSA); + + link->csa_block_tx = false; + link->conf->csa_active = false; + + mutex_unlock(&local->mtx); + + drv_abort_channel_switch(sdata); +} + +static void +ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link, + u64 timestamp, u32 device_timestamp, + struct ieee802_11_elems *elems, + bool beacon) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct cfg80211_bss *cbss = link->u.mgd.bss; + struct ieee80211_chanctx_conf *conf; + struct ieee80211_chanctx *chanctx; + enum nl80211_band current_band; + struct ieee80211_csa_ie csa_ie; + struct ieee80211_channel_switch ch_switch; + struct ieee80211_bss *bss; + unsigned long timeout; + int res; + + sdata_assert_lock(sdata); + + if (!cbss) + return; + + current_band = cbss->channel->band; + bss = (void *)cbss->priv; + res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band, + bss->vht_cap_info, + link->u.mgd.conn_flags, + link->u.mgd.bssid, &csa_ie); + + if (!res) { + ch_switch.timestamp = timestamp; + ch_switch.device_timestamp = device_timestamp; + ch_switch.block_tx = csa_ie.mode; + ch_switch.chandef = csa_ie.chandef; + ch_switch.count = csa_ie.count; + ch_switch.delay = csa_ie.max_switch_time; + } + + if (res < 0) + goto lock_and_drop_connection; + + if (beacon && link->conf->csa_active && + !link->u.mgd.csa_waiting_bcn) { + if (res) + ieee80211_sta_abort_chanswitch(link); + else + drv_channel_switch_rx_beacon(sdata, &ch_switch); + return; + } else if (link->conf->csa_active || res) { + /* disregard subsequent announcements if already processing */ + return; + } + + if (link->conf->chandef.chan->band != + csa_ie.chandef.chan->band) { + sdata_info(sdata, + "AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", + link->u.mgd.bssid, + csa_ie.chandef.chan->center_freq, + csa_ie.chandef.width, csa_ie.chandef.center_freq1, + csa_ie.chandef.center_freq2); + goto lock_and_drop_connection; + } + + if (!cfg80211_chandef_usable(local->hw.wiphy, 
&csa_ie.chandef, + IEEE80211_CHAN_DISABLED)) { + sdata_info(sdata, + "AP %pM switches to unsupported channel " + "(%d.%03d MHz, width:%d, CF1/2: %d.%03d/%d MHz), " + "disconnecting\n", + link->u.mgd.bssid, + csa_ie.chandef.chan->center_freq, + csa_ie.chandef.chan->freq_offset, + csa_ie.chandef.width, csa_ie.chandef.center_freq1, + csa_ie.chandef.freq1_offset, + csa_ie.chandef.center_freq2); + goto lock_and_drop_connection; + } + + if (cfg80211_chandef_identical(&csa_ie.chandef, + &link->conf->chandef) && + (!csa_ie.mode || !beacon)) { + if (link->u.mgd.csa_ignored_same_chan) + return; + sdata_info(sdata, + "AP %pM tries to chanswitch to same channel, ignore\n", + link->u.mgd.bssid); + link->u.mgd.csa_ignored_same_chan = true; + return; + } + + /* + * Drop all TDLS peers - either we disconnect or move to a different + * channel from this point on. There's no telling what our peer will do. + * The TDLS WIDER_BW scenario is also problematic, as peers might now + * have an incompatible wider chandef. + */ + ieee80211_teardown_tdls_peers(sdata); + + mutex_lock(&local->mtx); + mutex_lock(&local->chanctx_mtx); + conf = rcu_dereference_protected(link->conf->chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + if (!conf) { + sdata_info(sdata, + "no channel context assigned to vif?, disconnecting\n"); + goto drop_connection; + } + + chanctx = container_of(conf, struct ieee80211_chanctx, conf); + + if (local->use_chanctx && + !ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) { + sdata_info(sdata, + "driver doesn't support chan-switch with channel contexts\n"); + goto drop_connection; + } + + if (drv_pre_channel_switch(sdata, &ch_switch)) { + sdata_info(sdata, + "preparing for channel switch failed, disconnecting\n"); + goto drop_connection; + } + + res = ieee80211_link_reserve_chanctx(link, &csa_ie.chandef, + chanctx->mode, false); + if (res) { + sdata_info(sdata, + "failed to reserve channel context for channel switch, disconnecting (err=%d)\n", + res); + goto drop_connection; + } + mutex_unlock(&local->chanctx_mtx); + + link->conf->csa_active = true; + link->csa_chandef = csa_ie.chandef; + link->csa_block_tx = csa_ie.mode; + link->u.mgd.csa_ignored_same_chan = false; + link->u.mgd.beacon_crc_valid = false; + + if (link->csa_block_tx) + ieee80211_stop_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_CSA); + mutex_unlock(&local->mtx); + + cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef, + link->link_id, csa_ie.count, + csa_ie.mode, 0); + + if (local->ops->channel_switch) { + /* use driver's channel switch callback */ + drv_channel_switch(local, sdata, &ch_switch); + return; + } + + /* channel switch handled in software */ + timeout = TU_TO_JIFFIES((max_t(int, csa_ie.count, 1) - 1) * + cbss->beacon_interval); + wiphy_delayed_work_queue(local->hw.wiphy, + &link->u.mgd.chswitch_work, + timeout); + return; + lock_and_drop_connection: + mutex_lock(&local->mtx); + mutex_lock(&local->chanctx_mtx); + drop_connection: + /* + * This is just so that the disconnect flow will know that + * we were trying to switch channel and failed. In case the + * mode is 1 (we are not allowed to Tx), we will know not to + * send a deauthentication frame. Those two fields will be + * reset when the disconnection worker runs. 
+ */ + link->conf->csa_active = true; + link->csa_block_tx = csa_ie.mode; + + wiphy_work_queue(sdata->local->hw.wiphy, + &ifmgd->csa_connection_drop_work); + mutex_unlock(&local->chanctx_mtx); + mutex_unlock(&local->mtx); +} + +static bool +ieee80211_find_80211h_pwr_constr(struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel *channel, + const u8 *country_ie, u8 country_ie_len, + const u8 *pwr_constr_elem, + int *chan_pwr, int *pwr_reduction) +{ + struct ieee80211_country_ie_triplet *triplet; + int chan = ieee80211_frequency_to_channel(channel->center_freq); + int i, chan_increment; + bool have_chan_pwr = false; + + /* Invalid IE */ + if (country_ie_len % 2 || country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) + return false; + + triplet = (void *)(country_ie + 3); + country_ie_len -= 3; + + switch (channel->band) { + default: + WARN_ON_ONCE(1); + fallthrough; + case NL80211_BAND_2GHZ: + case NL80211_BAND_60GHZ: + case NL80211_BAND_LC: + chan_increment = 1; + break; + case NL80211_BAND_5GHZ: + chan_increment = 4; + break; + case NL80211_BAND_6GHZ: + /* + * In the 6 GHz band, the "maximum transmit power level" + * field in the triplets is reserved, and thus will be + * zero and we shouldn't use it to control TX power. + * The actual TX power will be given in the transmit + * power envelope element instead. + */ + return false; + } + + /* find channel */ + while (country_ie_len >= 3) { + u8 first_channel = triplet->chans.first_channel; + + if (first_channel >= IEEE80211_COUNTRY_EXTENSION_ID) + goto next; + + for (i = 0; i < triplet->chans.num_channels; i++) { + if (first_channel + i * chan_increment == chan) { + have_chan_pwr = true; + *chan_pwr = triplet->chans.max_power; + break; + } + } + if (have_chan_pwr) + break; + + next: + triplet++; + country_ie_len -= 3; + } + + if (have_chan_pwr && pwr_constr_elem) + *pwr_reduction = *pwr_constr_elem; + else + *pwr_reduction = 0; + + return have_chan_pwr; +} + +static void ieee80211_find_cisco_dtpc(struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel *channel, + const u8 *cisco_dtpc_ie, + int *pwr_level) +{ + /* From practical testing, the first data byte of the DTPC element + * seems to contain the requested dBm level, and the CLI on Cisco + * APs clearly state the range is -127 to 127 dBm, which indicates + * a signed byte, although it seemingly never actually goes negative. + * The other byte seems to always be zero. 
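+ * Hence the value is read below with a cast to the signed __s8 type.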
+ */ + *pwr_level = (__s8)cisco_dtpc_ie[4]; +} + +static u64 ieee80211_handle_pwr_constr(struct ieee80211_link_data *link, + struct ieee80211_channel *channel, + struct ieee80211_mgmt *mgmt, + const u8 *country_ie, u8 country_ie_len, + const u8 *pwr_constr_ie, + const u8 *cisco_dtpc_ie) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + bool has_80211h_pwr = false, has_cisco_pwr = false; + int chan_pwr = 0, pwr_reduction_80211h = 0; + int pwr_level_cisco, pwr_level_80211h; + int new_ap_level; + __le16 capab = mgmt->u.probe_resp.capab_info; + + if (ieee80211_is_s1g_beacon(mgmt->frame_control)) + return 0; /* TODO */ + + if (country_ie && + (capab & cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT) || + capab & cpu_to_le16(WLAN_CAPABILITY_RADIO_MEASURE))) { + has_80211h_pwr = ieee80211_find_80211h_pwr_constr( + sdata, channel, country_ie, country_ie_len, + pwr_constr_ie, &chan_pwr, &pwr_reduction_80211h); + pwr_level_80211h = + max_t(int, 0, chan_pwr - pwr_reduction_80211h); + } + + if (cisco_dtpc_ie) { + ieee80211_find_cisco_dtpc( + sdata, channel, cisco_dtpc_ie, &pwr_level_cisco); + has_cisco_pwr = true; + } + + if (!has_80211h_pwr && !has_cisco_pwr) + return 0; + + /* If we have both 802.11h and Cisco DTPC, apply both limits + * by picking the smallest of the two power levels advertised. + */ + if (has_80211h_pwr && + (!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) { + new_ap_level = pwr_level_80211h; + + if (link->ap_power_level == new_ap_level) + return 0; + + sdata_dbg(sdata, + "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n", + pwr_level_80211h, chan_pwr, pwr_reduction_80211h, + link->u.mgd.bssid); + } else { /* has_cisco_pwr is always true here. */ + new_ap_level = pwr_level_cisco; + + if (link->ap_power_level == new_ap_level) + return 0; + + sdata_dbg(sdata, + "Limiting TX power to %d dBm as advertised by %pM\n", + pwr_level_cisco, link->u.mgd.bssid); + } + + link->ap_power_level = new_ap_level; + if (__ieee80211_recalc_txpower(sdata)) + return BSS_CHANGED_TXPOWER; + return 0; +} + +/* powersave */ +static void ieee80211_enable_ps(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_conf *conf = &local->hw.conf; + + /* + * If we are scanning right now then the parameters will + * take effect when scan finishes. 
+ */ + if (local->scanning) + return; + + if (conf->dynamic_ps_timeout > 0 && + !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) { + mod_timer(&local->dynamic_ps_timer, jiffies + + msecs_to_jiffies(conf->dynamic_ps_timeout)); + } else { + if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) + ieee80211_send_nullfunc(local, sdata, true); + + if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) && + ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) + return; + + conf->flags |= IEEE80211_CONF_PS; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + } +} + +static void ieee80211_change_ps(struct ieee80211_local *local) +{ + struct ieee80211_conf *conf = &local->hw.conf; + + if (local->ps_sdata) { + ieee80211_enable_ps(local, local->ps_sdata); + } else if (conf->flags & IEEE80211_CONF_PS) { + conf->flags &= ~IEEE80211_CONF_PS; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + del_timer_sync(&local->dynamic_ps_timer); + cancel_work_sync(&local->dynamic_ps_enable_work); + } +} + +static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *mgd = &sdata->u.mgd; + struct sta_info *sta = NULL; + bool authorized = false; + + if (!mgd->powersave) + return false; + + if (mgd->broken_ap) + return false; + + if (!mgd->associated) + return false; + + if (mgd->flags & IEEE80211_STA_CONNECTION_POLL) + return false; + + if (!(local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO) && + !sdata->deflink.u.mgd.have_beacon) + return false; + + rcu_read_lock(); + sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr); + if (sta) + authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); + rcu_read_unlock(); + + return authorized; +} + +/* need to hold RTNL or interface lock */ +void ieee80211_recalc_ps(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata, *found = NULL; + int count = 0; + int timeout; + + if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS) || + ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) { + local->ps_sdata = NULL; + return; + } + + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + if (sdata->vif.type == NL80211_IFTYPE_AP) { + /* If an AP vif is found, then disable PS + * by setting the count to zero thereby setting + * ps_sdata to NULL. 
+ */ + count = 0; + break; + } + if (sdata->vif.type != NL80211_IFTYPE_STATION) + continue; + found = sdata; + count++; + } + + if (count == 1 && ieee80211_powersave_allowed(found)) { + u8 dtimper = found->deflink.u.mgd.dtim_period; + + timeout = local->dynamic_ps_forced_timeout; + if (timeout < 0) + timeout = 100; + local->hw.conf.dynamic_ps_timeout = timeout; + + /* If the TIM IE is invalid, pretend the value is 1 */ + if (!dtimper) + dtimper = 1; + + local->hw.conf.ps_dtim_period = dtimper; + local->ps_sdata = found; + } else { + local->ps_sdata = NULL; + } + + ieee80211_change_ps(local); +} + +void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata) +{ + bool ps_allowed = ieee80211_powersave_allowed(sdata); + + if (sdata->vif.cfg.ps != ps_allowed) { + sdata->vif.cfg.ps = ps_allowed; + ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_PS); + } +} + +void ieee80211_dynamic_ps_disable_work(struct work_struct *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, + dynamic_ps_disable_work); + + if (local->hw.conf.flags & IEEE80211_CONF_PS) { + local->hw.conf.flags &= ~IEEE80211_CONF_PS; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + } + + ieee80211_wake_queues_by_reason(&local->hw, + IEEE80211_MAX_QUEUE_MAP, + IEEE80211_QUEUE_STOP_REASON_PS, + false); +} + +void ieee80211_dynamic_ps_enable_work(struct work_struct *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, + dynamic_ps_enable_work); + struct ieee80211_sub_if_data *sdata = local->ps_sdata; + struct ieee80211_if_managed *ifmgd; + unsigned long flags; + int q; + + /* can only happen when PS was just disabled anyway */ + if (!sdata) + return; + + ifmgd = &sdata->u.mgd; + + if (local->hw.conf.flags & IEEE80211_CONF_PS) + return; + + if (local->hw.conf.dynamic_ps_timeout > 0) { + /* don't enter PS if TX frames are pending */ + if (drv_tx_frames_pending(local)) { + mod_timer(&local->dynamic_ps_timer, jiffies + + msecs_to_jiffies( + local->hw.conf.dynamic_ps_timeout)); + return; + } + + /* + * transmission can be stopped by others which leads to + * dynamic_ps_timer expiry. Postpone the ps timer if it + * is not the actual idle state. 
+ */ + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + for (q = 0; q < local->hw.queues; q++) { + if (local->queue_stop_reasons[q]) { + spin_unlock_irqrestore(&local->queue_stop_reason_lock, + flags); + mod_timer(&local->dynamic_ps_timer, jiffies + + msecs_to_jiffies( + local->hw.conf.dynamic_ps_timeout)); + return; + } + } + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + } + + if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) && + !(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) { + if (drv_tx_frames_pending(local)) { + mod_timer(&local->dynamic_ps_timer, jiffies + + msecs_to_jiffies( + local->hw.conf.dynamic_ps_timeout)); + } else { + ieee80211_send_nullfunc(local, sdata, true); + /* Flush to get the tx status of nullfunc frame */ + ieee80211_flush_queues(local, sdata, false); + } + } + + if (!(ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) && + ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) || + (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) { + ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; + local->hw.conf.flags |= IEEE80211_CONF_PS; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + } +} + +void ieee80211_dynamic_ps_timer(struct timer_list *t) +{ + struct ieee80211_local *local = from_timer(local, t, dynamic_ps_timer); + + ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work); +} + +void ieee80211_dfs_cac_timer_work(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct ieee80211_link_data *link = + container_of(delayed_work, struct ieee80211_link_data, + dfs_cac_timer_work); + struct cfg80211_chan_def chandef = link->conf->chandef; + struct ieee80211_sub_if_data *sdata = link->sdata; + + mutex_lock(&sdata->local->mtx); + if (sdata->wdev.cac_started) { + ieee80211_link_release_channel(link); + cfg80211_cac_event(sdata->dev, &chandef, + NL80211_RADAR_CAC_FINISHED, + GFP_KERNEL); + } + mutex_unlock(&sdata->local->mtx); +} + +static bool +__ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + bool ret = false; + int ac; + + if (local->hw.queues < IEEE80211_NUM_ACS) + return false; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac]; + int non_acm_ac; + unsigned long now = jiffies; + + if (tx_tspec->action == TX_TSPEC_ACTION_NONE && + tx_tspec->admitted_time && + time_after(now, tx_tspec->time_slice_start + HZ)) { + tx_tspec->consumed_tx_time = 0; + tx_tspec->time_slice_start = now; + + if (tx_tspec->downgraded) + tx_tspec->action = + TX_TSPEC_ACTION_STOP_DOWNGRADE; + } + + switch (tx_tspec->action) { + case TX_TSPEC_ACTION_STOP_DOWNGRADE: + /* take the original parameters */ + if (drv_conf_tx(local, &sdata->deflink, ac, + &sdata->deflink.tx_conf[ac])) + link_err(&sdata->deflink, + "failed to set TX queue parameters for queue %d\n", + ac); + tx_tspec->action = TX_TSPEC_ACTION_NONE; + tx_tspec->downgraded = false; + ret = true; + break; + case TX_TSPEC_ACTION_DOWNGRADE: + if (time_after(now, tx_tspec->time_slice_start + HZ)) { + tx_tspec->action = TX_TSPEC_ACTION_NONE; + ret = true; + break; + } + /* downgrade next lower non-ACM AC */ + for (non_acm_ac = ac + 1; + non_acm_ac < IEEE80211_NUM_ACS; + non_acm_ac++) + if (!(sdata->wmm_acm & BIT(7 - 2 * non_acm_ac))) + break; + /* Usually the loop will result in using BK even if it + * requires admission control, but such a configuration + * makes no 
sense and we have to transmit somehow - the + * AC selection does the same thing. + * If we started out trying to downgrade from BK, then + * the extra condition here might be needed. + */ + if (non_acm_ac >= IEEE80211_NUM_ACS) + non_acm_ac = IEEE80211_AC_BK; + if (drv_conf_tx(local, &sdata->deflink, ac, + &sdata->deflink.tx_conf[non_acm_ac])) + link_err(&sdata->deflink, + "failed to set TX queue parameters for queue %d\n", + ac); + tx_tspec->action = TX_TSPEC_ACTION_NONE; + ret = true; + schedule_delayed_work(&ifmgd->tx_tspec_wk, + tx_tspec->time_slice_start + HZ - now + 1); + break; + case TX_TSPEC_ACTION_NONE: + /* nothing now */ + break; + } + } + + return ret; +} + +void ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata) +{ + if (__ieee80211_sta_handle_tspec_ac_params(sdata)) + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_QOS); +} + +static void ieee80211_sta_handle_tspec_ac_params_wk(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata; + + sdata = container_of(work, struct ieee80211_sub_if_data, + u.mgd.tx_tspec_wk.work); + ieee80211_sta_handle_tspec_ac_params(sdata); +} + +void ieee80211_mgd_set_link_qos_params(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_tx_queue_params *params = link->tx_conf; + u8 ac; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + mlme_dbg(sdata, + "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n", + ac, params[ac].acm, + params[ac].aifs, params[ac].cw_min, params[ac].cw_max, + params[ac].txop, params[ac].uapsd, + ifmgd->tx_tspec[ac].downgraded); + if (!ifmgd->tx_tspec[ac].downgraded && + drv_conf_tx(local, link, ac, ¶ms[ac])) + link_err(link, + "failed to set TX queue parameters for AC %d\n", + ac); + } +} + +/* MLME */ +static bool +ieee80211_sta_wmm_params(struct ieee80211_local *local, + struct ieee80211_link_data *link, + const u8 *wmm_param, size_t wmm_param_len, + const struct ieee80211_mu_edca_param_set *mu_edca) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_tx_queue_params params[IEEE80211_NUM_ACS]; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + size_t left; + int count, mu_edca_count, ac; + const u8 *pos; + u8 uapsd_queues = 0; + + if (!local->ops->conf_tx) + return false; + + if (local->hw.queues < IEEE80211_NUM_ACS) + return false; + + if (!wmm_param) + return false; + + if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) + return false; + + if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) + uapsd_queues = ifmgd->uapsd_queues; + + count = wmm_param[6] & 0x0f; + /* -1 is the initial value of ifmgd->mu_edca_last_param_set. + * if mu_edca was preset before and now it disappeared tell + * the driver about it. + */ + mu_edca_count = mu_edca ? 
mu_edca->mu_qos_info & 0x0f : -1; + if (count == link->u.mgd.wmm_last_param_set && + mu_edca_count == link->u.mgd.mu_edca_last_param_set) + return false; + link->u.mgd.wmm_last_param_set = count; + link->u.mgd.mu_edca_last_param_set = mu_edca_count; + + pos = wmm_param + 8; + left = wmm_param_len - 8; + + memset(¶ms, 0, sizeof(params)); + + sdata->wmm_acm = 0; + for (; left >= 4; left -= 4, pos += 4) { + int aci = (pos[0] >> 5) & 0x03; + int acm = (pos[0] >> 4) & 0x01; + bool uapsd = false; + + switch (aci) { + case 1: /* AC_BK */ + ac = IEEE80211_AC_BK; + if (acm) + sdata->wmm_acm |= BIT(1) | BIT(2); /* BK/- */ + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) + uapsd = true; + params[ac].mu_edca = !!mu_edca; + if (mu_edca) + params[ac].mu_edca_param_rec = mu_edca->ac_bk; + break; + case 2: /* AC_VI */ + ac = IEEE80211_AC_VI; + if (acm) + sdata->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */ + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) + uapsd = true; + params[ac].mu_edca = !!mu_edca; + if (mu_edca) + params[ac].mu_edca_param_rec = mu_edca->ac_vi; + break; + case 3: /* AC_VO */ + ac = IEEE80211_AC_VO; + if (acm) + sdata->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */ + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) + uapsd = true; + params[ac].mu_edca = !!mu_edca; + if (mu_edca) + params[ac].mu_edca_param_rec = mu_edca->ac_vo; + break; + case 0: /* AC_BE */ + default: + ac = IEEE80211_AC_BE; + if (acm) + sdata->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */ + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) + uapsd = true; + params[ac].mu_edca = !!mu_edca; + if (mu_edca) + params[ac].mu_edca_param_rec = mu_edca->ac_be; + break; + } + + params[ac].aifs = pos[0] & 0x0f; + + if (params[ac].aifs < 2) { + link_info(link, + "AP has invalid WMM params (AIFSN=%d for ACI %d), will use 2\n", + params[ac].aifs, aci); + params[ac].aifs = 2; + } + params[ac].cw_max = ecw2cw((pos[1] & 0xf0) >> 4); + params[ac].cw_min = ecw2cw(pos[1] & 0x0f); + params[ac].txop = get_unaligned_le16(pos + 2); + params[ac].acm = acm; + params[ac].uapsd = uapsd; + + if (params[ac].cw_min == 0 || + params[ac].cw_min > params[ac].cw_max) { + link_info(link, + "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n", + params[ac].cw_min, params[ac].cw_max, aci); + return false; + } + ieee80211_regulatory_limit_wmm_params(sdata, ¶ms[ac], ac); + } + + /* WMM specification requires all 4 ACIs. 
*/ + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + if (params[ac].cw_min == 0) { + link_info(link, + "AP has invalid WMM params (missing AC %d), using defaults\n", + ac); + return false; + } + } + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + link->tx_conf[ac] = params[ac]; + + ieee80211_mgd_set_link_qos_params(link); + + /* enable WMM or activate new settings */ + link->conf->qos = true; + return true; +} + +static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata) +{ + lockdep_assert_held(&sdata->local->mtx); + + sdata->u.mgd.flags &= ~IEEE80211_STA_CONNECTION_POLL; + ieee80211_run_deferred_scan(sdata->local); +} + +static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata) +{ + mutex_lock(&sdata->local->mtx); + __ieee80211_stop_poll(sdata); + mutex_unlock(&sdata->local->mtx); +} + +static u64 ieee80211_handle_bss_capability(struct ieee80211_link_data *link, + u16 capab, bool erp_valid, u8 erp) +{ + struct ieee80211_bss_conf *bss_conf = link->conf; + struct ieee80211_supported_band *sband; + u64 changed = 0; + bool use_protection; + bool use_short_preamble; + bool use_short_slot; + + sband = ieee80211_get_link_sband(link); + if (!sband) + return changed; + + if (erp_valid) { + use_protection = (erp & WLAN_ERP_USE_PROTECTION) != 0; + use_short_preamble = (erp & WLAN_ERP_BARKER_PREAMBLE) == 0; + } else { + use_protection = false; + use_short_preamble = !!(capab & WLAN_CAPABILITY_SHORT_PREAMBLE); + } + + use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); + if (sband->band == NL80211_BAND_5GHZ || + sband->band == NL80211_BAND_6GHZ) + use_short_slot = true; + + if (use_protection != bss_conf->use_cts_prot) { + bss_conf->use_cts_prot = use_protection; + changed |= BSS_CHANGED_ERP_CTS_PROT; + } + + if (use_short_preamble != bss_conf->use_short_preamble) { + bss_conf->use_short_preamble = use_short_preamble; + changed |= BSS_CHANGED_ERP_PREAMBLE; + } + + if (use_short_slot != bss_conf->use_short_slot) { + bss_conf->use_short_slot = use_short_slot; + changed |= BSS_CHANGED_ERP_SLOT; + } + + return changed; +} + +static u64 ieee80211_link_set_associated(struct ieee80211_link_data *link, + struct cfg80211_bss *cbss) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_bss_conf *bss_conf = link->conf; + struct ieee80211_bss *bss = (void *)cbss->priv; + u64 changed = BSS_CHANGED_QOS; + + /* not really used in MLO */ + sdata->u.mgd.beacon_timeout = + usecs_to_jiffies(ieee80211_tu_to_usec(beacon_loss_count * + bss_conf->beacon_int)); + + changed |= ieee80211_handle_bss_capability(link, + bss_conf->assoc_capability, + bss->has_erp_value, + bss->erp_value); + + ieee80211_check_rate_mask(link); + + link->u.mgd.bss = cbss; + memcpy(link->u.mgd.bssid, cbss->bssid, ETH_ALEN); + + if (sdata->vif.p2p || + sdata->vif.driver_flags & IEEE80211_VIF_GET_NOA_UPDATE) { + const struct cfg80211_bss_ies *ies; + + rcu_read_lock(); + ies = rcu_dereference(cbss->ies); + if (ies) { + int ret; + + ret = cfg80211_get_p2p_attr( + ies->data, ies->len, + IEEE80211_P2P_ATTR_ABSENCE_NOTICE, + (u8 *) &bss_conf->p2p_noa_attr, + sizeof(bss_conf->p2p_noa_attr)); + if (ret >= 2) { + link->u.mgd.p2p_noa_index = + bss_conf->p2p_noa_attr.index; + changed |= BSS_CHANGED_P2P_PS; + } + } + rcu_read_unlock(); + } + + if (link->u.mgd.have_beacon) { + bss_conf->beacon_rate = bss->beacon_rate; + changed |= BSS_CHANGED_BEACON_INFO; + } else { + bss_conf->beacon_rate = NULL; + } + + /* Tell the driver to monitor connection quality (if supported) */ + if (sdata->vif.driver_flags & 
IEEE80211_VIF_SUPPORTS_CQM_RSSI && + bss_conf->cqm_rssi_thold) + changed |= BSS_CHANGED_CQM; + + return changed; +} + +static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgd_assoc_data *assoc_data, + u64 changed[IEEE80211_MLD_MAX_NUM_LINKS]) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg; + u64 vif_changed = BSS_CHANGED_ASSOC; + unsigned int link_id; + + sdata->u.mgd.associated = true; + + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { + struct cfg80211_bss *cbss = assoc_data->link[link_id].bss; + struct ieee80211_link_data *link; + + if (!cbss || + assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) + continue; + + if (ieee80211_vif_is_mld(&sdata->vif) && + !(ieee80211_vif_usable_links(&sdata->vif) & BIT(link_id))) + continue; + + link = sdata_dereference(sdata->link[link_id], sdata); + if (WARN_ON(!link)) + return; + + changed[link_id] |= ieee80211_link_set_associated(link, cbss); + } + + /* just to be sure */ + ieee80211_stop_poll(sdata); + + ieee80211_led_assoc(local, 1); + + vif_cfg->assoc = 1; + + /* Enable ARP filtering */ + if (vif_cfg->arp_addr_cnt) + vif_changed |= BSS_CHANGED_ARP_FILTER; + + if (ieee80211_vif_is_mld(&sdata->vif)) { + for (link_id = 0; + link_id < IEEE80211_MLD_MAX_NUM_LINKS; + link_id++) { + struct ieee80211_link_data *link; + struct cfg80211_bss *cbss = assoc_data->link[link_id].bss; + + if (!cbss || + !(BIT(link_id) & + ieee80211_vif_usable_links(&sdata->vif)) || + assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) + continue; + + link = sdata_dereference(sdata->link[link_id], sdata); + if (WARN_ON(!link)) + return; + + ieee80211_link_info_change_notify(sdata, link, + changed[link_id]); + + ieee80211_recalc_smps(sdata, link); + } + + ieee80211_vif_cfg_change_notify(sdata, vif_changed); + } else { + ieee80211_bss_info_change_notify(sdata, + vif_changed | changed[0]); + } + + mutex_lock(&local->iflist_mtx); + ieee80211_recalc_ps(local); + mutex_unlock(&local->iflist_mtx); + + /* leave this here to not change ordering in non-MLO cases */ + if (!ieee80211_vif_is_mld(&sdata->vif)) + ieee80211_recalc_smps(sdata, &sdata->deflink); + ieee80211_recalc_ps_vif(sdata); + + netif_carrier_on(sdata->dev); +} + +static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, + u16 stype, u16 reason, bool tx, + u8 *frame_buf) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = sdata->local; + unsigned int link_id; + u64 changed = 0; + struct ieee80211_prep_tx_info info = { + .subtype = stype, + }; + + sdata_assert_lock(sdata); + + if (WARN_ON_ONCE(tx && !frame_buf)) + return; + + if (WARN_ON(!ifmgd->associated)) + return; + + ieee80211_stop_poll(sdata); + + ifmgd->associated = false; + + /* other links will be destroyed */ + sdata->deflink.u.mgd.bss = NULL; + + netif_carrier_off(sdata->dev); + + /* + * if we want to get out of ps before disassoc (why?) we have + * to do it before sending disassoc, as otherwise the null-packet + * won't be valid. + */ + if (local->hw.conf.flags & IEEE80211_CONF_PS) { + local->hw.conf.flags &= ~IEEE80211_CONF_PS; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + } + local->ps_sdata = NULL; + + /* disable per-vif ps */ + ieee80211_recalc_ps_vif(sdata); + + /* make sure ongoing transmission finishes */ + synchronize_net(); + + /* + * drop any frame before deauth/disassoc, this can be data or + * management frame. 
Since we are disconnecting, we should not + * insist sending these frames which can take time and delay + * the disconnection and possible the roaming. + */ + if (tx) + ieee80211_flush_queues(local, sdata, true); + + /* deauthenticate/disassociate now */ + if (tx || frame_buf) { + /* + * In multi channel scenarios guarantee that the virtual + * interface is granted immediate airtime to transmit the + * deauthentication frame by calling mgd_prepare_tx, if the + * driver requested so. + */ + if (ieee80211_hw_check(&local->hw, DEAUTH_NEED_MGD_TX_PREP) && + !sdata->deflink.u.mgd.have_beacon) { + drv_mgd_prepare_tx(sdata->local, sdata, &info); + } + + ieee80211_send_deauth_disassoc(sdata, sdata->vif.cfg.ap_addr, + sdata->vif.cfg.ap_addr, stype, + reason, tx, frame_buf); + } + + /* flush out frame - make sure the deauth was actually sent */ + if (tx) + ieee80211_flush_queues(local, sdata, false); + + drv_mgd_complete_tx(sdata->local, sdata, &info); + + /* clear AP addr only after building the needed mgmt frames */ + eth_zero_addr(sdata->deflink.u.mgd.bssid); + eth_zero_addr(sdata->vif.cfg.ap_addr); + + sdata->vif.cfg.ssid_len = 0; + + /* remove AP and TDLS peers */ + sta_info_flush(sdata); + + /* finally reset all BSS / config parameters */ + if (!ieee80211_vif_is_mld(&sdata->vif)) + changed |= ieee80211_reset_erp_info(sdata); + + ieee80211_led_assoc(local, 0); + changed |= BSS_CHANGED_ASSOC; + sdata->vif.cfg.assoc = false; + + sdata->deflink.u.mgd.p2p_noa_index = -1; + memset(&sdata->vif.bss_conf.p2p_noa_attr, 0, + sizeof(sdata->vif.bss_conf.p2p_noa_attr)); + + /* on the next assoc, re-program HT/VHT parameters */ + memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa)); + memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask)); + memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa)); + memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask)); + + /* + * reset MU-MIMO ownership and group data in default link, + * if used, other links are destroyed + */ + memset(sdata->vif.bss_conf.mu_group.membership, 0, + sizeof(sdata->vif.bss_conf.mu_group.membership)); + memset(sdata->vif.bss_conf.mu_group.position, 0, + sizeof(sdata->vif.bss_conf.mu_group.position)); + if (!ieee80211_vif_is_mld(&sdata->vif)) + changed |= BSS_CHANGED_MU_GROUPS; + sdata->vif.bss_conf.mu_mimo_owner = false; + + sdata->deflink.ap_power_level = IEEE80211_UNSET_POWER_LEVEL; + + del_timer_sync(&local->dynamic_ps_timer); + cancel_work_sync(&local->dynamic_ps_enable_work); + + /* Disable ARP filtering */ + if (sdata->vif.cfg.arp_addr_cnt) + changed |= BSS_CHANGED_ARP_FILTER; + + sdata->vif.bss_conf.qos = false; + if (!ieee80211_vif_is_mld(&sdata->vif)) { + changed |= BSS_CHANGED_QOS; + /* The BSSID (not really interesting) and HT changed */ + changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; + ieee80211_bss_info_change_notify(sdata, changed); + } else { + ieee80211_vif_cfg_change_notify(sdata, changed); + } + + /* disassociated - set to defaults now */ + ieee80211_set_wmm_default(&sdata->deflink, false, false); + + del_timer_sync(&sdata->u.mgd.conn_mon_timer); + del_timer_sync(&sdata->u.mgd.bcn_mon_timer); + del_timer_sync(&sdata->u.mgd.timer); + + sdata->vif.bss_conf.dtim_period = 0; + sdata->vif.bss_conf.beacon_rate = NULL; + + sdata->deflink.u.mgd.have_beacon = false; + sdata->deflink.u.mgd.tracking_signal_avg = false; + sdata->deflink.u.mgd.disable_wmm_tracking = false; + + ifmgd->flags = 0; + sdata->deflink.u.mgd.conn_flags = 0; + mutex_lock(&local->mtx); + + for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { 
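+ /* release the channel context held by each remaining link */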
+ struct ieee80211_link_data *link; + + link = sdata_dereference(sdata->link[link_id], sdata); + if (!link) + continue; + ieee80211_link_release_channel(link); + } + + sdata->vif.bss_conf.csa_active = false; + sdata->deflink.u.mgd.csa_waiting_bcn = false; + sdata->deflink.u.mgd.csa_ignored_same_chan = false; + if (sdata->deflink.csa_block_tx) { + ieee80211_wake_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_CSA); + sdata->deflink.csa_block_tx = false; + } + mutex_unlock(&local->mtx); + + /* existing TX TSPEC sessions no longer exist */ + memset(ifmgd->tx_tspec, 0, sizeof(ifmgd->tx_tspec)); + cancel_delayed_work_sync(&ifmgd->tx_tspec_wk); + + sdata->vif.bss_conf.pwr_reduction = 0; + sdata->vif.bss_conf.tx_pwr_env_num = 0; + memset(sdata->vif.bss_conf.tx_pwr_env, 0, + sizeof(sdata->vif.bss_conf.tx_pwr_env)); + + ieee80211_vif_set_links(sdata, 0, 0); +} + +static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = sdata->local; + + mutex_lock(&local->mtx); + if (!(ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)) + goto out; + + __ieee80211_stop_poll(sdata); + + mutex_lock(&local->iflist_mtx); + ieee80211_recalc_ps(local); + mutex_unlock(&local->iflist_mtx); + + if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) + goto out; + + /* + * We've received a probe response, but are not sure whether + * we have or will be receiving any beacons or data, so let's + * schedule the timers again, just in case. + */ + ieee80211_sta_reset_beacon_monitor(sdata); + + mod_timer(&ifmgd->conn_mon_timer, + round_jiffies_up(jiffies + + IEEE80211_CONNECTION_IDLE_TIME)); +out: + mutex_unlock(&local->mtx); +} + +static void ieee80211_sta_tx_wmm_ac_notify(struct ieee80211_sub_if_data *sdata, + struct ieee80211_hdr *hdr, + u16 tx_time) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + u16 tid; + int ac; + struct ieee80211_sta_tx_tspec *tx_tspec; + unsigned long now = jiffies; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return; + + tid = ieee80211_get_tid(hdr); + ac = ieee80211_ac_from_tid(tid); + tx_tspec = &ifmgd->tx_tspec[ac]; + + if (likely(!tx_tspec->admitted_time)) + return; + + if (time_after(now, tx_tspec->time_slice_start + HZ)) { + tx_tspec->consumed_tx_time = 0; + tx_tspec->time_slice_start = now; + + if (tx_tspec->downgraded) { + tx_tspec->action = TX_TSPEC_ACTION_STOP_DOWNGRADE; + schedule_delayed_work(&ifmgd->tx_tspec_wk, 0); + } + } + + if (tx_tspec->downgraded) + return; + + tx_tspec->consumed_tx_time += tx_time; + + if (tx_tspec->consumed_tx_time >= tx_tspec->admitted_time) { + tx_tspec->downgraded = true; + tx_tspec->action = TX_TSPEC_ACTION_DOWNGRADE; + schedule_delayed_work(&ifmgd->tx_tspec_wk, 0); + } +} + +void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, + struct ieee80211_hdr *hdr, bool ack, u16 tx_time) +{ + ieee80211_sta_tx_wmm_ac_notify(sdata, hdr, tx_time); + + if (!ieee80211_is_any_nullfunc(hdr->frame_control) || + !sdata->u.mgd.probe_send_count) + return; + + if (ack) + sdata->u.mgd.probe_send_count = 0; + else + sdata->u.mgd.nullfunc_failed = true; + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); +} + +static void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata, + const u8 *src, const u8 *dst, + const u8 *ssid, size_t ssid_len, + struct ieee80211_channel *channel) +{ + struct sk_buff *skb; + + skb = ieee80211_build_probe_req(sdata, src, dst, (u32)-1, channel, + ssid, ssid_len, NULL, 0, + 
IEEE80211_PROBE_FLAG_DIRECTED); + if (skb) + ieee80211_tx_skb(sdata, skb); +} + +static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + u8 *dst = sdata->vif.cfg.ap_addr; + u8 unicast_limit = max(1, max_probe_tries - 3); + struct sta_info *sta; + + if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif))) + return; + + /* + * Try sending broadcast probe requests for the last three + * probe requests after the first ones failed since some + * buggy APs only support broadcast probe requests. + */ + if (ifmgd->probe_send_count >= unicast_limit) + dst = NULL; + + /* + * When the hardware reports an accurate Tx ACK status, it's + * better to send a nullfunc frame instead of a probe request, + * as it will kick us off the AP quickly if we aren't associated + * anymore. The timeout will be reset if the frame is ACKed by + * the AP. + */ + ifmgd->probe_send_count++; + + if (dst) { + mutex_lock(&sdata->local->sta_mtx); + sta = sta_info_get(sdata, dst); + if (!WARN_ON(!sta)) + ieee80211_check_fast_rx(sta); + mutex_unlock(&sdata->local->sta_mtx); + } + + if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) { + ifmgd->nullfunc_failed = false; + ieee80211_send_nullfunc(sdata->local, sdata, false); + } else { + ieee80211_mlme_send_probe_req(sdata, sdata->vif.addr, dst, + sdata->vif.cfg.ssid, + sdata->vif.cfg.ssid_len, + sdata->deflink.u.mgd.bss->channel); + } + + ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); + run_again(sdata, ifmgd->probe_timeout); +} + +static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, + bool beacon) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + bool already = false; + + if (WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif))) + return; + + if (!ieee80211_sdata_running(sdata)) + return; + + sdata_lock(sdata); + + if (!ifmgd->associated) + goto out; + + mutex_lock(&sdata->local->mtx); + + if (sdata->local->tmp_channel || sdata->local->scanning) { + mutex_unlock(&sdata->local->mtx); + goto out; + } + + if (sdata->local->suspending) { + /* reschedule after resume */ + mutex_unlock(&sdata->local->mtx); + ieee80211_reset_ap_probe(sdata); + goto out; + } + + if (beacon) { + mlme_dbg_ratelimited(sdata, + "detected beacon loss from AP (missed %d beacons) - probing\n", + beacon_loss_count); + + ieee80211_cqm_beacon_loss_notify(&sdata->vif, GFP_KERNEL); + } + + /* + * The driver/our work has already reported this event or the + * connection monitoring has kicked in and we have already sent + * a probe request. Or maybe the AP died and the driver keeps + * reporting until we disassociate... + * + * In either case we have to ignore the current call to this + * function (except for setting the correct probe reason bit) + * because otherwise we would reset the timer every time and + * never check whether we received a probe response! 
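+	 *
+	 * The poll state set below is cleared again through
+	 * __ieee80211_stop_poll() from ieee80211_reset_ap_probe() above,
+	 * once a probe response has actually been received.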
+ */ + if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) + already = true; + + ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL; + + mutex_unlock(&sdata->local->mtx); + + if (already) + goto out; + + mutex_lock(&sdata->local->iflist_mtx); + ieee80211_recalc_ps(sdata->local); + mutex_unlock(&sdata->local->iflist_mtx); + + ifmgd->probe_send_count = 0; + ieee80211_mgd_probe_ap_send(sdata); + out: + sdata_unlock(sdata); +} + +struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct cfg80211_bss *cbss; + struct sk_buff *skb; + const struct element *ssid; + int ssid_len; + + if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION || + ieee80211_vif_is_mld(&sdata->vif))) + return NULL; + + sdata_assert_lock(sdata); + + if (ifmgd->associated) + cbss = sdata->deflink.u.mgd.bss; + else if (ifmgd->auth_data) + cbss = ifmgd->auth_data->bss; + else if (ifmgd->assoc_data && ifmgd->assoc_data->link[0].bss) + cbss = ifmgd->assoc_data->link[0].bss; + else + return NULL; + + rcu_read_lock(); + ssid = ieee80211_bss_get_elem(cbss, WLAN_EID_SSID); + if (WARN_ONCE(!ssid || ssid->datalen > IEEE80211_MAX_SSID_LEN, + "invalid SSID element (len=%d)", + ssid ? ssid->datalen : -1)) + ssid_len = 0; + else + ssid_len = ssid->datalen; + + skb = ieee80211_build_probe_req(sdata, sdata->vif.addr, cbss->bssid, + (u32) -1, cbss->channel, + ssid->data, ssid_len, + NULL, 0, IEEE80211_PROBE_FLAG_DIRECTED); + rcu_read_unlock(); + + return skb; +} +EXPORT_SYMBOL(ieee80211_ap_probereq_get); + +static void ieee80211_report_disconnect(struct ieee80211_sub_if_data *sdata, + const u8 *buf, size_t len, bool tx, + u16 reason, bool reconnect) +{ + struct ieee80211_event event = { + .type = MLME_EVENT, + .u.mlme.data = tx ? DEAUTH_TX_EVENT : DEAUTH_RX_EVENT, + .u.mlme.reason = reason, + }; + + if (tx) + cfg80211_tx_mlme_mgmt(sdata->dev, buf, len, reconnect); + else + cfg80211_rx_mlme_mgmt(sdata->dev, buf, len); + + drv_event_callback(sdata->local, sdata, &event); +} + +static void ___ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + bool tx; + + if (!ifmgd->associated) + return; + + /* in MLO assume we have a link where we can TX the frame */ + tx = ieee80211_vif_is_mld(&sdata->vif) || + !sdata->deflink.csa_block_tx; + + if (!ifmgd->driver_disconnect) { + unsigned int link_id; + + /* + * AP is probably out of range (or not reachable for another + * reason) so remove the bss structs for that AP. In the case + * of multi-link, it's not clear that all of them really are + * out of range, but if they weren't the driver likely would + * have switched to just have a single link active? + */ + for (link_id = 0; + link_id < ARRAY_SIZE(sdata->link); + link_id++) { + struct ieee80211_link_data *link; + + link = sdata_dereference(sdata->link[link_id], sdata); + if (!link) + continue; + cfg80211_unlink_bss(local->hw.wiphy, link->u.mgd.bss); + link->u.mgd.bss = NULL; + } + } + + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, + ifmgd->driver_disconnect ? 
+ WLAN_REASON_DEAUTH_LEAVING : + WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, + tx, frame_buf); + mutex_lock(&local->mtx); + /* the other links will be destroyed */ + sdata->vif.bss_conf.csa_active = false; + sdata->deflink.u.mgd.csa_waiting_bcn = false; + if (sdata->deflink.csa_block_tx) { + ieee80211_wake_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_CSA); + sdata->deflink.csa_block_tx = false; + } + mutex_unlock(&local->mtx); + + ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx, + WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, + ifmgd->reconnect); + ifmgd->reconnect = false; +} + +static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) +{ + sdata_lock(sdata); + ___ieee80211_disconnect(sdata); + sdata_unlock(sdata); +} + +static void ieee80211_beacon_connection_loss_work(struct wiphy *wiphy, + struct wiphy_work *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, + u.mgd.beacon_connection_loss_work); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + if (ifmgd->connection_loss) { + sdata_info(sdata, "Connection to AP %pM lost\n", + sdata->vif.cfg.ap_addr); + __ieee80211_disconnect(sdata); + ifmgd->connection_loss = false; + } else if (ifmgd->driver_disconnect) { + sdata_info(sdata, + "Driver requested disconnection from AP %pM\n", + sdata->vif.cfg.ap_addr); + __ieee80211_disconnect(sdata); + ifmgd->driver_disconnect = false; + } else { + if (ifmgd->associated) + sdata->deflink.u.mgd.beacon_loss_count++; + ieee80211_mgd_probe_ap(sdata, true); + } +} + +static void ieee80211_csa_connection_drop_work(struct wiphy *wiphy, + struct wiphy_work *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, + u.mgd.csa_connection_drop_work); + + __ieee80211_disconnect(sdata); +} + +void ieee80211_beacon_loss(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_hw *hw = &sdata->local->hw; + + trace_api_beacon_loss(sdata); + + sdata->u.mgd.connection_loss = false; + wiphy_work_queue(hw->wiphy, &sdata->u.mgd.beacon_connection_loss_work); +} +EXPORT_SYMBOL(ieee80211_beacon_loss); + +void ieee80211_connection_loss(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_hw *hw = &sdata->local->hw; + + trace_api_connection_loss(sdata); + + sdata->u.mgd.connection_loss = true; + wiphy_work_queue(hw->wiphy, &sdata->u.mgd.beacon_connection_loss_work); +} +EXPORT_SYMBOL(ieee80211_connection_loss); + +void ieee80211_disconnect(struct ieee80211_vif *vif, bool reconnect) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_hw *hw = &sdata->local->hw; + + trace_api_disconnect(sdata, reconnect); + + if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) + return; + + sdata->u.mgd.driver_disconnect = true; + sdata->u.mgd.reconnect = reconnect; + wiphy_work_queue(hw->wiphy, &sdata->u.mgd.beacon_connection_loss_work); +} +EXPORT_SYMBOL(ieee80211_disconnect); + +static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata, + bool assoc) +{ + struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data; + + sdata_assert_lock(sdata); + + if (!assoc) { + /* + * we are not authenticated yet, the only timer that could be + * running is the timeout for the authentication response which + * which is not relevant anymore. 
+ */ + del_timer_sync(&sdata->u.mgd.timer); + sta_info_destroy_addr(sdata, auth_data->ap_addr); + + /* other links are destroyed */ + sdata->deflink.u.mgd.conn_flags = 0; + eth_zero_addr(sdata->deflink.u.mgd.bssid); + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_BSSID); + sdata->u.mgd.flags = 0; + + mutex_lock(&sdata->local->mtx); + ieee80211_link_release_channel(&sdata->deflink); + ieee80211_vif_set_links(sdata, 0, 0); + mutex_unlock(&sdata->local->mtx); + } + + cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss); + kfree(auth_data); + sdata->u.mgd.auth_data = NULL; +} + +enum assoc_status { + ASSOC_SUCCESS, + ASSOC_REJECTED, + ASSOC_TIMEOUT, + ASSOC_ABANDON, +}; + +static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata, + enum assoc_status status) +{ + struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data; + + sdata_assert_lock(sdata); + + if (status != ASSOC_SUCCESS) { + /* + * we are not associated yet, the only timer that could be + * running is the timeout for the association response which + * which is not relevant anymore. + */ + del_timer_sync(&sdata->u.mgd.timer); + sta_info_destroy_addr(sdata, assoc_data->ap_addr); + + sdata->deflink.u.mgd.conn_flags = 0; + eth_zero_addr(sdata->deflink.u.mgd.bssid); + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_BSSID); + sdata->u.mgd.flags = 0; + sdata->vif.bss_conf.mu_mimo_owner = false; + + if (status != ASSOC_REJECTED) { + struct cfg80211_assoc_failure data = { + .timeout = status == ASSOC_TIMEOUT, + }; + int i; + + BUILD_BUG_ON(ARRAY_SIZE(data.bss) != + ARRAY_SIZE(assoc_data->link)); + + for (i = 0; i < ARRAY_SIZE(data.bss); i++) + data.bss[i] = assoc_data->link[i].bss; + + if (ieee80211_vif_is_mld(&sdata->vif)) + data.ap_mld_addr = assoc_data->ap_addr; + + cfg80211_assoc_failure(sdata->dev, &data); + } + + mutex_lock(&sdata->local->mtx); + ieee80211_link_release_channel(&sdata->deflink); + ieee80211_vif_set_links(sdata, 0, 0); + mutex_unlock(&sdata->local->mtx); + } + + kfree(assoc_data); + sdata->u.mgd.assoc_data = NULL; +} + +static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data; + const struct element *challenge; + u8 *pos; + u32 tx_flags = 0; + struct ieee80211_prep_tx_info info = { + .subtype = IEEE80211_STYPE_AUTH, + }; + + pos = mgmt->u.auth.variable; + challenge = cfg80211_find_elem(WLAN_EID_CHALLENGE, pos, + len - (pos - (u8 *)mgmt)); + if (!challenge) + return; + auth_data->expected_transaction = 4; + drv_mgd_prepare_tx(sdata->local, sdata, &info); + if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) + tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS | + IEEE80211_TX_INTFL_MLME_CONN_TX; + ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0, + (void *)challenge, + challenge->datalen + sizeof(*challenge), + auth_data->ap_addr, auth_data->ap_addr, + auth_data->key, auth_data->key_len, + auth_data->key_idx, tx_flags); +} + +static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + const u8 *ap_addr = ifmgd->auth_data->ap_addr; + struct sta_info *sta; + bool result = true; + + sdata_info(sdata, "authenticated\n"); + ifmgd->auth_data->done = true; + ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC; + ifmgd->auth_data->timeout_started = true; + run_again(sdata, 
ifmgd->auth_data->timeout); + + /* move station state to auth */ + mutex_lock(&sdata->local->sta_mtx); + sta = sta_info_get(sdata, ap_addr); + if (!sta) { + WARN_ONCE(1, "%s: STA %pM not found", sdata->name, ap_addr); + result = false; + goto out; + } + if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) { + sdata_info(sdata, "failed moving %pM to auth\n", ap_addr); + result = false; + goto out; + } + +out: + mutex_unlock(&sdata->local->sta_mtx); + return result; +} + +static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + u16 auth_alg, auth_transaction, status_code; + struct ieee80211_event event = { + .type = MLME_EVENT, + .u.mlme.data = AUTH_EVENT, + }; + struct ieee80211_prep_tx_info info = { + .subtype = IEEE80211_STYPE_AUTH, + }; + + sdata_assert_lock(sdata); + + if (len < 24 + 6) + return; + + if (!ifmgd->auth_data || ifmgd->auth_data->done) + return; + + if (!ether_addr_equal(ifmgd->auth_data->ap_addr, mgmt->bssid)) + return; + + auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); + auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); + status_code = le16_to_cpu(mgmt->u.auth.status_code); + + if (auth_alg != ifmgd->auth_data->algorithm || + (auth_alg != WLAN_AUTH_SAE && + auth_transaction != ifmgd->auth_data->expected_transaction) || + (auth_alg == WLAN_AUTH_SAE && + (auth_transaction < ifmgd->auth_data->expected_transaction || + auth_transaction > 2))) { + sdata_info(sdata, "%pM unexpected authentication state: alg %d (expected %d) transact %d (expected %d)\n", + mgmt->sa, auth_alg, ifmgd->auth_data->algorithm, + auth_transaction, + ifmgd->auth_data->expected_transaction); + goto notify_driver; + } + + if (status_code != WLAN_STATUS_SUCCESS) { + cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); + + if (auth_alg == WLAN_AUTH_SAE && + (status_code == WLAN_STATUS_ANTI_CLOG_REQUIRED || + (auth_transaction == 1 && + (status_code == WLAN_STATUS_SAE_HASH_TO_ELEMENT || + status_code == WLAN_STATUS_SAE_PK)))) { + /* waiting for userspace now */ + ifmgd->auth_data->waiting = true; + ifmgd->auth_data->timeout = + jiffies + IEEE80211_AUTH_WAIT_SAE_RETRY; + ifmgd->auth_data->timeout_started = true; + run_again(sdata, ifmgd->auth_data->timeout); + goto notify_driver; + } + + sdata_info(sdata, "%pM denied authentication (status %d)\n", + mgmt->sa, status_code); + ieee80211_destroy_auth_data(sdata, false); + event.u.mlme.status = MLME_DENIED; + event.u.mlme.reason = status_code; + drv_event_callback(sdata->local, sdata, &event); + goto notify_driver; + } + + switch (ifmgd->auth_data->algorithm) { + case WLAN_AUTH_OPEN: + case WLAN_AUTH_LEAP: + case WLAN_AUTH_FT: + case WLAN_AUTH_SAE: + case WLAN_AUTH_FILS_SK: + case WLAN_AUTH_FILS_SK_PFS: + case WLAN_AUTH_FILS_PK: + break; + case WLAN_AUTH_SHARED_KEY: + if (ifmgd->auth_data->expected_transaction != 4) { + ieee80211_auth_challenge(sdata, mgmt, len); + /* need another frame */ + return; + } + break; + default: + WARN_ONCE(1, "invalid auth alg %d", + ifmgd->auth_data->algorithm); + goto notify_driver; + } + + event.u.mlme.status = MLME_SUCCESS; + info.success = 1; + drv_event_callback(sdata->local, sdata, &event); + if (ifmgd->auth_data->algorithm != WLAN_AUTH_SAE || + (auth_transaction == 2 && + ifmgd->auth_data->expected_transaction == 2)) { + if (!ieee80211_mark_sta_auth(sdata)) + return; /* ignore frame -- wait for timeout */ + } else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && + auth_transaction == 2) { + 
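		/*
+		 * The peer's SAE confirm (transaction 2) arrived before we
+		 * expected it, i.e. while userspace is still working through
+		 * the exchange, so only record that fact here rather than
+		 * marking the station authenticated.
+		 */
+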
sdata_info(sdata, "SAE peer confirmed\n"); + ifmgd->auth_data->peer_confirmed = true; + } + + cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); +notify_driver: + drv_mgd_complete_tx(sdata->local, sdata, &info); +} + +#define case_WLAN(type) \ + case WLAN_REASON_##type: return #type + +const char *ieee80211_get_reason_code_string(u16 reason_code) +{ + switch (reason_code) { + case_WLAN(UNSPECIFIED); + case_WLAN(PREV_AUTH_NOT_VALID); + case_WLAN(DEAUTH_LEAVING); + case_WLAN(DISASSOC_DUE_TO_INACTIVITY); + case_WLAN(DISASSOC_AP_BUSY); + case_WLAN(CLASS2_FRAME_FROM_NONAUTH_STA); + case_WLAN(CLASS3_FRAME_FROM_NONASSOC_STA); + case_WLAN(DISASSOC_STA_HAS_LEFT); + case_WLAN(STA_REQ_ASSOC_WITHOUT_AUTH); + case_WLAN(DISASSOC_BAD_POWER); + case_WLAN(DISASSOC_BAD_SUPP_CHAN); + case_WLAN(INVALID_IE); + case_WLAN(MIC_FAILURE); + case_WLAN(4WAY_HANDSHAKE_TIMEOUT); + case_WLAN(GROUP_KEY_HANDSHAKE_TIMEOUT); + case_WLAN(IE_DIFFERENT); + case_WLAN(INVALID_GROUP_CIPHER); + case_WLAN(INVALID_PAIRWISE_CIPHER); + case_WLAN(INVALID_AKMP); + case_WLAN(UNSUPP_RSN_VERSION); + case_WLAN(INVALID_RSN_IE_CAP); + case_WLAN(IEEE8021X_FAILED); + case_WLAN(CIPHER_SUITE_REJECTED); + case_WLAN(DISASSOC_UNSPECIFIED_QOS); + case_WLAN(DISASSOC_QAP_NO_BANDWIDTH); + case_WLAN(DISASSOC_LOW_ACK); + case_WLAN(DISASSOC_QAP_EXCEED_TXOP); + case_WLAN(QSTA_LEAVE_QBSS); + case_WLAN(QSTA_NOT_USE); + case_WLAN(QSTA_REQUIRE_SETUP); + case_WLAN(QSTA_TIMEOUT); + case_WLAN(QSTA_CIPHER_NOT_SUPP); + case_WLAN(MESH_PEER_CANCELED); + case_WLAN(MESH_MAX_PEERS); + case_WLAN(MESH_CONFIG); + case_WLAN(MESH_CLOSE); + case_WLAN(MESH_MAX_RETRIES); + case_WLAN(MESH_CONFIRM_TIMEOUT); + case_WLAN(MESH_INVALID_GTK); + case_WLAN(MESH_INCONSISTENT_PARAM); + case_WLAN(MESH_INVALID_SECURITY); + case_WLAN(MESH_PATH_ERROR); + case_WLAN(MESH_PATH_NOFORWARD); + case_WLAN(MESH_PATH_DEST_UNREACHABLE); + case_WLAN(MAC_EXISTS_IN_MBSS); + case_WLAN(MESH_CHAN_REGULATORY); + case_WLAN(MESH_CHAN); + default: return "<unknown>"; + } +} + +static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); + + sdata_assert_lock(sdata); + + if (len < 24 + 2) + return; + + if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) { + ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code); + return; + } + + if (ifmgd->associated && + ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) { + sdata_info(sdata, "deauthenticated from %pM (Reason: %u=%s)\n", + sdata->vif.cfg.ap_addr, reason_code, + ieee80211_get_reason_code_string(reason_code)); + + ieee80211_set_disassoc(sdata, 0, 0, false, NULL); + + ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, + reason_code, false); + return; + } + + if (ifmgd->assoc_data && + ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->ap_addr)) { + sdata_info(sdata, + "deauthenticated from %pM while associating (Reason: %u=%s)\n", + ifmgd->assoc_data->ap_addr, reason_code, + ieee80211_get_reason_code_string(reason_code)); + + ieee80211_destroy_assoc_data(sdata, ASSOC_ABANDON); + + cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); + return; + } +} + + +static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + u16 reason_code; + + sdata_assert_lock(sdata); + + if (len < 24 + 2) + return; + + if (!ifmgd->associated || + !ether_addr_equal(mgmt->bssid, 
sdata->vif.cfg.ap_addr)) + return; + + reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); + + if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) { + ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code); + return; + } + + sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n", + sdata->vif.cfg.ap_addr, reason_code, + ieee80211_get_reason_code_string(reason_code)); + + ieee80211_set_disassoc(sdata, 0, 0, false, NULL); + + ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code, + false); +} + +static void ieee80211_get_rates(struct ieee80211_supported_band *sband, + u8 *supp_rates, unsigned int supp_rates_len, + u32 *rates, u32 *basic_rates, + bool *have_higher_than_11mbit, + int *min_rate, int *min_rate_index, + int shift) +{ + int i, j; + + for (i = 0; i < supp_rates_len; i++) { + int rate = supp_rates[i] & 0x7f; + bool is_basic = !!(supp_rates[i] & 0x80); + + if ((rate * 5 * (1 << shift)) > 110) + *have_higher_than_11mbit = true; + + /* + * Skip HT, VHT, HE, EHT and SAE H2E only BSS membership + * selectors since they're not rates. + * + * Note: Even though the membership selector and the basic + * rate flag share the same bit, they are not exactly + * the same. + */ + if (supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HT_PHY) || + supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_VHT_PHY) || + supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HE_PHY) || + supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_EHT_PHY) || + supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_SAE_H2E)) + continue; + + for (j = 0; j < sband->n_bitrates; j++) { + struct ieee80211_rate *br; + int brate; + + br = &sband->bitrates[j]; + + brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5); + if (brate == rate) { + *rates |= BIT(j); + if (is_basic) + *basic_rates |= BIT(j); + if ((rate * 5) < *min_rate) { + *min_rate = rate * 5; + *min_rate_index = j; + } + break; + } + } + } +} + +static bool ieee80211_twt_req_supported(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const struct link_sta_info *link_sta, + const struct ieee802_11_elems *elems) +{ + const struct ieee80211_sta_he_cap *own_he_cap = + ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); + + if (elems->ext_capab_len < 10) + return false; + + if (!(elems->ext_capab[9] & WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT)) + return false; + + return link_sta->pub->he_cap.he_cap_elem.mac_cap_info[0] & + IEEE80211_HE_MAC_CAP0_TWT_RES && + own_he_cap && + (own_he_cap->he_cap_elem.mac_cap_info[0] & + IEEE80211_HE_MAC_CAP0_TWT_REQ); +} + +static u64 ieee80211_recalc_twt_req(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + struct ieee80211_link_data *link, + struct link_sta_info *link_sta, + struct ieee802_11_elems *elems) +{ + bool twt = ieee80211_twt_req_supported(sdata, sband, link_sta, elems); + + if (link->conf->twt_requester != twt) { + link->conf->twt_requester = twt; + return BSS_CHANGED_TWT; + } + return 0; +} + +static bool ieee80211_twt_bcast_support(struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *bss_conf, + struct ieee80211_supported_band *sband, + struct link_sta_info *link_sta) +{ + const struct ieee80211_sta_he_cap *own_he_cap = + ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); + + return bss_conf->he_support && + (link_sta->pub->he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_BCAST_TWT) && + own_he_cap && + (own_he_cap->he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_BCAST_TWT); +} + +static bool 
ieee80211_assoc_config_link(struct ieee80211_link_data *link, + struct link_sta_info *link_sta, + struct cfg80211_bss *cbss, + struct ieee80211_mgmt *mgmt, + const u8 *elem_start, + unsigned int elem_len, + u64 *changed) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data; + struct ieee80211_bss_conf *bss_conf = link->conf; + struct ieee80211_local *local = sdata->local; + unsigned int link_id = link->link_id; + struct ieee80211_elems_parse_params parse_params = { + .start = elem_start, + .len = elem_len, + .link_id = link_id == assoc_data->assoc_link_id ? -1 : link_id, + .from_ap = true, + }; + bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ; + bool is_s1g = cbss->channel->band == NL80211_BAND_S1GHZ; + const struct cfg80211_bss_ies *bss_ies = NULL; + struct ieee80211_supported_band *sband; + struct ieee802_11_elems *elems; + const __le16 prof_bss_param_ch_present = + cpu_to_le16(IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT); + u16 capab_info; + bool ret; + + elems = ieee802_11_parse_elems_full(&parse_params); + if (!elems) + return false; + + if (link_id == assoc_data->assoc_link_id) { + capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); + + /* + * we should not get to this flow unless the association was + * successful, so set the status directly to success + */ + assoc_data->link[link_id].status = WLAN_STATUS_SUCCESS; + if (elems->ml_basic) { + if (!(elems->ml_basic->control & + cpu_to_le16(IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))) { + ret = false; + goto out; + } + link->u.mgd.bss_param_ch_cnt = + ieee80211_mle_get_bss_param_ch_cnt(elems->ml_basic); + } + } else if (!elems->prof || + !(elems->prof->control & prof_bss_param_ch_present)) { + ret = false; + goto out; + } else { + const u8 *ptr = elems->prof->variable + + elems->prof->sta_info_len - 1; + + /* + * During parsing, we validated that these fields exist, + * otherwise elems->prof would have been set to NULL. + */ + capab_info = get_unaligned_le16(ptr); + assoc_data->link[link_id].status = get_unaligned_le16(ptr + 2); + link->u.mgd.bss_param_ch_cnt = + ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(elems->prof); + + if (assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) { + link_info(link, "association response status code=%u\n", + assoc_data->link[link_id].status); + ret = true; + goto out; + } + } + + if (!is_s1g && !elems->supp_rates) { + sdata_info(sdata, "no SuppRates element in AssocResp\n"); + ret = false; + goto out; + } + + link->u.mgd.tdls_chan_switch_prohibited = + elems->ext_capab && elems->ext_capab_len >= 5 && + (elems->ext_capab[4] & WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED); + + /* + * Some APs are erroneously not including some information in their + * (re)association response frames. Try to recover by using the data + * from the beacon or probe response. This seems to afflict mobile + * 2G/3G/4G wifi routers, reported models include the "Onda PN51T", + * "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device. 
+ */ + if (!is_6ghz && + ((assoc_data->wmm && !elems->wmm_param) || + (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT) && + (!elems->ht_cap_elem || !elems->ht_operation)) || + (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT) && + (!elems->vht_cap_elem || !elems->vht_operation)))) { + const struct cfg80211_bss_ies *ies; + struct ieee802_11_elems *bss_elems; + + rcu_read_lock(); + ies = rcu_dereference(cbss->ies); + if (ies) + bss_ies = kmemdup(ies, sizeof(*ies) + ies->len, + GFP_ATOMIC); + rcu_read_unlock(); + if (!bss_ies) { + ret = false; + goto out; + } + + parse_params.start = bss_ies->data; + parse_params.len = bss_ies->len; + parse_params.bss = cbss; + bss_elems = ieee802_11_parse_elems_full(&parse_params); + if (!bss_elems) { + ret = false; + goto out; + } + + if (assoc_data->wmm && + !elems->wmm_param && bss_elems->wmm_param) { + elems->wmm_param = bss_elems->wmm_param; + sdata_info(sdata, + "AP bug: WMM param missing from AssocResp\n"); + } + + /* + * Also check if we requested HT/VHT, otherwise the AP doesn't + * have to include the IEs in the (re)association response. + */ + if (!elems->ht_cap_elem && bss_elems->ht_cap_elem && + !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)) { + elems->ht_cap_elem = bss_elems->ht_cap_elem; + sdata_info(sdata, + "AP bug: HT capability missing from AssocResp\n"); + } + if (!elems->ht_operation && bss_elems->ht_operation && + !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)) { + elems->ht_operation = bss_elems->ht_operation; + sdata_info(sdata, + "AP bug: HT operation missing from AssocResp\n"); + } + if (!elems->vht_cap_elem && bss_elems->vht_cap_elem && + !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) { + elems->vht_cap_elem = bss_elems->vht_cap_elem; + sdata_info(sdata, + "AP bug: VHT capa missing from AssocResp\n"); + } + if (!elems->vht_operation && bss_elems->vht_operation && + !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) { + elems->vht_operation = bss_elems->vht_operation; + sdata_info(sdata, + "AP bug: VHT operation missing from AssocResp\n"); + } + + kfree(bss_elems); + } + + /* + * We previously checked these in the beacon/probe response, so + * they should be present here. This is just a safety net. 
+ */ + if (!is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT) && + (!elems->wmm_param || !elems->ht_cap_elem || !elems->ht_operation)) { + sdata_info(sdata, + "HT AP is missing WMM params or HT capability/operation\n"); + ret = false; + goto out; + } + + if (!is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT) && + (!elems->vht_cap_elem || !elems->vht_operation)) { + sdata_info(sdata, + "VHT AP is missing VHT capability/operation\n"); + ret = false; + goto out; + } + + if (is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) && + !elems->he_6ghz_capa) { + sdata_info(sdata, + "HE 6 GHz AP is missing HE 6 GHz band capability\n"); + ret = false; + goto out; + } + + if (WARN_ON(!link->conf->chandef.chan)) { + ret = false; + goto out; + } + sband = local->hw.wiphy->bands[link->conf->chandef.chan->band]; + + if (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) && + (!elems->he_cap || !elems->he_operation)) { + sdata_info(sdata, + "HE AP is missing HE capability/operation\n"); + ret = false; + goto out; + } + + /* Set up internal HT/VHT capabilities */ + if (elems->ht_cap_elem && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)) + ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, + elems->ht_cap_elem, + link_sta); + + if (elems->vht_cap_elem && + !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) { + const struct ieee80211_vht_cap *bss_vht_cap = NULL; + const struct cfg80211_bss_ies *ies; + + /* + * Cisco AP module 9115 with FW 17.3 has a bug and sends a + * too large maximum MPDU length in the association response + * (indicating 12k) that it cannot actually process ... + * Work around that. + */ + rcu_read_lock(); + ies = rcu_dereference(cbss->ies); + if (ies) { + const struct element *elem; + + elem = cfg80211_find_elem(WLAN_EID_VHT_CAPABILITY, + ies->data, ies->len); + if (elem && elem->datalen >= sizeof(*bss_vht_cap)) + bss_vht_cap = (const void *)elem->data; + } + + ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, + elems->vht_cap_elem, + bss_vht_cap, link_sta); + rcu_read_unlock(); + } + + if (elems->he_operation && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) && + elems->he_cap) { + ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, + elems->he_cap, + elems->he_cap_len, + elems->he_6ghz_capa, + link_sta); + + bss_conf->he_support = link_sta->pub->he_cap.has_he; + if (elems->rsnx && elems->rsnx_len && + (elems->rsnx[0] & WLAN_RSNX_CAPA_PROTECTED_TWT) && + wiphy_ext_feature_isset(local->hw.wiphy, + NL80211_EXT_FEATURE_PROTECTED_TWT)) + bss_conf->twt_protected = true; + else + bss_conf->twt_protected = false; + + *changed |= ieee80211_recalc_twt_req(sdata, sband, link, + link_sta, elems); + + if (elems->eht_operation && elems->eht_cap && + !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)) { + ieee80211_eht_cap_ie_to_sta_eht_cap(sdata, sband, + elems->he_cap, + elems->he_cap_len, + elems->eht_cap, + elems->eht_cap_len, + link_sta); + + bss_conf->eht_support = link_sta->pub->eht_cap.has_eht; + *changed |= BSS_CHANGED_EHT_PUNCTURING; + } else { + bss_conf->eht_support = false; + } + } else { + bss_conf->he_support = false; + bss_conf->twt_requester = false; + bss_conf->twt_protected = false; + bss_conf->eht_support = false; + } + + bss_conf->twt_broadcast = + ieee80211_twt_bcast_support(sdata, bss_conf, sband, link_sta); + + if (bss_conf->he_support) { + bss_conf->he_bss_color.color = + le32_get_bits(elems->he_operation->he_oper_params, + IEEE80211_HE_OPERATION_BSS_COLOR_MASK); + bss_conf->he_bss_color.partial = + 
le32_get_bits(elems->he_operation->he_oper_params, + IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR); + bss_conf->he_bss_color.enabled = + !le32_get_bits(elems->he_operation->he_oper_params, + IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED); + + if (bss_conf->he_bss_color.enabled) + *changed |= BSS_CHANGED_HE_BSS_COLOR; + + bss_conf->htc_trig_based_pkt_ext = + le32_get_bits(elems->he_operation->he_oper_params, + IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK); + bss_conf->frame_time_rts_th = + le32_get_bits(elems->he_operation->he_oper_params, + IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK); + + bss_conf->uora_exists = !!elems->uora_element; + if (elems->uora_element) + bss_conf->uora_ocw_range = elems->uora_element[0]; + + ieee80211_he_op_ie_to_bss_conf(&sdata->vif, elems->he_operation); + ieee80211_he_spr_ie_to_bss_conf(&sdata->vif, elems->he_spr); + /* TODO: OPEN: what happens if BSS color disable is set? */ + } + + if (cbss->transmitted_bss) { + bss_conf->nontransmitted = true; + ether_addr_copy(bss_conf->transmitter_bssid, + cbss->transmitted_bss->bssid); + bss_conf->bssid_indicator = cbss->max_bssid_indicator; + bss_conf->bssid_index = cbss->bssid_index; + } + + /* + * Some APs, e.g. Netgear WNDR3700, report invalid HT operation data + * in their association response, so ignore that data for our own + * configuration. If it changed since the last beacon, we'll get the + * next beacon and update then. + */ + + /* + * If an operating mode notification IE is present, override the + * NSS calculation (that would be done in rate_control_rate_init()) + * and use the # of streams from that element. + */ + if (elems->opmode_notif && + !(*elems->opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)) { + u8 nss; + + nss = *elems->opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK; + nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT; + nss += 1; + link_sta->pub->rx_nss = nss; + } + + /* + * Always handle WMM once after association regardless + * of the first value the AP uses. Setting -1 here has + * that effect because the AP values is an unsigned + * 4-bit value. + */ + link->u.mgd.wmm_last_param_set = -1; + link->u.mgd.mu_edca_last_param_set = -1; + + if (link->u.mgd.disable_wmm_tracking) { + ieee80211_set_wmm_default(link, false, false); + } else if (!ieee80211_sta_wmm_params(local, link, elems->wmm_param, + elems->wmm_param_len, + elems->mu_edca_param_set)) { + /* still enable QoS since we might have HT/VHT */ + ieee80211_set_wmm_default(link, false, true); + /* disable WMM tracking in this case to disable + * tracking WMM parameter changes in the beacon if + * the parameters weren't actually valid. Doing so + * avoids changing parameters very strangely when + * the AP is going back and forth between valid and + * invalid parameters. 
+ */ + link->u.mgd.disable_wmm_tracking = true; + } + + if (elems->max_idle_period_ie) { + bss_conf->max_idle_period = + le16_to_cpu(elems->max_idle_period_ie->max_idle_period); + bss_conf->protected_keep_alive = + !!(elems->max_idle_period_ie->idle_options & + WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE); + *changed |= BSS_CHANGED_KEEP_ALIVE; + } else { + bss_conf->max_idle_period = 0; + bss_conf->protected_keep_alive = false; + } + + /* set assoc capability (AID was already set earlier), + * ieee80211_set_associated() will tell the driver */ + bss_conf->assoc_capability = capab_info; + + ret = true; +out: + kfree(elems); + kfree(bss_ies); + return ret; +} + +static int ieee80211_mgd_setup_link_sta(struct ieee80211_link_data *link, + struct sta_info *sta, + struct link_sta_info *link_sta, + struct cfg80211_bss *cbss) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_bss *bss = (void *)cbss->priv; + u32 rates = 0, basic_rates = 0; + bool have_higher_than_11mbit = false; + int min_rate = INT_MAX, min_rate_index = -1; + /* this is clearly wrong for MLO but we'll just remove it later */ + int shift = ieee80211_vif_get_shift(&sdata->vif); + struct ieee80211_supported_band *sband; + + memcpy(link_sta->addr, cbss->bssid, ETH_ALEN); + memcpy(link_sta->pub->addr, cbss->bssid, ETH_ALEN); + + /* TODO: S1G Basic Rate Set is expressed elsewhere */ + if (cbss->channel->band == NL80211_BAND_S1GHZ) { + ieee80211_s1g_sta_rate_init(sta); + return 0; + } + + sband = local->hw.wiphy->bands[cbss->channel->band]; + + ieee80211_get_rates(sband, bss->supp_rates, bss->supp_rates_len, + &rates, &basic_rates, &have_higher_than_11mbit, + &min_rate, &min_rate_index, shift); + + /* + * This used to be a workaround for basic rates missing + * in the association response frame. Now that we no + * longer use the basic rates from there, it probably + * doesn't happen any more, but keep the workaround so + * in case some *other* APs are buggy in different ways + * we can connect -- with a warning. + * Allow this workaround only in case the AP provided at least + * one rate. + */ + if (min_rate_index < 0) { + link_info(link, "No legacy rates in association response\n"); + return -EINVAL; + } else if (!basic_rates) { + link_info(link, "No basic rates, using min rate instead\n"); + basic_rates = BIT(min_rate_index); + } + + if (rates) + link_sta->pub->supp_rates[cbss->channel->band] = rates; + else + link_info(link, "No rates found, keeping mandatory only\n"); + + link->conf->basic_rates = basic_rates; + + /* cf. 
IEEE 802.11 9.2.12 */ + link->operating_11g_mode = sband->band == NL80211_BAND_2GHZ && + have_higher_than_11mbit; + + return 0; +} + +static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link, + struct cfg80211_bss *cbss) +{ + struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp; + const struct element *ht_cap_elem, *vht_cap_elem; + const struct cfg80211_bss_ies *ies; + const struct ieee80211_ht_cap *ht_cap; + const struct ieee80211_vht_cap *vht_cap; + const struct ieee80211_he_cap_elem *he_cap; + const struct element *he_cap_elem; + u16 mcs_80_map, mcs_160_map; + int i, mcs_nss_size; + bool support_160; + u8 chains = 1; + + if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT) + return chains; + + ht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_CAPABILITY); + if (ht_cap_elem && ht_cap_elem->datalen >= sizeof(*ht_cap)) { + ht_cap = (void *)ht_cap_elem->data; + chains = ieee80211_mcs_to_chains(&ht_cap->mcs); + /* + * TODO: use "Tx Maximum Number Spatial Streams Supported" and + * "Tx Unequal Modulation Supported" fields. + */ + } + + if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT) + return chains; + + vht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY); + if (vht_cap_elem && vht_cap_elem->datalen >= sizeof(*vht_cap)) { + u8 nss; + u16 tx_mcs_map; + + vht_cap = (void *)vht_cap_elem->data; + tx_mcs_map = le16_to_cpu(vht_cap->supp_mcs.tx_mcs_map); + for (nss = 8; nss > 0; nss--) { + if (((tx_mcs_map >> (2 * (nss - 1))) & 3) != + IEEE80211_VHT_MCS_NOT_SUPPORTED) + break; + } + /* TODO: use "Tx Highest Supported Long GI Data Rate" field? */ + chains = max(chains, nss); + } + + if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) + return chains; + + ies = rcu_dereference(cbss->ies); + he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, + ies->data, ies->len); + + if (!he_cap_elem || he_cap_elem->datalen < sizeof(*he_cap)) + return chains; + + /* skip one byte ext_tag_id */ + he_cap = (void *)(he_cap_elem->data + 1); + mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap); + + /* invalid HE IE */ + if (he_cap_elem->datalen < 1 + mcs_nss_size + sizeof(*he_cap)) + return chains; + + /* mcs_nss is right after he_cap info */ + he_mcs_nss_supp = (void *)(he_cap + 1); + + mcs_80_map = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80); + + for (i = 7; i >= 0; i--) { + u8 mcs_80 = mcs_80_map >> (2 * i) & 3; + + if (mcs_80 != IEEE80211_VHT_MCS_NOT_SUPPORTED) { + chains = max_t(u8, chains, i + 1); + break; + } + } + + support_160 = he_cap->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; + + if (!support_160) + return chains; + + mcs_160_map = le16_to_cpu(he_mcs_nss_supp->tx_mcs_160); + for (i = 7; i >= 0; i--) { + u8 mcs_160 = mcs_160_map >> (2 * i) & 3; + + if (mcs_160 != IEEE80211_VHT_MCS_NOT_SUPPORTED) { + chains = max_t(u8, chains, i + 1); + break; + } + } + + return chains; +} + +static bool +ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata, + const struct cfg80211_bss_ies *ies, + const struct ieee80211_he_operation *he_op) +{ + const struct element *he_cap_elem; + const struct ieee80211_he_cap_elem *he_cap; + struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp; + u16 mcs_80_map_tx, mcs_80_map_rx; + u16 ap_min_req_set; + int mcs_nss_size; + int nss; + + he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, + ies->data, ies->len); + + if (!he_cap_elem) + return false; + + /* invalid HE IE */ + if (he_cap_elem->datalen < 1 + sizeof(*he_cap)) { + sdata_info(sdata, + "Invalid HE elem, Disable HE\n"); + 
return false; + } + + /* skip one byte ext_tag_id */ + he_cap = (void *)(he_cap_elem->data + 1); + mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap); + + /* invalid HE IE */ + if (he_cap_elem->datalen < 1 + sizeof(*he_cap) + mcs_nss_size) { + sdata_info(sdata, + "Invalid HE elem with nss size, Disable HE\n"); + return false; + } + + /* mcs_nss is right after he_cap info */ + he_mcs_nss_supp = (void *)(he_cap + 1); + + mcs_80_map_tx = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80); + mcs_80_map_rx = le16_to_cpu(he_mcs_nss_supp->rx_mcs_80); + + /* P802.11-REVme/D0.3 + * 27.1.1 Introduction to the HE PHY + * ... + * An HE STA shall support the following features: + * ... + * Single spatial stream HE-MCSs 0 to 7 (transmit and receive) in all + * supported channel widths for HE SU PPDUs + */ + if ((mcs_80_map_tx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED || + (mcs_80_map_rx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED) { + sdata_info(sdata, + "Missing mandatory rates for 1 Nss, rx 0x%x, tx 0x%x, disable HE\n", + mcs_80_map_tx, mcs_80_map_rx); + return false; + } + + if (!he_op) + return true; + + ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set); + + /* + * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all + * zeroes, which is nonsense, and completely inconsistent with itself + * (it doesn't have 8 streams). Accept the settings in this case anyway. + */ + if (!ap_min_req_set) + return true; + + /* make sure the AP is consistent with itself + * + * P802.11-REVme/D0.3 + * 26.17.1 Basic HE BSS operation + * + * A STA that is operating in an HE BSS shall be able to receive and + * transmit at each of the <HE-MCS, NSS> tuple values indicated by the + * Basic HE-MCS And NSS Set field of the HE Operation parameter of the + * MLME-START.request primitive and shall be able to receive at each of + * the <HE-MCS, NSS> tuple values indicated by the Supported HE-MCS and + * NSS Set field in the HE Capabilities parameter of the MLMESTART.request + * primitive + */ + for (nss = 8; nss > 0; nss--) { + u8 ap_op_val = (ap_min_req_set >> (2 * (nss - 1))) & 3; + u8 ap_rx_val; + u8 ap_tx_val; + + if (ap_op_val == IEEE80211_HE_MCS_NOT_SUPPORTED) + continue; + + ap_rx_val = (mcs_80_map_rx >> (2 * (nss - 1))) & 3; + ap_tx_val = (mcs_80_map_tx >> (2 * (nss - 1))) & 3; + + if (ap_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED || + ap_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED || + ap_rx_val < ap_op_val || ap_tx_val < ap_op_val) { + sdata_info(sdata, + "Invalid rates for %d Nss, rx %d, tx %d oper %d, disable HE\n", + nss, ap_rx_val, ap_rx_val, ap_op_val); + return false; + } + } + + return true; +} + +static bool +ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const struct ieee80211_he_operation *he_op) +{ + const struct ieee80211_sta_he_cap *sta_he_cap = + ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); + u16 ap_min_req_set; + int i; + + if (!sta_he_cap || !he_op) + return false; + + ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set); + + /* + * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all + * zeroes, which is nonsense, and completely inconsistent with itself + * (it doesn't have 8 streams). Accept the settings in this case anyway. 
+ */ + if (!ap_min_req_set) + return true; + + /* Need to go over for 80MHz, 160MHz and for 80+80 */ + for (i = 0; i < 3; i++) { + const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp = + &sta_he_cap->he_mcs_nss_supp; + u16 sta_mcs_map_rx = + le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]); + u16 sta_mcs_map_tx = + le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]); + u8 nss; + bool verified = true; + + /* + * For each band there is a maximum of 8 spatial streams + * possible. Each of the sta_mcs_map_* is a 16-bit struct built + * of 2 bits per NSS (1-8), with the values defined in enum + * ieee80211_he_mcs_support. Need to make sure STA TX and RX + * capabilities aren't less than the AP's minimum requirements + * for this HE BSS per SS. + * It is enough to find one such band that meets the reqs. + */ + for (nss = 8; nss > 0; nss--) { + u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3; + u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3; + u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3; + + if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED) + continue; + + /* + * Make sure the HE AP doesn't require MCSs that aren't + * supported by the client as required by spec + * + * P802.11-REVme/D0.3 + * 26.17.1 Basic HE BSS operation + * + * An HE STA shall not attempt to join * (MLME-JOIN.request primitive) + * a BSS, unless it supports (i.e., is able to both transmit and + * receive using) all of the <HE-MCS, NSS> tuples in the basic + * HE-MCS and NSS set. + */ + if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED || + sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED || + (ap_val > sta_rx_val) || (ap_val > sta_tx_val)) { + verified = false; + break; + } + } + + if (verified) + return true; + } + + /* If here, STA doesn't meet AP's HE min requirements */ + return false; +} + +static u8 +ieee80211_get_eht_cap_mcs_nss(const struct ieee80211_sta_he_cap *sta_he_cap, + const struct ieee80211_sta_eht_cap *sta_eht_cap, + unsigned int idx, int bw) +{ + u8 he_phy_cap0 = sta_he_cap->he_cap_elem.phy_cap_info[0]; + u8 eht_phy_cap0 = sta_eht_cap->eht_cap_elem.phy_cap_info[0]; + + /* handle us being a 20 MHz-only EHT STA - with four values + * for MCS 0-7, 8-9, 10-11, 12-13. 
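+	 * Each of those octets packs the supported RX NSS in its low
+	 * nibble and the TX NSS in its high nibble, matching the
+	 * IEEE80211_EHT_MCS_NSS_RX/_TX masks used by the caller.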
+ */ + if (!(he_phy_cap0 & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) + return sta_eht_cap->eht_mcs_nss_supp.only_20mhz.rx_tx_max_nss[idx]; + + /* the others have MCS 0-9 together, rather than separately from 0-7 */ + if (idx > 0) + idx--; + + switch (bw) { + case 0: + return sta_eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_max_nss[idx]; + case 1: + if (!(he_phy_cap0 & + (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G))) + return 0xff; /* pass check */ + return sta_eht_cap->eht_mcs_nss_supp.bw._160.rx_tx_max_nss[idx]; + case 2: + if (!(eht_phy_cap0 & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)) + return 0xff; /* pass check */ + return sta_eht_cap->eht_mcs_nss_supp.bw._320.rx_tx_max_nss[idx]; + } + + WARN_ON(1); + return 0; +} + +static bool +ieee80211_verify_sta_eht_mcs_support(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const struct ieee80211_eht_operation *eht_op) +{ + const struct ieee80211_sta_he_cap *sta_he_cap = + ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); + const struct ieee80211_sta_eht_cap *sta_eht_cap = + ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif); + const struct ieee80211_eht_mcs_nss_supp_20mhz_only *req; + unsigned int i; + + if (!sta_he_cap || !sta_eht_cap || !eht_op) + return false; + + req = &eht_op->basic_mcs_nss; + + for (i = 0; i < ARRAY_SIZE(req->rx_tx_max_nss); i++) { + u8 req_rx_nss, req_tx_nss; + unsigned int bw; + + req_rx_nss = u8_get_bits(req->rx_tx_max_nss[i], + IEEE80211_EHT_MCS_NSS_RX); + req_tx_nss = u8_get_bits(req->rx_tx_max_nss[i], + IEEE80211_EHT_MCS_NSS_TX); + + for (bw = 0; bw < 3; bw++) { + u8 have, have_rx_nss, have_tx_nss; + + have = ieee80211_get_eht_cap_mcs_nss(sta_he_cap, + sta_eht_cap, + i, bw); + have_rx_nss = u8_get_bits(have, + IEEE80211_EHT_MCS_NSS_RX); + have_tx_nss = u8_get_bits(have, + IEEE80211_EHT_MCS_NSS_TX); + + if (req_rx_nss > have_rx_nss || + req_tx_nss > have_tx_nss) + return false; + } + } + + return true; +} + +static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link, + struct cfg80211_bss *cbss, + ieee80211_conn_flags_t *conn_flags) +{ + struct ieee80211_local *local = sdata->local; + const struct ieee80211_ht_cap *ht_cap = NULL; + const struct ieee80211_ht_operation *ht_oper = NULL; + const struct ieee80211_vht_operation *vht_oper = NULL; + const struct ieee80211_he_operation *he_oper = NULL; + const struct ieee80211_eht_operation *eht_oper = NULL; + const struct ieee80211_s1g_oper_ie *s1g_oper = NULL; + struct ieee80211_supported_band *sband; + struct cfg80211_chan_def chandef; + bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ; + bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ; + struct ieee80211_bss *bss = (void *)cbss->priv; + struct ieee80211_elems_parse_params parse_params = { + .link_id = -1, + .from_ap = true, + }; + struct ieee802_11_elems *elems; + const struct cfg80211_bss_ies *ies; + int ret; + u32 i; + bool have_80mhz; + + rcu_read_lock(); + + ies = rcu_dereference(cbss->ies); + parse_params.start = ies->data; + parse_params.len = ies->len; + elems = ieee802_11_parse_elems_full(&parse_params); + if (!elems) { + rcu_read_unlock(); + return -ENOMEM; + } + + sband = local->hw.wiphy->bands[cbss->channel->band]; + + *conn_flags &= ~(IEEE80211_CONN_DISABLE_40MHZ | + IEEE80211_CONN_DISABLE_80P80MHZ | + IEEE80211_CONN_DISABLE_160MHZ); + + /* disable HT/VHT/HE if we don't support them */ + if (!sband->ht_cap.ht_supported && !is_6ghz) { + 
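		/*
+		 * On 2.4/5 GHz everything above HT builds on it, so losing
+		 * HT takes VHT, HE and EHT down as well; 6 GHz is exempt
+		 * since HE/EHT operate there without HT/VHT.
+		 */
+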
mlme_dbg(sdata, "HT not supported, disabling HT/VHT/HE/EHT\n"); + *conn_flags |= IEEE80211_CONN_DISABLE_HT; + *conn_flags |= IEEE80211_CONN_DISABLE_VHT; + *conn_flags |= IEEE80211_CONN_DISABLE_HE; + *conn_flags |= IEEE80211_CONN_DISABLE_EHT; + } + + if (!sband->vht_cap.vht_supported && is_5ghz) { + mlme_dbg(sdata, "VHT not supported, disabling VHT/HE/EHT\n"); + *conn_flags |= IEEE80211_CONN_DISABLE_VHT; + *conn_flags |= IEEE80211_CONN_DISABLE_HE; + *conn_flags |= IEEE80211_CONN_DISABLE_EHT; + } + + if (!ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) { + mlme_dbg(sdata, "HE not supported, disabling HE and EHT\n"); + *conn_flags |= IEEE80211_CONN_DISABLE_HE; + *conn_flags |= IEEE80211_CONN_DISABLE_EHT; + } + + if (!ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif)) { + mlme_dbg(sdata, "EHT not supported, disabling EHT\n"); + *conn_flags |= IEEE80211_CONN_DISABLE_EHT; + } + + if (!(*conn_flags & IEEE80211_CONN_DISABLE_HT) && !is_6ghz) { + ht_oper = elems->ht_operation; + ht_cap = elems->ht_cap_elem; + + if (!ht_cap) { + *conn_flags |= IEEE80211_CONN_DISABLE_HT; + ht_oper = NULL; + } + } + + if (!(*conn_flags & IEEE80211_CONN_DISABLE_VHT) && !is_6ghz) { + vht_oper = elems->vht_operation; + if (vht_oper && !ht_oper) { + vht_oper = NULL; + sdata_info(sdata, + "AP advertised VHT without HT, disabling HT/VHT/HE\n"); + *conn_flags |= IEEE80211_CONN_DISABLE_HT; + *conn_flags |= IEEE80211_CONN_DISABLE_VHT; + *conn_flags |= IEEE80211_CONN_DISABLE_HE; + *conn_flags |= IEEE80211_CONN_DISABLE_EHT; + } + + if (!elems->vht_cap_elem) { + *conn_flags |= IEEE80211_CONN_DISABLE_VHT; + vht_oper = NULL; + } + } + + if (!(*conn_flags & IEEE80211_CONN_DISABLE_HE)) { + he_oper = elems->he_operation; + + if (link && is_6ghz) { + struct ieee80211_bss_conf *bss_conf; + u8 j = 0; + + bss_conf = link->conf; + + if (elems->pwr_constr_elem) + bss_conf->pwr_reduction = *elems->pwr_constr_elem; + + BUILD_BUG_ON(ARRAY_SIZE(bss_conf->tx_pwr_env) != + ARRAY_SIZE(elems->tx_pwr_env)); + + for (i = 0; i < elems->tx_pwr_env_num; i++) { + if (elems->tx_pwr_env_len[i] > + sizeof(bss_conf->tx_pwr_env[j])) + continue; + + bss_conf->tx_pwr_env_num++; + memcpy(&bss_conf->tx_pwr_env[j], elems->tx_pwr_env[i], + elems->tx_pwr_env_len[i]); + j++; + } + } + + if (!ieee80211_verify_peer_he_mcs_support(sdata, ies, he_oper) || + !ieee80211_verify_sta_he_mcs_support(sdata, sband, he_oper)) + *conn_flags |= IEEE80211_CONN_DISABLE_HE | + IEEE80211_CONN_DISABLE_EHT; + } + + /* + * EHT requires HE to be supported as well. Specifically for 6 GHz + * channels, the operation channel information can only be deduced from + * both the 6 GHz operation information (from the HE operation IE) and + * EHT operation. 
+ */ + if (!(*conn_flags & + (IEEE80211_CONN_DISABLE_HE | + IEEE80211_CONN_DISABLE_EHT)) && + he_oper) { + const struct cfg80211_bss_ies *cbss_ies; + const struct element *eht_ml_elem; + const u8 *eht_oper_ie; + + cbss_ies = rcu_dereference(cbss->ies); + eht_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_EHT_OPERATION, + cbss_ies->data, cbss_ies->len); + if (eht_oper_ie && eht_oper_ie[1] >= + 1 + sizeof(struct ieee80211_eht_operation)) + eht_oper = (void *)(eht_oper_ie + 3); + else + eht_oper = NULL; + + if (!ieee80211_verify_sta_eht_mcs_support(sdata, sband, eht_oper)) + *conn_flags |= IEEE80211_CONN_DISABLE_EHT; + + eht_ml_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK, + cbss_ies->data, cbss_ies->len); + + /* data + 1 / datalen - 1 since it's an extended element */ + if (!(*conn_flags & IEEE80211_CONN_DISABLE_EHT) && + eht_ml_elem && + ieee80211_mle_type_ok(eht_ml_elem->data + 1, + IEEE80211_ML_CONTROL_TYPE_BASIC, + eht_ml_elem->datalen - 1)) { + sdata->vif.cfg.eml_cap = + ieee80211_mle_get_eml_cap(eht_ml_elem->data + 1); + sdata->vif.cfg.eml_med_sync_delay = + ieee80211_mle_get_eml_med_sync_delay(eht_ml_elem->data + 1); + } + } + + /* Allow VHT if at least one channel on the sband supports 80 MHz */ + have_80mhz = false; + for (i = 0; i < sband->n_channels; i++) { + if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED | + IEEE80211_CHAN_NO_80MHZ)) + continue; + + have_80mhz = true; + break; + } + + if (!have_80mhz) { + sdata_info(sdata, "80 MHz not supported, disabling VHT\n"); + *conn_flags |= IEEE80211_CONN_DISABLE_VHT; + } + + if (sband->band == NL80211_BAND_S1GHZ) { + s1g_oper = elems->s1g_oper; + if (!s1g_oper) + sdata_info(sdata, + "AP missing S1G operation element?\n"); + } + + *conn_flags |= + ieee80211_determine_chantype(sdata, link, *conn_flags, + sband, + cbss->channel, + bss->vht_cap_info, + ht_oper, vht_oper, + he_oper, eht_oper, + s1g_oper, + &chandef, false); + + if (link) + link->needed_rx_chains = + min(ieee80211_max_rx_chains(link, cbss), + local->rx_chains); + + rcu_read_unlock(); + /* the element data was RCU protected so no longer valid anyway */ + kfree(elems); + elems = NULL; + + if (*conn_flags & IEEE80211_CONN_DISABLE_HE && is_6ghz) { + sdata_info(sdata, "Rejecting non-HE 6/7 GHz connection"); + return -EINVAL; + } + + if (!link) + return 0; + + /* will change later if needed */ + link->smps_mode = IEEE80211_SMPS_OFF; + + mutex_lock(&local->mtx); + /* + * If this fails (possibly due to channel context sharing + * on incompatible channels, e.g. 80+80 and 160 sharing the + * same control channel) try to use a smaller bandwidth. + */ + ret = ieee80211_link_use_channel(link, &chandef, + IEEE80211_CHANCTX_SHARED); + + /* don't downgrade for 5 and 10 MHz channels, though. 
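+	 * The downgrade loop below only steps through the regular
+	 * 20/40/80/160 MHz widths, so those narrow channels are used
+	 * as-is or not at all.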
*/ + if (chandef.width == NL80211_CHAN_WIDTH_5 || + chandef.width == NL80211_CHAN_WIDTH_10) + goto out; + + while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) { + *conn_flags |= + ieee80211_chandef_downgrade(&chandef); + ret = ieee80211_link_use_channel(link, &chandef, + IEEE80211_CHANCTX_SHARED); + } + out: + mutex_unlock(&local->mtx); + return ret; +} + +static bool ieee80211_get_dtim(const struct cfg80211_bss_ies *ies, + u8 *dtim_count, u8 *dtim_period) +{ + const u8 *tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len); + const u8 *idx_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, ies->data, + ies->len); + const struct ieee80211_tim_ie *tim = NULL; + const struct ieee80211_bssid_index *idx; + bool valid = tim_ie && tim_ie[1] >= 2; + + if (valid) + tim = (void *)(tim_ie + 2); + + if (dtim_count) + *dtim_count = valid ? tim->dtim_count : 0; + + if (dtim_period) + *dtim_period = valid ? tim->dtim_period : 0; + + /* Check if value is overridden by non-transmitted profile */ + if (!idx_ie || idx_ie[1] < 3) + return valid; + + idx = (void *)(idx_ie + 2); + + if (dtim_count) + *dtim_count = idx->dtim_count; + + if (dtim_period) + *dtim_period = idx->dtim_period; + + return true; +} + +static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + struct ieee802_11_elems *elems, + const u8 *elem_start, unsigned int elem_len) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; + struct ieee80211_local *local = sdata->local; + unsigned int link_id; + struct sta_info *sta; + u64 changed[IEEE80211_MLD_MAX_NUM_LINKS] = {}; + u16 valid_links = 0, dormant_links = 0; + int err; + + mutex_lock(&sdata->local->sta_mtx); + /* + * station info was already allocated and inserted before + * the association and should be available to us + */ + sta = sta_info_get(sdata, assoc_data->ap_addr); + if (WARN_ON(!sta)) + goto out_err; + + if (ieee80211_vif_is_mld(&sdata->vif)) { + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { + if (!assoc_data->link[link_id].bss) + continue; + + valid_links |= BIT(link_id); + if (assoc_data->link[link_id].disabled) + dormant_links |= BIT(link_id); + + if (link_id != assoc_data->assoc_link_id) { + err = ieee80211_sta_allocate_link(sta, link_id); + if (err) + goto out_err; + } + } + + ieee80211_vif_set_links(sdata, valid_links, dormant_links); + } + + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { + struct cfg80211_bss *cbss = assoc_data->link[link_id].bss; + struct ieee80211_link_data *link; + struct link_sta_info *link_sta; + + if (!cbss) + continue; + + link = sdata_dereference(sdata->link[link_id], sdata); + if (WARN_ON(!link)) + goto out_err; + + if (ieee80211_vif_is_mld(&sdata->vif)) + link_info(link, + "local address %pM, AP link address %pM%s\n", + link->conf->addr, + assoc_data->link[link_id].bss->bssid, + link_id == assoc_data->assoc_link_id ? 
+ " (assoc)" : ""); + + link_sta = rcu_dereference_protected(sta->link[link_id], + lockdep_is_held(&local->sta_mtx)); + if (WARN_ON(!link_sta)) + goto out_err; + + if (!link->u.mgd.have_beacon) { + const struct cfg80211_bss_ies *ies; + + rcu_read_lock(); + ies = rcu_dereference(cbss->beacon_ies); + if (ies) + link->u.mgd.have_beacon = true; + else + ies = rcu_dereference(cbss->ies); + ieee80211_get_dtim(ies, + &link->conf->sync_dtim_count, + &link->u.mgd.dtim_period); + link->conf->beacon_int = cbss->beacon_interval; + rcu_read_unlock(); + } + + link->conf->dtim_period = link->u.mgd.dtim_period ?: 1; + + if (link_id != assoc_data->assoc_link_id) { + err = ieee80211_prep_channel(sdata, link, cbss, + &link->u.mgd.conn_flags); + if (err) { + link_info(link, "prep_channel failed\n"); + goto out_err; + } + } + + err = ieee80211_mgd_setup_link_sta(link, sta, link_sta, + assoc_data->link[link_id].bss); + if (err) + goto out_err; + + if (!ieee80211_assoc_config_link(link, link_sta, + assoc_data->link[link_id].bss, + mgmt, elem_start, elem_len, + &changed[link_id])) + goto out_err; + + if (assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) { + valid_links &= ~BIT(link_id); + ieee80211_sta_remove_link(sta, link_id); + continue; + } + + if (link_id != assoc_data->assoc_link_id) { + err = ieee80211_sta_activate_link(sta, link_id); + if (err) + goto out_err; + } + } + + /* links might have changed due to rejected ones, set them again */ + ieee80211_vif_set_links(sdata, valid_links, dormant_links); + + rate_control_rate_init(sta); + + if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) { + set_sta_flag(sta, WLAN_STA_MFP); + sta->sta.mfp = true; + } else { + sta->sta.mfp = false; + } + + ieee80211_sta_set_max_amsdu_subframes(sta, elems->ext_capab, + elems->ext_capab_len); + + sta->sta.wme = (elems->wmm_param || elems->s1g_capab) && + local->hw.queues >= IEEE80211_NUM_ACS; + + err = sta_info_move_state(sta, IEEE80211_STA_ASSOC); + if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) + err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); + if (err) { + sdata_info(sdata, + "failed to move station %pM to desired state\n", + sta->sta.addr); + WARN_ON(__sta_info_destroy(sta)); + goto out_err; + } + + if (sdata->wdev.use_4addr) + drv_sta_set_4addr(local, sdata, &sta->sta, true); + + mutex_unlock(&sdata->local->sta_mtx); + + ieee80211_set_associated(sdata, assoc_data, changed); + + /* + * If we're using 4-addr mode, let the AP know that we're + * doing so, so that it can create the STA VLAN on its side + */ + if (ifmgd->use_4addr) + ieee80211_send_4addr_nullfunc(local, sdata); + + /* + * Start timer to probe the connection to the AP now. + * Also start the timer that will detect beacon loss. 
+ */ + ieee80211_sta_reset_beacon_monitor(sdata); + ieee80211_sta_reset_conn_monitor(sdata); + + return true; +out_err: + eth_zero_addr(sdata->vif.cfg.ap_addr); + mutex_unlock(&sdata->local->sta_mtx); + return false; +} + +static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; + u16 capab_info, status_code, aid; + struct ieee80211_elems_parse_params parse_params = { + .bss = NULL, + .link_id = -1, + .from_ap = true, + }; + struct ieee802_11_elems *elems; + int ac; + const u8 *elem_start; + unsigned int elem_len; + bool reassoc; + struct ieee80211_event event = { + .type = MLME_EVENT, + .u.mlme.data = ASSOC_EVENT, + }; + struct ieee80211_prep_tx_info info = {}; + struct cfg80211_rx_assoc_resp resp = { + .uapsd_queues = -1, + }; + u8 ap_mld_addr[ETH_ALEN] __aligned(2); + unsigned int link_id; + + sdata_assert_lock(sdata); + + if (!assoc_data) + return; + + if (!ether_addr_equal(assoc_data->ap_addr, mgmt->bssid) || + !ether_addr_equal(assoc_data->ap_addr, mgmt->sa)) + return; + + /* + * AssocResp and ReassocResp have identical structure, so process both + * of them in this function. + */ + + if (len < 24 + 6) + return; + + reassoc = ieee80211_is_reassoc_resp(mgmt->frame_control); + capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); + status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); + if (assoc_data->s1g) + elem_start = mgmt->u.s1g_assoc_resp.variable; + else + elem_start = mgmt->u.assoc_resp.variable; + + /* + * Note: this may not be perfect, AP might misbehave - if + * anyone needs to rely on perfect complete notification + * with the exact right subtype, then we need to track what + * we actually transmitted. + */ + info.subtype = reassoc ? IEEE80211_STYPE_REASSOC_REQ : + IEEE80211_STYPE_ASSOC_REQ; + + if (assoc_data->fils_kek_len && + fils_decrypt_assoc_resp(sdata, (u8 *)mgmt, &len, assoc_data) < 0) + return; + + elem_len = len - (elem_start - (u8 *)mgmt); + parse_params.start = elem_start; + parse_params.len = elem_len; + elems = ieee802_11_parse_elems_full(&parse_params); + if (!elems) + goto notify_driver; + + if (elems->aid_resp) + aid = le16_to_cpu(elems->aid_resp->aid); + else if (assoc_data->s1g) + aid = 0; /* TODO */ + else + aid = le16_to_cpu(mgmt->u.assoc_resp.aid); + + /* + * The 5 MSB of the AID field are reserved + * (802.11-2016 9.4.1.8 AID field) + */ + aid &= 0x7ff; + + sdata_info(sdata, + "RX %sssocResp from %pM (capab=0x%x status=%d aid=%d)\n", + reassoc ? 
"Rea" : "A", assoc_data->ap_addr, + capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); + + ifmgd->broken_ap = false; + + if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY && + elems->timeout_int && + elems->timeout_int->type == WLAN_TIMEOUT_ASSOC_COMEBACK) { + u32 tu, ms; + + cfg80211_assoc_comeback(sdata->dev, assoc_data->ap_addr, + le32_to_cpu(elems->timeout_int->value)); + + tu = le32_to_cpu(elems->timeout_int->value); + ms = tu * 1024 / 1000; + sdata_info(sdata, + "%pM rejected association temporarily; comeback duration %u TU (%u ms)\n", + assoc_data->ap_addr, tu, ms); + assoc_data->timeout = jiffies + msecs_to_jiffies(ms); + assoc_data->timeout_started = true; + if (ms > IEEE80211_ASSOC_TIMEOUT) + run_again(sdata, assoc_data->timeout); + goto notify_driver; + } + + if (status_code != WLAN_STATUS_SUCCESS) { + sdata_info(sdata, "%pM denied association (code=%d)\n", + assoc_data->ap_addr, status_code); + event.u.mlme.status = MLME_DENIED; + event.u.mlme.reason = status_code; + drv_event_callback(sdata->local, sdata, &event); + } else { + if (aid == 0 || aid > IEEE80211_MAX_AID) { + sdata_info(sdata, + "invalid AID value %d (out of range), turn off PS\n", + aid); + aid = 0; + ifmgd->broken_ap = true; + } + + if (ieee80211_vif_is_mld(&sdata->vif)) { + if (!elems->ml_basic) { + sdata_info(sdata, + "MLO association with %pM but no multi-link element in response!\n", + assoc_data->ap_addr); + goto abandon_assoc; + } + + if (le16_get_bits(elems->ml_basic->control, + IEEE80211_ML_CONTROL_TYPE) != + IEEE80211_ML_CONTROL_TYPE_BASIC) { + sdata_info(sdata, + "bad multi-link element (control=0x%x)\n", + le16_to_cpu(elems->ml_basic->control)); + goto abandon_assoc; + } else { + struct ieee80211_mle_basic_common_info *common; + + common = (void *)elems->ml_basic->variable; + + if (memcmp(assoc_data->ap_addr, + common->mld_mac_addr, ETH_ALEN)) { + sdata_info(sdata, + "AP MLD MAC address mismatch: got %pM expected %pM\n", + common->mld_mac_addr, + assoc_data->ap_addr); + goto abandon_assoc; + } + } + } + + sdata->vif.cfg.aid = aid; + + if (!ieee80211_assoc_success(sdata, mgmt, elems, + elem_start, elem_len)) { + /* oops -- internal error -- send timeout for now */ + ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT); + goto notify_driver; + } + event.u.mlme.status = MLME_SUCCESS; + drv_event_callback(sdata->local, sdata, &event); + sdata_info(sdata, "associated\n"); + + info.success = 1; + } + + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { + struct ieee80211_link_data *link; + + if (!assoc_data->link[link_id].bss) + continue; + + resp.links[link_id].bss = assoc_data->link[link_id].bss; + ether_addr_copy(resp.links[link_id].addr, + assoc_data->link[link_id].addr); + resp.links[link_id].status = assoc_data->link[link_id].status; + + link = sdata_dereference(sdata->link[link_id], sdata); + if (!link) + continue; + + /* get uapsd queues configuration - same for all links */ + resp.uapsd_queues = 0; + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + if (link->tx_conf[ac].uapsd) + resp.uapsd_queues |= ieee80211_ac_to_qos_mask[ac]; + } + + if (ieee80211_vif_is_mld(&sdata->vif)) { + ether_addr_copy(ap_mld_addr, sdata->vif.cfg.ap_addr); + resp.ap_mld_addr = ap_mld_addr; + } + + ieee80211_destroy_assoc_data(sdata, + status_code == WLAN_STATUS_SUCCESS ? 
+ ASSOC_SUCCESS : + ASSOC_REJECTED); + + resp.buf = (u8 *)mgmt; + resp.len = len; + resp.req_ies = ifmgd->assoc_req_ies; + resp.req_ies_len = ifmgd->assoc_req_ies_len; + cfg80211_rx_assoc_resp(sdata->dev, &resp); +notify_driver: + drv_mgd_complete_tx(sdata->local, sdata, &info); + kfree(elems); + return; +abandon_assoc: + ieee80211_destroy_assoc_data(sdata, ASSOC_ABANDON); + goto notify_driver; +} + +static void ieee80211_rx_bss_info(struct ieee80211_link_data *link, + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_bss *bss; + struct ieee80211_channel *channel; + + sdata_assert_lock(sdata); + + channel = ieee80211_get_channel_khz(local->hw.wiphy, + ieee80211_rx_status_to_khz(rx_status)); + if (!channel) + return; + + bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, channel); + if (bss) { + link->conf->beacon_rate = bss->beacon_rate; + ieee80211_rx_bss_put(local, bss); + } +} + + +static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_link_data *link, + struct sk_buff *skb) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_mgmt *mgmt = (void *)skb->data; + struct ieee80211_if_managed *ifmgd; + struct ieee80211_rx_status *rx_status = (void *) skb->cb; + struct ieee80211_channel *channel; + size_t baselen, len = skb->len; + + ifmgd = &sdata->u.mgd; + + sdata_assert_lock(sdata); + + /* + * According to Draft P802.11ax D6.0 clause 26.17.2.3.2: + * "If a 6 GHz AP receives a Probe Request frame and responds with + * a Probe Response frame [..], the Address 1 field of the Probe + * Response frame shall be set to the broadcast address [..]" + * So, on 6GHz band we should also accept broadcast responses. + */ + channel = ieee80211_get_channel(sdata->local->hw.wiphy, + rx_status->freq); + if (!channel) + return; + + if (!ether_addr_equal(mgmt->da, sdata->vif.addr) && + (channel->band != NL80211_BAND_6GHZ || + !is_broadcast_ether_addr(mgmt->da))) + return; /* ignore ProbeResp to foreign address */ + + baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; + if (baselen > len) + return; + + ieee80211_rx_bss_info(link, mgmt, len, rx_status); + + if (ifmgd->associated && + ether_addr_equal(mgmt->bssid, link->u.mgd.bssid)) + ieee80211_reset_ap_probe(sdata); +} + +/* + * This is the canonical list of information elements we care about, + * the filter code also gives us all changes to the Microsoft OUI + * (00:50:F2) vendor IE which is used for WMM which we need to track, + * as well as the DTPC IE (part of the Cisco OUI) used for signaling + * changes to requested client power. + * + * We implement beacon filtering in software since that means we can + * avoid processing the frame here and in cfg80211, and userspace + * will not be able to tell whether the hardware supports it or not. + * + * XXX: This list needs to be dynamic -- userspace needs to be able to + * add items it requires. It also needs to be able to tell us to + * look out for other vendor IEs. 
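+ *
+ * Editorial note (not part of this change): the filter below is a
+ * u64 bitmap indexed by element ID, so only IDs 0..63 can be tracked
+ * this way. Adding another element means OR-ing in another bit, for
+ * example (purely as an illustration)
+ *
+ *	(1ULL << WLAN_EID_QUIET) |
+ *
+ * and relying on the parser CRC (parse_params.filter/crc further
+ * down) to detect when any of the filtered elements changed.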
+ */ +static const u64 care_about_ies = + (1ULL << WLAN_EID_COUNTRY) | + (1ULL << WLAN_EID_ERP_INFO) | + (1ULL << WLAN_EID_CHANNEL_SWITCH) | + (1ULL << WLAN_EID_PWR_CONSTRAINT) | + (1ULL << WLAN_EID_HT_CAPABILITY) | + (1ULL << WLAN_EID_HT_OPERATION) | + (1ULL << WLAN_EID_EXT_CHANSWITCH_ANN); + +static void ieee80211_handle_beacon_sig(struct ieee80211_link_data *link, + struct ieee80211_if_managed *ifmgd, + struct ieee80211_bss_conf *bss_conf, + struct ieee80211_local *local, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + + /* Track average RSSI from the Beacon frames of the current AP */ + + if (!link->u.mgd.tracking_signal_avg) { + link->u.mgd.tracking_signal_avg = true; + ewma_beacon_signal_init(&link->u.mgd.ave_beacon_signal); + link->u.mgd.last_cqm_event_signal = 0; + link->u.mgd.count_beacon_signal = 1; + link->u.mgd.last_ave_beacon_signal = 0; + } else { + link->u.mgd.count_beacon_signal++; + } + + ewma_beacon_signal_add(&link->u.mgd.ave_beacon_signal, + -rx_status->signal); + + if (ifmgd->rssi_min_thold != ifmgd->rssi_max_thold && + link->u.mgd.count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) { + int sig = -ewma_beacon_signal_read(&link->u.mgd.ave_beacon_signal); + int last_sig = link->u.mgd.last_ave_beacon_signal; + struct ieee80211_event event = { + .type = RSSI_EVENT, + }; + + /* + * if signal crosses either of the boundaries, invoke callback + * with appropriate parameters + */ + if (sig > ifmgd->rssi_max_thold && + (last_sig <= ifmgd->rssi_min_thold || last_sig == 0)) { + link->u.mgd.last_ave_beacon_signal = sig; + event.u.rssi.data = RSSI_EVENT_HIGH; + drv_event_callback(local, sdata, &event); + } else if (sig < ifmgd->rssi_min_thold && + (last_sig >= ifmgd->rssi_max_thold || + last_sig == 0)) { + link->u.mgd.last_ave_beacon_signal = sig; + event.u.rssi.data = RSSI_EVENT_LOW; + drv_event_callback(local, sdata, &event); + } + } + + if (bss_conf->cqm_rssi_thold && + link->u.mgd.count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT && + !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) { + int sig = -ewma_beacon_signal_read(&link->u.mgd.ave_beacon_signal); + int last_event = link->u.mgd.last_cqm_event_signal; + int thold = bss_conf->cqm_rssi_thold; + int hyst = bss_conf->cqm_rssi_hyst; + + if (sig < thold && + (last_event == 0 || sig < last_event - hyst)) { + link->u.mgd.last_cqm_event_signal = sig; + ieee80211_cqm_rssi_notify( + &sdata->vif, + NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, + sig, GFP_KERNEL); + } else if (sig > thold && + (last_event == 0 || sig > last_event + hyst)) { + link->u.mgd.last_cqm_event_signal = sig; + ieee80211_cqm_rssi_notify( + &sdata->vif, + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, + sig, GFP_KERNEL); + } + } + + if (bss_conf->cqm_rssi_low && + link->u.mgd.count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) { + int sig = -ewma_beacon_signal_read(&link->u.mgd.ave_beacon_signal); + int last_event = link->u.mgd.last_cqm_event_signal; + int low = bss_conf->cqm_rssi_low; + int high = bss_conf->cqm_rssi_high; + + if (sig < low && + (last_event == 0 || last_event >= low)) { + link->u.mgd.last_cqm_event_signal = sig; + ieee80211_cqm_rssi_notify( + &sdata->vif, + NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, + sig, GFP_KERNEL); + } else if (sig > high && + (last_event == 0 || last_event <= high)) { + link->u.mgd.last_cqm_event_signal = sig; + ieee80211_cqm_rssi_notify( + &sdata->vif, + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, + sig, GFP_KERNEL); + } + } +} + +static bool ieee80211_rx_our_beacon(const u8 
*tx_bssid, + struct cfg80211_bss *bss) +{ + if (ether_addr_equal(tx_bssid, bss->bssid)) + return true; + if (!bss->transmitted_bss) + return false; + return ether_addr_equal(tx_bssid, bss->transmitted_bss->bssid); +} + +static bool ieee80211_config_puncturing(struct ieee80211_link_data *link, + const struct ieee80211_eht_operation *eht_oper, + u64 *changed) +{ + u16 bitmap = 0, extracted; + + if ((eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) && + (eht_oper->params & + IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)) { + const struct ieee80211_eht_operation_info *info = + (void *)eht_oper->optional; + const u8 *disable_subchannel_bitmap = info->optional; + + bitmap = get_unaligned_le16(disable_subchannel_bitmap); + } + + extracted = ieee80211_extract_dis_subch_bmap(eht_oper, + &link->conf->chandef, + bitmap); + + /* accept if there are no changes */ + if (!(*changed & BSS_CHANGED_BANDWIDTH) && + extracted == link->conf->eht_puncturing) + return true; + + if (!cfg80211_valid_disable_subchannel_bitmap(&bitmap, + &link->conf->chandef)) { + link_info(link, + "Got an invalid disable subchannel bitmap from AP %pM: bitmap = 0x%x, bw = 0x%x. disconnect\n", + link->u.mgd.bssid, + bitmap, + link->conf->chandef.width); + return false; + } + + ieee80211_handle_puncturing_bitmap(link, eht_oper, bitmap, changed); + return true; +} + +static void ieee80211_ml_reconf_work(struct wiphy *wiphy, + struct wiphy_work *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, + u.mgd.ml_reconf_work.work); + u16 new_valid_links, new_active_links, new_dormant_links; + int ret; + + sdata_lock(sdata); + if (!sdata->u.mgd.removed_links) { + sdata_unlock(sdata); + return; + } + + sdata_info(sdata, + "MLO Reconfiguration: work: valid=0x%x, removed=0x%x\n", + sdata->vif.valid_links, sdata->u.mgd.removed_links); + + new_valid_links = sdata->vif.valid_links & ~sdata->u.mgd.removed_links; + if (new_valid_links == sdata->vif.valid_links) { + sdata_unlock(sdata); + return; + } + + if (!new_valid_links || + !(new_valid_links & ~sdata->vif.dormant_links)) { + sdata_info(sdata, "No valid links after reconfiguration\n"); + ret = -EINVAL; + goto out; + } + + new_active_links = sdata->vif.active_links & ~sdata->u.mgd.removed_links; + if (new_active_links != sdata->vif.active_links) { + if (!new_active_links) + new_active_links = + BIT(ffs(new_valid_links & + ~sdata->vif.dormant_links) - 1); + + ret = __ieee80211_set_active_links(&sdata->vif, + new_active_links); + if (ret) { + sdata_info(sdata, + "Failed setting active links\n"); + goto out; + } + } + + new_dormant_links = sdata->vif.dormant_links & ~sdata->u.mgd.removed_links; + + ret = ieee80211_vif_set_links(sdata, new_valid_links, + new_dormant_links); + if (ret) + sdata_info(sdata, "Failed setting valid links\n"); + +out: + if (!ret) + cfg80211_links_removed(sdata->dev, sdata->u.mgd.removed_links); + else + ___ieee80211_disconnect(sdata); + + sdata->u.mgd.removed_links = 0; + + sdata_unlock(sdata); +} + +static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata, + struct ieee802_11_elems *elems) +{ + const struct ieee80211_multi_link_elem *ml; + const struct element *sub; + ssize_t ml_len; + unsigned long removed_links = 0; + u16 link_removal_timeout[IEEE80211_MLD_MAX_NUM_LINKS] = {}; + u8 link_id; + u32 delay; + + if (!ieee80211_vif_is_mld(&sdata->vif) || !elems->ml_reconf) + return; + + ml_len = cfg80211_defragment_element(elems->ml_reconf_elem, + elems->ie_start, + elems->total_len, + 
elems->scratch_pos, + elems->scratch + elems->scratch_len - + elems->scratch_pos, + WLAN_EID_FRAGMENT); + if (ml_len < 0) + return; + + elems->ml_reconf = (const void *)elems->scratch_pos; + elems->ml_reconf_len = ml_len; + ml = elems->ml_reconf; + + /* Directly parse the sub elements as the common information doesn't + * hold any useful information. + */ + for_each_mle_subelement(sub, (u8 *)ml, ml_len) { + struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data; + u8 *pos = prof->variable; + u16 control; + + if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE) + continue; + + if (!ieee80211_mle_reconf_sta_prof_size_ok(sub->data, + sub->datalen)) + return; + + control = le16_to_cpu(prof->control); + link_id = control & IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID; + + removed_links |= BIT(link_id); + + /* the MAC address should not be included, but handle it */ + if (control & + IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT) + pos += 6; + + /* According to Draft P802.11be_D3.0, the control should + * include the AP Removal Timer present. If the AP Removal Timer + * is not present assume immediate removal. + */ + if (control & + IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT) + link_removal_timeout[link_id] = le16_to_cpu(*(__le16 *)pos); + } + + removed_links &= sdata->vif.valid_links; + if (!removed_links) { + /* In case the removal was cancelled, abort it */ + if (sdata->u.mgd.removed_links) { + sdata->u.mgd.removed_links = 0; + wiphy_delayed_work_cancel(sdata->local->hw.wiphy, + &sdata->u.mgd.ml_reconf_work); + } + return; + } + + delay = 0; + for_each_set_bit(link_id, &removed_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + sdata_dereference(sdata->vif.link_conf[link_id], sdata); + u32 link_delay; + + if (!link_conf) { + removed_links &= ~BIT(link_id); + continue; + } + + link_delay = link_conf->beacon_int * + link_removal_timeout[link_id]; + + if (!delay) + delay = link_delay; + else + delay = min(delay, link_delay); + } + + sdata->u.mgd.removed_links = removed_links; + wiphy_delayed_work_queue(sdata->local->hw.wiphy, + &sdata->u.mgd.ml_reconf_work, + TU_TO_JIFFIES(delay)); +} + +static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, + struct ieee80211_hdr *hdr, size_t len, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; + struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg; + struct ieee80211_mgmt *mgmt = (void *) hdr; + size_t baselen; + struct ieee802_11_elems *elems; + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_supported_band *sband; + struct ieee80211_channel *chan; + struct link_sta_info *link_sta; + struct sta_info *sta; + u64 changed = 0; + bool erp_valid; + u8 erp_value = 0; + u32 ncrc = 0; + u8 *bssid, *variable = mgmt->u.beacon.variable; + u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN]; + struct ieee80211_elems_parse_params parse_params = { + .link_id = -1, + .from_ap = true, + }; + + sdata_assert_lock(sdata); + + /* Process beacon from the current BSS */ + bssid = ieee80211_get_bssid(hdr, len, sdata->vif.type); + if (ieee80211_is_s1g_beacon(mgmt->frame_control)) { + struct ieee80211_ext *ext = (void *) mgmt; + + if (ieee80211_is_s1g_short_beacon(ext->frame_control)) + variable = ext->u.s1g_short_beacon.variable; + else + variable = ext->u.s1g_beacon.variable; + } + + baselen = (u8 
*) variable - (u8 *) mgmt; + if (baselen > len) + return; + + parse_params.start = variable; + parse_params.len = len - baselen; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(link->conf->chanctx_conf); + if (!chanctx_conf) { + rcu_read_unlock(); + return; + } + + if (ieee80211_rx_status_to_khz(rx_status) != + ieee80211_channel_to_khz(chanctx_conf->def.chan)) { + rcu_read_unlock(); + return; + } + chan = chanctx_conf->def.chan; + rcu_read_unlock(); + + if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon && + !WARN_ON(ieee80211_vif_is_mld(&sdata->vif)) && + ieee80211_rx_our_beacon(bssid, ifmgd->assoc_data->link[0].bss)) { + parse_params.bss = ifmgd->assoc_data->link[0].bss; + elems = ieee802_11_parse_elems_full(&parse_params); + if (!elems) + return; + + ieee80211_rx_bss_info(link, mgmt, len, rx_status); + + if (elems->dtim_period) + link->u.mgd.dtim_period = elems->dtim_period; + link->u.mgd.have_beacon = true; + ifmgd->assoc_data->need_beacon = false; + if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) { + link->conf->sync_tsf = + le64_to_cpu(mgmt->u.beacon.timestamp); + link->conf->sync_device_ts = + rx_status->device_timestamp; + link->conf->sync_dtim_count = elems->dtim_count; + } + + if (elems->mbssid_config_ie) + bss_conf->profile_periodicity = + elems->mbssid_config_ie->profile_periodicity; + else + bss_conf->profile_periodicity = 0; + + if (elems->ext_capab_len >= 11 && + (elems->ext_capab[10] & WLAN_EXT_CAPA11_EMA_SUPPORT)) + bss_conf->ema_ap = true; + else + bss_conf->ema_ap = false; + + /* continue assoc process */ + ifmgd->assoc_data->timeout = jiffies; + ifmgd->assoc_data->timeout_started = true; + run_again(sdata, ifmgd->assoc_data->timeout); + kfree(elems); + return; + } + + if (!ifmgd->associated || + !ieee80211_rx_our_beacon(bssid, link->u.mgd.bss)) + return; + bssid = link->u.mgd.bssid; + + if (!(rx_status->flag & RX_FLAG_NO_SIGNAL_VAL)) + ieee80211_handle_beacon_sig(link, ifmgd, bss_conf, + local, rx_status); + + if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) { + mlme_dbg_ratelimited(sdata, + "cancelling AP probe due to a received beacon\n"); + ieee80211_reset_ap_probe(sdata); + } + + /* + * Push the beacon loss detection into the future since + * we are processing a beacon from the AP just now. + */ + ieee80211_sta_reset_beacon_monitor(sdata); + + /* TODO: CRC urrently not calculated on S1G Beacon Compatibility + * element (which carries the beacon interval). Don't forget to add a + * bit to care_about_ies[] above if mac80211 is interested in a + * changing S1G element. 
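+ *
+ * (Editorial sketch of the mechanism below, not part of this change.)
+ * The CRC is seeded over the 4 bytes holding the beacon interval and
+ * capability field, the element parser then folds the filtered
+ * elements into it, and the result is compared against the CRC kept
+ * from the previous beacon so unchanged beacons can be skipped:
+ *
+ *	if (ncrc == link->u.mgd.beacon_crc && link->u.mgd.beacon_crc_valid)
+ *		goto free;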
+ */ + if (!ieee80211_is_s1g_beacon(hdr->frame_control)) + ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); + parse_params.bss = link->u.mgd.bss; + parse_params.filter = care_about_ies; + parse_params.crc = ncrc; + elems = ieee802_11_parse_elems_full(&parse_params); + if (!elems) + return; + ncrc = elems->crc; + + if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) && + ieee80211_check_tim(elems->tim, elems->tim_len, vif_cfg->aid)) { + if (local->hw.conf.dynamic_ps_timeout > 0) { + if (local->hw.conf.flags & IEEE80211_CONF_PS) { + local->hw.conf.flags &= ~IEEE80211_CONF_PS; + ieee80211_hw_config(local, + IEEE80211_CONF_CHANGE_PS); + } + ieee80211_send_nullfunc(local, sdata, false); + } else if (!local->pspolling && sdata->u.mgd.powersave) { + local->pspolling = true; + + /* + * Here is assumed that the driver will be + * able to send ps-poll frame and receive a + * response even though power save mode is + * enabled, but some drivers might require + * to disable power save here. This needs + * to be investigated. + */ + ieee80211_send_pspoll(local, sdata); + } + } + + if (sdata->vif.p2p || + sdata->vif.driver_flags & IEEE80211_VIF_GET_NOA_UPDATE) { + struct ieee80211_p2p_noa_attr noa = {}; + int ret; + + ret = cfg80211_get_p2p_attr(variable, + len - baselen, + IEEE80211_P2P_ATTR_ABSENCE_NOTICE, + (u8 *) &noa, sizeof(noa)); + if (ret >= 2) { + if (link->u.mgd.p2p_noa_index != noa.index) { + /* valid noa_attr and index changed */ + link->u.mgd.p2p_noa_index = noa.index; + memcpy(&bss_conf->p2p_noa_attr, &noa, sizeof(noa)); + changed |= BSS_CHANGED_P2P_PS; + /* + * make sure we update all information, the CRC + * mechanism doesn't look at P2P attributes. + */ + link->u.mgd.beacon_crc_valid = false; + } + } else if (link->u.mgd.p2p_noa_index != -1) { + /* noa_attr not found and we had valid noa_attr before */ + link->u.mgd.p2p_noa_index = -1; + memset(&bss_conf->p2p_noa_attr, 0, sizeof(bss_conf->p2p_noa_attr)); + changed |= BSS_CHANGED_P2P_PS; + link->u.mgd.beacon_crc_valid = false; + } + } + + if (link->u.mgd.csa_waiting_bcn) + ieee80211_chswitch_post_beacon(link); + + /* + * Update beacon timing and dtim count on every beacon appearance. This + * will allow the driver to use the most updated values. Do it before + * comparing this one with last received beacon. + * IMPORTANT: These parameters would possibly be out of sync by the time + * the driver will use them. The synchronized view is currently + * guaranteed only in certain callbacks. + */ + if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) && + !ieee80211_is_s1g_beacon(hdr->frame_control)) { + link->conf->sync_tsf = + le64_to_cpu(mgmt->u.beacon.timestamp); + link->conf->sync_device_ts = + rx_status->device_timestamp; + link->conf->sync_dtim_count = elems->dtim_count; + } + + if ((ncrc == link->u.mgd.beacon_crc && link->u.mgd.beacon_crc_valid) || + ieee80211_is_s1g_short_beacon(mgmt->frame_control)) + goto free; + link->u.mgd.beacon_crc = ncrc; + link->u.mgd.beacon_crc_valid = true; + + ieee80211_rx_bss_info(link, mgmt, len, rx_status); + + ieee80211_sta_process_chanswitch(link, rx_status->mactime, + rx_status->device_timestamp, + elems, true); + + if (!link->u.mgd.disable_wmm_tracking && + ieee80211_sta_wmm_params(local, link, elems->wmm_param, + elems->wmm_param_len, + elems->mu_edca_param_set)) + changed |= BSS_CHANGED_QOS; + + /* + * If we haven't had a beacon before, tell the driver about the + * DTIM period (and beacon timing if desired) now. 
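+ *
+ * (Editorial note, not part of this change.) The "?: 1" below is the
+ * GCC/Clang shorthand for "x ? x : 1", i.e.
+ *
+ *	bss_conf->dtim_period = elems->dtim_period ?
+ *				elems->dtim_period : 1;
+ *
+ * which papers over APs that advertise a DTIM period of 0.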
+ */ + if (!link->u.mgd.have_beacon) { + /* a few bogus AP send dtim_period = 0 or no TIM IE */ + bss_conf->dtim_period = elems->dtim_period ?: 1; + + changed |= BSS_CHANGED_BEACON_INFO; + link->u.mgd.have_beacon = true; + + mutex_lock(&local->iflist_mtx); + ieee80211_recalc_ps(local); + mutex_unlock(&local->iflist_mtx); + + ieee80211_recalc_ps_vif(sdata); + } + + if (elems->erp_info) { + erp_valid = true; + erp_value = elems->erp_info[0]; + } else { + erp_valid = false; + } + + if (!ieee80211_is_s1g_beacon(hdr->frame_control)) + changed |= ieee80211_handle_bss_capability(link, + le16_to_cpu(mgmt->u.beacon.capab_info), + erp_valid, erp_value); + + mutex_lock(&local->sta_mtx); + sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr); + if (WARN_ON(!sta)) { + mutex_unlock(&local->sta_mtx); + goto free; + } + link_sta = rcu_dereference_protected(sta->link[link->link_id], + lockdep_is_held(&local->sta_mtx)); + if (WARN_ON(!link_sta)) { + mutex_unlock(&local->sta_mtx); + goto free; + } + + if (WARN_ON(!link->conf->chandef.chan)) + goto free; + + sband = local->hw.wiphy->bands[link->conf->chandef.chan->band]; + + changed |= ieee80211_recalc_twt_req(sdata, sband, link, link_sta, elems); + + if (ieee80211_config_bw(link, elems->ht_cap_elem, + elems->vht_cap_elem, elems->ht_operation, + elems->vht_operation, elems->he_operation, + elems->eht_operation, + elems->s1g_oper, bssid, &changed)) { + mutex_unlock(&local->sta_mtx); + sdata_info(sdata, + "failed to follow AP %pM bandwidth change, disconnect\n", + bssid); + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, + WLAN_REASON_DEAUTH_LEAVING, + true, deauth_buf); + ieee80211_report_disconnect(sdata, deauth_buf, + sizeof(deauth_buf), true, + WLAN_REASON_DEAUTH_LEAVING, + false); + goto free; + } + + if (elems->opmode_notif) + ieee80211_vht_handle_opmode(sdata, link_sta, + *elems->opmode_notif, + rx_status->band); + mutex_unlock(&local->sta_mtx); + + changed |= ieee80211_handle_pwr_constr(link, chan, mgmt, + elems->country_elem, + elems->country_elem_len, + elems->pwr_constr_elem, + elems->cisco_dtpc_elem); + + if (elems->eht_operation && + !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)) { + if (!ieee80211_config_puncturing(link, elems->eht_operation, + &changed)) { + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, + WLAN_REASON_DEAUTH_LEAVING, + true, deauth_buf); + ieee80211_report_disconnect(sdata, deauth_buf, + sizeof(deauth_buf), true, + WLAN_REASON_DEAUTH_LEAVING, + false); + goto free; + } + } + + ieee80211_ml_reconfiguration(sdata, elems); + + ieee80211_link_info_change_notify(sdata, link, changed); +free: + kfree(elems); +} + +void ieee80211_sta_rx_queued_ext(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_link_data *link = &sdata->deflink; + struct ieee80211_rx_status *rx_status; + struct ieee80211_hdr *hdr; + u16 fc; + + rx_status = (struct ieee80211_rx_status *) skb->cb; + hdr = (struct ieee80211_hdr *) skb->data; + fc = le16_to_cpu(hdr->frame_control); + + sdata_lock(sdata); + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_S1G_BEACON: + ieee80211_rx_mgmt_beacon(link, hdr, skb->len, rx_status); + break; + } + sdata_unlock(sdata); +} + +void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_link_data *link = &sdata->deflink; + struct ieee80211_rx_status *rx_status; + struct ieee80211_mgmt *mgmt; + u16 fc; + int ies_len; + + rx_status = (struct ieee80211_rx_status *) skb->cb; + mgmt = (struct ieee80211_mgmt *) skb->data; + 
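+ /*
+  * Editorial comment (not part of this change): frame_control is
+  * little-endian on the air, so it is converted once here and the
+  * IEEE80211_FCTL_STYPE mask below selects which MLME handler runs;
+  * rx_status->link_valid/link_id (filled in by the RX path) pick the
+  * MLO link the frame belongs to, falling back to the deflink.
+  */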
fc = le16_to_cpu(mgmt->frame_control); + + sdata_lock(sdata); + + if (rx_status->link_valid) { + link = sdata_dereference(sdata->link[rx_status->link_id], + sdata); + if (!link) + goto out; + } + + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_BEACON: + ieee80211_rx_mgmt_beacon(link, (void *)mgmt, + skb->len, rx_status); + break; + case IEEE80211_STYPE_PROBE_RESP: + ieee80211_rx_mgmt_probe_resp(link, skb); + break; + case IEEE80211_STYPE_AUTH: + ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len); + break; + case IEEE80211_STYPE_DEAUTH: + ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len); + break; + case IEEE80211_STYPE_DISASSOC: + ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); + break; + case IEEE80211_STYPE_ASSOC_RESP: + case IEEE80211_STYPE_REASSOC_RESP: + ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len); + break; + case IEEE80211_STYPE_ACTION: + if (!sdata->u.mgd.associated || + !ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) + break; + + if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) { + struct ieee802_11_elems *elems; + + ies_len = skb->len - + offsetof(struct ieee80211_mgmt, + u.action.u.chan_switch.variable); + + if (ies_len < 0) + break; + + /* CSA IE cannot be overridden, no need for BSSID */ + elems = ieee802_11_parse_elems( + mgmt->u.action.u.chan_switch.variable, + ies_len, true, NULL); + + if (elems && !elems->parse_error) + ieee80211_sta_process_chanswitch(link, + rx_status->mactime, + rx_status->device_timestamp, + elems, false); + kfree(elems); + } else if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) { + struct ieee802_11_elems *elems; + + ies_len = skb->len - + offsetof(struct ieee80211_mgmt, + u.action.u.ext_chan_switch.variable); + + if (ies_len < 0) + break; + + /* + * extended CSA IE can't be overridden, no need for + * BSSID + */ + elems = ieee802_11_parse_elems( + mgmt->u.action.u.ext_chan_switch.variable, + ies_len, true, NULL); + + if (elems && !elems->parse_error) { + /* for the handling code pretend it was an IE */ + elems->ext_chansw_ie = + &mgmt->u.action.u.ext_chan_switch.data; + + ieee80211_sta_process_chanswitch(link, + rx_status->mactime, + rx_status->device_timestamp, + elems, false); + } + + kfree(elems); + } + break; + } +out: + sdata_unlock(sdata); +} + +static void ieee80211_sta_timer(struct timer_list *t) +{ + struct ieee80211_sub_if_data *sdata = + from_timer(sdata, t, u.mgd.timer); + + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); +} + +void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, + u8 reason, bool tx) +{ + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, + tx, frame_buf); + + ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, + reason, false); +} + +static int ieee80211_auth(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data; + u32 tx_flags = 0; + u16 trans = 1; + u16 status = 0; + struct ieee80211_prep_tx_info info = { + .subtype = IEEE80211_STYPE_AUTH, + }; + + sdata_assert_lock(sdata); + + if (WARN_ON_ONCE(!auth_data)) + return -EINVAL; + + auth_data->tries++; + + if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) { + sdata_info(sdata, "authentication with %pM timed out\n", + auth_data->ap_addr); + + /* + * Most likely AP is not in the range so remove the + * bss struct for that AP. 
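+ *
+ * (Editorial note, not part of this change.) cfg80211_unlink_bss()
+ * drops the entry from cfg80211's scan results, so a later connection
+ * attempt cannot keep reusing stale data for an AP that stopped
+ * answering; a fresh scan has to rediscover it first.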
+ */ + cfg80211_unlink_bss(local->hw.wiphy, auth_data->bss); + + return -ETIMEDOUT; + } + + if (auth_data->algorithm == WLAN_AUTH_SAE) + info.duration = jiffies_to_msecs(IEEE80211_AUTH_TIMEOUT_SAE); + + drv_mgd_prepare_tx(local, sdata, &info); + + sdata_info(sdata, "send auth to %pM (try %d/%d)\n", + auth_data->ap_addr, auth_data->tries, + IEEE80211_AUTH_MAX_TRIES); + + auth_data->expected_transaction = 2; + + if (auth_data->algorithm == WLAN_AUTH_SAE) { + trans = auth_data->sae_trans; + status = auth_data->sae_status; + auth_data->expected_transaction = trans; + } + + if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) + tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS | + IEEE80211_TX_INTFL_MLME_CONN_TX; + + ieee80211_send_auth(sdata, trans, auth_data->algorithm, status, + auth_data->data, auth_data->data_len, + auth_data->ap_addr, auth_data->ap_addr, + NULL, 0, 0, tx_flags); + + if (tx_flags == 0) { + if (auth_data->algorithm == WLAN_AUTH_SAE) + auth_data->timeout = jiffies + + IEEE80211_AUTH_TIMEOUT_SAE; + else + auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; + } else { + auth_data->timeout = + round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG); + } + + auth_data->timeout_started = true; + run_again(sdata, auth_data->timeout); + + return 0; +} + +static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data; + struct ieee80211_local *local = sdata->local; + int ret; + + sdata_assert_lock(sdata); + + assoc_data->tries++; + if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) { + sdata_info(sdata, "association with %pM timed out\n", + assoc_data->ap_addr); + + /* + * Most likely AP is not in the range so remove the + * bss struct for that AP. + */ + cfg80211_unlink_bss(local->hw.wiphy, + assoc_data->link[assoc_data->assoc_link_id].bss); + + return -ETIMEDOUT; + } + + sdata_info(sdata, "associate with %pM (try %d/%d)\n", + assoc_data->ap_addr, assoc_data->tries, + IEEE80211_ASSOC_MAX_TRIES); + ret = ieee80211_send_assoc(sdata); + if (ret) + return ret; + + if (!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { + assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT; + assoc_data->timeout_started = true; + run_again(sdata, assoc_data->timeout); + } else { + assoc_data->timeout = + round_jiffies_up(jiffies + + IEEE80211_ASSOC_TIMEOUT_LONG); + assoc_data->timeout_started = true; + run_again(sdata, assoc_data->timeout); + } + + return 0; +} + +void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata, + __le16 fc, bool acked) +{ + struct ieee80211_local *local = sdata->local; + + sdata->u.mgd.status_fc = fc; + sdata->u.mgd.status_acked = acked; + sdata->u.mgd.status_received = true; + + wiphy_work_queue(local->hw.wiphy, &sdata->work); +} + +void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + sdata_lock(sdata); + + if (ifmgd->status_received) { + __le16 fc = ifmgd->status_fc; + bool status_acked = ifmgd->status_acked; + + ifmgd->status_received = false; + if (ifmgd->auth_data && ieee80211_is_auth(fc)) { + if (status_acked) { + if (ifmgd->auth_data->algorithm == + WLAN_AUTH_SAE) + ifmgd->auth_data->timeout = + jiffies + + IEEE80211_AUTH_TIMEOUT_SAE; + else + ifmgd->auth_data->timeout = + jiffies + + IEEE80211_AUTH_TIMEOUT_SHORT; + run_again(sdata, ifmgd->auth_data->timeout); + } else { + ifmgd->auth_data->timeout = jiffies - 1; + } + ifmgd->auth_data->timeout_started = 
true; + } else if (ifmgd->assoc_data && + (ieee80211_is_assoc_req(fc) || + ieee80211_is_reassoc_req(fc))) { + if (status_acked) { + ifmgd->assoc_data->timeout = + jiffies + IEEE80211_ASSOC_TIMEOUT_SHORT; + run_again(sdata, ifmgd->assoc_data->timeout); + } else { + ifmgd->assoc_data->timeout = jiffies - 1; + } + ifmgd->assoc_data->timeout_started = true; + } + } + + if (ifmgd->auth_data && ifmgd->auth_data->timeout_started && + time_after(jiffies, ifmgd->auth_data->timeout)) { + if (ifmgd->auth_data->done || ifmgd->auth_data->waiting) { + /* + * ok ... we waited for assoc or continuation but + * userspace didn't do it, so kill the auth data + */ + ieee80211_destroy_auth_data(sdata, false); + } else if (ieee80211_auth(sdata)) { + u8 ap_addr[ETH_ALEN]; + struct ieee80211_event event = { + .type = MLME_EVENT, + .u.mlme.data = AUTH_EVENT, + .u.mlme.status = MLME_TIMEOUT, + }; + + memcpy(ap_addr, ifmgd->auth_data->ap_addr, ETH_ALEN); + + ieee80211_destroy_auth_data(sdata, false); + + cfg80211_auth_timeout(sdata->dev, ap_addr); + drv_event_callback(sdata->local, sdata, &event); + } + } else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started) + run_again(sdata, ifmgd->auth_data->timeout); + + if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started && + time_after(jiffies, ifmgd->assoc_data->timeout)) { + if ((ifmgd->assoc_data->need_beacon && + !sdata->deflink.u.mgd.have_beacon) || + ieee80211_do_assoc(sdata)) { + struct ieee80211_event event = { + .type = MLME_EVENT, + .u.mlme.data = ASSOC_EVENT, + .u.mlme.status = MLME_TIMEOUT, + }; + + ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT); + drv_event_callback(sdata->local, sdata, &event); + } + } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started) + run_again(sdata, ifmgd->assoc_data->timeout); + + if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL && + ifmgd->associated) { + u8 *bssid = sdata->deflink.u.mgd.bssid; + int max_tries; + + if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) + max_tries = max_nullfunc_tries; + else + max_tries = max_probe_tries; + + /* ACK received for nullfunc probing frame */ + if (!ifmgd->probe_send_count) + ieee80211_reset_ap_probe(sdata); + else if (ifmgd->nullfunc_failed) { + if (ifmgd->probe_send_count < max_tries) { + mlme_dbg(sdata, + "No ack for nullfunc frame to AP %pM, try %d/%i\n", + bssid, ifmgd->probe_send_count, + max_tries); + ieee80211_mgd_probe_ap_send(sdata); + } else { + mlme_dbg(sdata, + "No ack for nullfunc frame to AP %pM, disconnecting.\n", + bssid); + ieee80211_sta_connection_lost(sdata, + WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, + false); + } + } else if (time_is_after_jiffies(ifmgd->probe_timeout)) + run_again(sdata, ifmgd->probe_timeout); + else if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { + mlme_dbg(sdata, + "Failed to send nullfunc to AP %pM after %dms, disconnecting\n", + bssid, probe_wait_ms); + ieee80211_sta_connection_lost(sdata, + WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false); + } else if (ifmgd->probe_send_count < max_tries) { + mlme_dbg(sdata, + "No probe response from AP %pM after %dms, try %d/%i\n", + bssid, probe_wait_ms, + ifmgd->probe_send_count, max_tries); + ieee80211_mgd_probe_ap_send(sdata); + } else { + /* + * We actually lost the connection ... or did we? + * Let's make sure! 
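+ *
+ * (Editorial summary, not part of this change.) Reaching this branch
+ * means the probe timeout expired, the hardware does not report TX
+ * ACK status for the probe frames, and probe_send_count already hit
+ * max_probe_tries, so the only option left is to declare the
+ * connection lost and tear the association down.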
+ */ + mlme_dbg(sdata, + "No probe response from AP %pM after %dms, disconnecting.\n", + bssid, probe_wait_ms); + + ieee80211_sta_connection_lost(sdata, + WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false); + } + } + + sdata_unlock(sdata); +} + +static void ieee80211_sta_bcn_mon_timer(struct timer_list *t) +{ + struct ieee80211_sub_if_data *sdata = + from_timer(sdata, t, u.mgd.bcn_mon_timer); + + if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif))) + return; + + if (sdata->vif.bss_conf.csa_active && + !sdata->deflink.u.mgd.csa_waiting_bcn) + return; + + if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER) + return; + + sdata->u.mgd.connection_loss = false; + wiphy_work_queue(sdata->local->hw.wiphy, + &sdata->u.mgd.beacon_connection_loss_work); +} + +static void ieee80211_sta_conn_mon_timer(struct timer_list *t) +{ + struct ieee80211_sub_if_data *sdata = + from_timer(sdata, t, u.mgd.conn_mon_timer); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + unsigned long timeout; + + if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif))) + return; + + if (sdata->vif.bss_conf.csa_active && + !sdata->deflink.u.mgd.csa_waiting_bcn) + return; + + sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr); + if (!sta) + return; + + timeout = sta->deflink.status_stats.last_ack; + if (time_before(sta->deflink.status_stats.last_ack, sta->deflink.rx_stats.last_rx)) + timeout = sta->deflink.rx_stats.last_rx; + timeout += IEEE80211_CONNECTION_IDLE_TIME; + + /* If timeout is after now, then update timer to fire at + * the later date, but do not actually probe at this time. + */ + if (time_is_after_jiffies(timeout)) { + mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(timeout)); + return; + } + + ieee80211_queue_work(&local->hw, &ifmgd->monitor_work); +} + +static void ieee80211_sta_monitor_work(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, + u.mgd.monitor_work); + + ieee80211_mgd_probe_ap(sdata, false); +} + +static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) +{ + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + __ieee80211_stop_poll(sdata); + + /* let's probe the connection once */ + if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) + ieee80211_queue_work(&sdata->local->hw, + &sdata->u.mgd.monitor_work); + } +} + +#ifdef CONFIG_PM +void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + + sdata_lock(sdata); + + if (ifmgd->auth_data || ifmgd->assoc_data) { + const u8 *ap_addr = ifmgd->auth_data ? + ifmgd->auth_data->ap_addr : + ifmgd->assoc_data->ap_addr; + + /* + * If we are trying to authenticate / associate while suspending, + * cfg80211 won't know and won't actually abort those attempts, + * thus we need to do that ourselves. + */ + ieee80211_send_deauth_disassoc(sdata, ap_addr, ap_addr, + IEEE80211_STYPE_DEAUTH, + WLAN_REASON_DEAUTH_LEAVING, + false, frame_buf); + if (ifmgd->assoc_data) + ieee80211_destroy_assoc_data(sdata, ASSOC_ABANDON); + if (ifmgd->auth_data) + ieee80211_destroy_auth_data(sdata, false); + cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, + IEEE80211_DEAUTH_FRAME_LEN, + false); + } + + /* This is a bit of a hack - we should find a better and more generic + * solution to this. Normally when suspending, cfg80211 will in fact + * deauthenticate. 
However, it doesn't (and cannot) stop an ongoing + * auth (not so important) or assoc (this is the problem) process. + * + * As a consequence, it can happen that we are in the process of both + * associating and suspending, and receive an association response + * after cfg80211 has checked if it needs to disconnect, but before + * we actually set the flag to drop incoming frames. This will then + * cause the workqueue flush to process the association response in + * the suspend, resulting in a successful association just before it + * tries to remove the interface from the driver, which now though + * has a channel context assigned ... this results in issues. + * + * To work around this (for now) simply deauth here again if we're + * now connected. + */ + if (ifmgd->associated && !sdata->local->wowlan) { + u8 bssid[ETH_ALEN]; + struct cfg80211_deauth_request req = { + .reason_code = WLAN_REASON_DEAUTH_LEAVING, + .bssid = bssid, + }; + + memcpy(bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); + ieee80211_mgd_deauth(sdata, &req); + } + + sdata_unlock(sdata); +} +#endif + +void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + sdata_lock(sdata); + if (!ifmgd->associated) { + sdata_unlock(sdata); + return; + } + + if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) { + sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME; + mlme_dbg(sdata, "driver requested disconnect after resume\n"); + ieee80211_sta_connection_lost(sdata, + WLAN_REASON_UNSPECIFIED, + true); + sdata_unlock(sdata); + return; + } + + if (sdata->flags & IEEE80211_SDATA_DISCONNECT_HW_RESTART) { + sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_HW_RESTART; + mlme_dbg(sdata, "driver requested disconnect after hardware restart\n"); + ieee80211_sta_connection_lost(sdata, + WLAN_REASON_UNSPECIFIED, + true); + sdata_unlock(sdata); + return; + } + + sdata_unlock(sdata); +} + +static void ieee80211_request_smps_mgd_work(struct wiphy *wiphy, + struct wiphy_work *work) +{ + struct ieee80211_link_data *link = + container_of(work, struct ieee80211_link_data, + u.mgd.request_smps_work); + + sdata_lock(link->sdata); + __ieee80211_request_smps_mgd(link->sdata, link, + link->u.mgd.driver_smps_mode); + sdata_unlock(link->sdata); +} + +/* interface setup */ +void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); + wiphy_work_init(&ifmgd->beacon_connection_loss_work, + ieee80211_beacon_connection_loss_work); + wiphy_work_init(&ifmgd->csa_connection_drop_work, + ieee80211_csa_connection_drop_work); + INIT_DELAYED_WORK(&ifmgd->tdls_peer_del_work, + ieee80211_tdls_peer_del_work); + wiphy_delayed_work_init(&ifmgd->ml_reconf_work, + ieee80211_ml_reconf_work); + timer_setup(&ifmgd->timer, ieee80211_sta_timer, 0); + timer_setup(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, 0); + timer_setup(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer, 0); + INIT_DELAYED_WORK(&ifmgd->tx_tspec_wk, + ieee80211_sta_handle_tspec_ac_params_wk); + + ifmgd->flags = 0; + ifmgd->powersave = sdata->wdev.ps; + ifmgd->uapsd_queues = sdata->local->hw.uapsd_queues; + ifmgd->uapsd_max_sp_len = sdata->local->hw.uapsd_max_sp_len; + /* Setup TDLS data */ + spin_lock_init(&ifmgd->teardown_lock); + ifmgd->teardown_skb = NULL; + ifmgd->orig_teardown_skb = NULL; +} + +void ieee80211_mgd_setup_link(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + 
struct ieee80211_local *local = sdata->local; + unsigned int link_id = link->link_id; + + link->u.mgd.p2p_noa_index = -1; + link->u.mgd.conn_flags = 0; + link->conf->bssid = link->u.mgd.bssid; + + wiphy_work_init(&link->u.mgd.request_smps_work, + ieee80211_request_smps_mgd_work); + if (local->hw.wiphy->features & NL80211_FEATURE_DYNAMIC_SMPS) + link->u.mgd.req_smps = IEEE80211_SMPS_AUTOMATIC; + else + link->u.mgd.req_smps = IEEE80211_SMPS_OFF; + + wiphy_delayed_work_init(&link->u.mgd.chswitch_work, + ieee80211_chswitch_work); + + if (sdata->u.mgd.assoc_data) + ether_addr_copy(link->conf->addr, + sdata->u.mgd.assoc_data->link[link_id].addr); + else if (!is_valid_ether_addr(link->conf->addr)) + eth_random_addr(link->conf->addr); +} + +/* scan finished notification */ +void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + + /* Restart STA timers */ + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (ieee80211_sdata_running(sdata)) + ieee80211_restart_sta_timer(sdata); + } + rcu_read_unlock(); +} + +static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, + struct cfg80211_bss *cbss, s8 link_id, + const u8 *ap_mld_addr, bool assoc, + bool override) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_bss *bss = (void *)cbss->priv; + struct sta_info *new_sta = NULL; + struct ieee80211_link_data *link; + bool have_sta = false; + bool mlo; + int err; + + if (link_id >= 0) { + mlo = true; + if (WARN_ON(!ap_mld_addr)) + return -EINVAL; + err = ieee80211_vif_set_links(sdata, BIT(link_id), 0); + } else { + if (WARN_ON(ap_mld_addr)) + return -EINVAL; + ap_mld_addr = cbss->bssid; + err = ieee80211_vif_set_links(sdata, 0, 0); + link_id = 0; + mlo = false; + } + + if (err) + return err; + + link = sdata_dereference(sdata->link[link_id], sdata); + if (WARN_ON(!link)) { + err = -ENOLINK; + goto out_err; + } + + if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data)) { + err = -EINVAL; + goto out_err; + } + + /* If a reconfig is happening, bail out */ + if (local->in_reconfig) { + err = -EBUSY; + goto out_err; + } + + if (assoc) { + rcu_read_lock(); + have_sta = sta_info_get(sdata, ap_mld_addr); + rcu_read_unlock(); + } + + if (!have_sta) { + if (mlo) + new_sta = sta_info_alloc_with_link(sdata, ap_mld_addr, + link_id, cbss->bssid, + GFP_KERNEL); + else + new_sta = sta_info_alloc(sdata, ap_mld_addr, GFP_KERNEL); + + if (!new_sta) { + err = -ENOMEM; + goto out_err; + } + + new_sta->sta.mlo = mlo; + } + + /* + * Set up the information for the new channel before setting the + * new channel. We can't - completely race-free - change the basic + * rates bitmap and the channel (sband) that it refers to, but if + * we set it up before we at least avoid calling into the driver's + * bss_info_changed() method with invalid information (since we do + * call that from changing the channel - only for IDLE and perhaps + * some others, but ...). + * + * So to avoid that, just set up all the new information before the + * channel, but tell the driver to apply it only afterwards, since + * it might need the new channel for that. 
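+ *
+ * (Editorial note, not part of this change.) Concretely, the ordering
+ * implemented below is: copy the BSSID and beacon/TSF timing into
+ * link->conf, let ieee80211_prep_channel() assign the channel
+ * context, and only then tell the driver about it via
+ *
+ *	ieee80211_link_info_change_notify(sdata, link,
+ *					  BSS_CHANGED_BSSID |
+ *					  BSS_CHANGED_BASIC_RATES |
+ *					  BSS_CHANGED_BEACON_INT);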
+ */ + if (new_sta) { + const struct cfg80211_bss_ies *ies; + struct link_sta_info *link_sta; + + rcu_read_lock(); + link_sta = rcu_dereference(new_sta->link[link_id]); + if (WARN_ON(!link_sta)) { + rcu_read_unlock(); + sta_info_free(local, new_sta); + err = -EINVAL; + goto out_err; + } + + err = ieee80211_mgd_setup_link_sta(link, new_sta, + link_sta, cbss); + if (err) { + rcu_read_unlock(); + sta_info_free(local, new_sta); + goto out_err; + } + + memcpy(link->u.mgd.bssid, cbss->bssid, ETH_ALEN); + + /* set timing information */ + link->conf->beacon_int = cbss->beacon_interval; + ies = rcu_dereference(cbss->beacon_ies); + if (ies) { + link->conf->sync_tsf = ies->tsf; + link->conf->sync_device_ts = + bss->device_ts_beacon; + + ieee80211_get_dtim(ies, + &link->conf->sync_dtim_count, + NULL); + } else if (!ieee80211_hw_check(&sdata->local->hw, + TIMING_BEACON_ONLY)) { + ies = rcu_dereference(cbss->proberesp_ies); + /* must be non-NULL since beacon IEs were NULL */ + link->conf->sync_tsf = ies->tsf; + link->conf->sync_device_ts = + bss->device_ts_presp; + link->conf->sync_dtim_count = 0; + } else { + link->conf->sync_tsf = 0; + link->conf->sync_device_ts = 0; + link->conf->sync_dtim_count = 0; + } + rcu_read_unlock(); + } + + if (new_sta || override) { + err = ieee80211_prep_channel(sdata, link, cbss, + &link->u.mgd.conn_flags); + if (err) { + if (new_sta) + sta_info_free(local, new_sta); + goto out_err; + } + } + + if (new_sta) { + /* + * tell driver about BSSID, basic rates and timing + * this was set up above, before setting the channel + */ + ieee80211_link_info_change_notify(sdata, link, + BSS_CHANGED_BSSID | + BSS_CHANGED_BASIC_RATES | + BSS_CHANGED_BEACON_INT); + + if (assoc) + sta_info_pre_move_state(new_sta, IEEE80211_STA_AUTH); + + err = sta_info_insert(new_sta); + new_sta = NULL; + if (err) { + sdata_info(sdata, + "failed to insert STA entry for the AP (error %d)\n", + err); + goto out_err; + } + } else + WARN_ON_ONCE(!ether_addr_equal(link->u.mgd.bssid, cbss->bssid)); + + /* Cancel scan to ensure that nothing interferes with connection */ + if (local->scanning) + ieee80211_scan_cancel(local); + + return 0; + +out_err: + ieee80211_link_release_channel(&sdata->deflink); + ieee80211_vif_set_links(sdata, 0, 0); + return err; +} + +/* config hooks */ +int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, + struct cfg80211_auth_request *req) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_mgd_auth_data *auth_data; + u16 auth_alg; + int err; + bool cont_auth; + + /* prepare auth data structure */ + + switch (req->auth_type) { + case NL80211_AUTHTYPE_OPEN_SYSTEM: + auth_alg = WLAN_AUTH_OPEN; + break; + case NL80211_AUTHTYPE_SHARED_KEY: + if (fips_enabled) + return -EOPNOTSUPP; + auth_alg = WLAN_AUTH_SHARED_KEY; + break; + case NL80211_AUTHTYPE_FT: + auth_alg = WLAN_AUTH_FT; + break; + case NL80211_AUTHTYPE_NETWORK_EAP: + auth_alg = WLAN_AUTH_LEAP; + break; + case NL80211_AUTHTYPE_SAE: + auth_alg = WLAN_AUTH_SAE; + break; + case NL80211_AUTHTYPE_FILS_SK: + auth_alg = WLAN_AUTH_FILS_SK; + break; + case NL80211_AUTHTYPE_FILS_SK_PFS: + auth_alg = WLAN_AUTH_FILS_SK_PFS; + break; + case NL80211_AUTHTYPE_FILS_PK: + auth_alg = WLAN_AUTH_FILS_PK; + break; + default: + return -EOPNOTSUPP; + } + + if (ifmgd->assoc_data) + return -EBUSY; + + auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len + + req->ie_len, GFP_KERNEL); + if (!auth_data) + return -ENOMEM; + + memcpy(auth_data->ap_addr, + req->ap_mld_addr ?: 
req->bss->bssid, + ETH_ALEN); + auth_data->bss = req->bss; + auth_data->link_id = req->link_id; + + if (req->auth_data_len >= 4) { + if (req->auth_type == NL80211_AUTHTYPE_SAE) { + __le16 *pos = (__le16 *) req->auth_data; + + auth_data->sae_trans = le16_to_cpu(pos[0]); + auth_data->sae_status = le16_to_cpu(pos[1]); + } + memcpy(auth_data->data, req->auth_data + 4, + req->auth_data_len - 4); + auth_data->data_len += req->auth_data_len - 4; + } + + /* Check if continuing authentication or trying to authenticate with the + * same BSS that we were in the process of authenticating with and avoid + * removal and re-addition of the STA entry in + * ieee80211_prep_connection(). + */ + cont_auth = ifmgd->auth_data && req->bss == ifmgd->auth_data->bss && + ifmgd->auth_data->link_id == req->link_id; + + if (req->ie && req->ie_len) { + memcpy(&auth_data->data[auth_data->data_len], + req->ie, req->ie_len); + auth_data->data_len += req->ie_len; + } + + if (req->key && req->key_len) { + auth_data->key_len = req->key_len; + auth_data->key_idx = req->key_idx; + memcpy(auth_data->key, req->key, req->key_len); + } + + auth_data->algorithm = auth_alg; + + /* try to authenticate/probe */ + + if (ifmgd->auth_data) { + if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE) { + auth_data->peer_confirmed = + ifmgd->auth_data->peer_confirmed; + } + ieee80211_destroy_auth_data(sdata, cont_auth); + } + + /* prep auth_data so we don't go into idle on disassoc */ + ifmgd->auth_data = auth_data; + + /* If this is continuation of an ongoing SAE authentication exchange + * (i.e., request to send SAE Confirm) and the peer has already + * confirmed, mark authentication completed since we are about to send + * out SAE Confirm. + */ + if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE && + auth_data->peer_confirmed && auth_data->sae_trans == 2) + ieee80211_mark_sta_auth(sdata); + + if (ifmgd->associated) { + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + + sdata_info(sdata, + "disconnect from AP %pM for new auth to %pM\n", + sdata->vif.cfg.ap_addr, auth_data->ap_addr); + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, + WLAN_REASON_UNSPECIFIED, + false, frame_buf); + + ieee80211_report_disconnect(sdata, frame_buf, + sizeof(frame_buf), true, + WLAN_REASON_UNSPECIFIED, + false); + } + + sdata_info(sdata, "authenticate with %pM\n", auth_data->ap_addr); + + /* needed for transmitting the auth frame(s) properly */ + memcpy(sdata->vif.cfg.ap_addr, auth_data->ap_addr, ETH_ALEN); + + err = ieee80211_prep_connection(sdata, req->bss, req->link_id, + req->ap_mld_addr, cont_auth, false); + if (err) + goto err_clear; + + err = ieee80211_auth(sdata); + if (err) { + sta_info_destroy_addr(sdata, auth_data->ap_addr); + goto err_clear; + } + + /* hold our own reference */ + cfg80211_ref_bss(local->hw.wiphy, auth_data->bss); + return 0; + + err_clear: + if (!ieee80211_vif_is_mld(&sdata->vif)) { + eth_zero_addr(sdata->deflink.u.mgd.bssid); + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_BSSID); + mutex_lock(&sdata->local->mtx); + ieee80211_link_release_channel(&sdata->deflink); + mutex_unlock(&sdata->local->mtx); + } + ifmgd->auth_data = NULL; + kfree(auth_data); + return err; +} + +static ieee80211_conn_flags_t +ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgd_assoc_data *assoc_data, + struct cfg80211_assoc_request *req, + ieee80211_conn_flags_t conn_flags, + unsigned int link_id) +{ + struct ieee80211_local *local = sdata->local; + const struct cfg80211_bss_ies 
*beacon_ies; + struct ieee80211_supported_band *sband; + const struct element *ht_elem, *vht_elem; + struct ieee80211_link_data *link; + struct cfg80211_bss *cbss; + struct ieee80211_bss *bss; + bool is_5ghz, is_6ghz; + + cbss = assoc_data->link[link_id].bss; + if (WARN_ON(!cbss)) + return 0; + + bss = (void *)cbss->priv; + + sband = local->hw.wiphy->bands[cbss->channel->band]; + if (WARN_ON(!sband)) + return 0; + + link = sdata_dereference(sdata->link[link_id], sdata); + if (WARN_ON(!link)) + return 0; + + is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ; + is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ; + + /* for MLO connections assume advertising all rates is OK */ + if (!req->ap_mld_addr) { + assoc_data->supp_rates = bss->supp_rates; + assoc_data->supp_rates_len = bss->supp_rates_len; + } + + /* copy and link elems for the STA profile */ + if (req->links[link_id].elems_len) { + memcpy(assoc_data->ie_pos, req->links[link_id].elems, + req->links[link_id].elems_len); + assoc_data->link[link_id].elems = assoc_data->ie_pos; + assoc_data->link[link_id].elems_len = req->links[link_id].elems_len; + assoc_data->ie_pos += req->links[link_id].elems_len; + } + + rcu_read_lock(); + ht_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_OPERATION); + if (ht_elem && ht_elem->datalen >= sizeof(struct ieee80211_ht_operation)) + assoc_data->link[link_id].ap_ht_param = + ((struct ieee80211_ht_operation *)(ht_elem->data))->ht_param; + else if (!is_6ghz) + conn_flags |= IEEE80211_CONN_DISABLE_HT; + vht_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY); + if (vht_elem && vht_elem->datalen >= sizeof(struct ieee80211_vht_cap)) { + memcpy(&assoc_data->link[link_id].ap_vht_cap, vht_elem->data, + sizeof(struct ieee80211_vht_cap)); + } else if (is_5ghz) { + link_info(link, + "VHT capa missing/short, disabling VHT/HE/EHT\n"); + conn_flags |= IEEE80211_CONN_DISABLE_VHT | + IEEE80211_CONN_DISABLE_HE | + IEEE80211_CONN_DISABLE_EHT; + } + rcu_read_unlock(); + + link->u.mgd.beacon_crc_valid = false; + link->u.mgd.dtim_period = 0; + link->u.mgd.have_beacon = false; + + /* override HT/VHT configuration only if the AP and we support it */ + if (!(conn_flags & IEEE80211_CONN_DISABLE_HT)) { + struct ieee80211_sta_ht_cap sta_ht_cap; + + memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap)); + ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap); + } + + link->conf->eht_puncturing = 0; + + rcu_read_lock(); + beacon_ies = rcu_dereference(cbss->beacon_ies); + if (beacon_ies) { + const struct ieee80211_eht_operation *eht_oper; + const struct element *elem; + u8 dtim_count = 0; + + ieee80211_get_dtim(beacon_ies, &dtim_count, + &link->u.mgd.dtim_period); + + sdata->deflink.u.mgd.have_beacon = true; + + if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) { + link->conf->sync_tsf = beacon_ies->tsf; + link->conf->sync_device_ts = bss->device_ts_beacon; + link->conf->sync_dtim_count = dtim_count; + } + + elem = cfg80211_find_ext_elem(WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION, + beacon_ies->data, beacon_ies->len); + if (elem && elem->datalen >= 3) + link->conf->profile_periodicity = elem->data[2]; + else + link->conf->profile_periodicity = 0; + + elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, + beacon_ies->data, beacon_ies->len); + if (elem && elem->datalen >= 11 && + (elem->data[10] & WLAN_EXT_CAPA11_EMA_SUPPORT)) + link->conf->ema_ap = true; + else + link->conf->ema_ap = false; + + elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_OPERATION, + beacon_ies->data, beacon_ies->len); + eht_oper = (const void 
*)(elem->data + 1); + + if (elem && + ieee80211_eht_oper_size_ok((const void *)(elem->data + 1), + elem->datalen - 1) && + (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) && + (eht_oper->params & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)) { + const struct ieee80211_eht_operation_info *info = + (void *)eht_oper->optional; + const u8 *disable_subchannel_bitmap = info->optional; + u16 bitmap; + + bitmap = get_unaligned_le16(disable_subchannel_bitmap); + if (cfg80211_valid_disable_subchannel_bitmap(&bitmap, + &link->conf->chandef)) + ieee80211_handle_puncturing_bitmap(link, + eht_oper, + bitmap, + NULL); + else + conn_flags |= IEEE80211_CONN_DISABLE_EHT; + } + } + rcu_read_unlock(); + + if (bss->corrupt_data) { + char *corrupt_type = "data"; + + if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_BEACON) { + if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP) + corrupt_type = "beacon and probe response"; + else + corrupt_type = "beacon"; + } else if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP) { + corrupt_type = "probe response"; + } + sdata_info(sdata, "associating to AP %pM with corrupt %s\n", + cbss->bssid, corrupt_type); + } + + if (link->u.mgd.req_smps == IEEE80211_SMPS_AUTOMATIC) { + if (sdata->u.mgd.powersave) + link->smps_mode = IEEE80211_SMPS_DYNAMIC; + else + link->smps_mode = IEEE80211_SMPS_OFF; + } else { + link->smps_mode = link->u.mgd.req_smps; + } + + return conn_flags; +} + +int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, + struct cfg80211_assoc_request *req) +{ + unsigned int assoc_link_id = req->link_id < 0 ? 0 : req->link_id; + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_mgd_assoc_data *assoc_data; + const struct element *ssid_elem; + struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg; + ieee80211_conn_flags_t conn_flags = 0; + struct ieee80211_link_data *link; + struct cfg80211_bss *cbss; + struct ieee80211_bss *bss; + bool override; + int i, err; + size_t size = sizeof(*assoc_data) + req->ie_len; + + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) + size += req->links[i].elems_len; + + /* FIXME: no support for 4-addr MLO yet */ + if (sdata->u.mgd.use_4addr && req->link_id >= 0) + return -EOPNOTSUPP; + + assoc_data = kzalloc(size, GFP_KERNEL); + if (!assoc_data) + return -ENOMEM; + + cbss = req->link_id < 0 ? 
req->bss : req->links[req->link_id].bss; + + rcu_read_lock(); + ssid_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_SSID); + if (!ssid_elem || ssid_elem->datalen > sizeof(assoc_data->ssid)) { + rcu_read_unlock(); + kfree(assoc_data); + return -EINVAL; + } + memcpy(assoc_data->ssid, ssid_elem->data, ssid_elem->datalen); + assoc_data->ssid_len = ssid_elem->datalen; + memcpy(vif_cfg->ssid, assoc_data->ssid, assoc_data->ssid_len); + vif_cfg->ssid_len = assoc_data->ssid_len; + rcu_read_unlock(); + + if (req->ap_mld_addr) { + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { + if (!req->links[i].bss) + continue; + link = sdata_dereference(sdata->link[i], sdata); + if (link) + ether_addr_copy(assoc_data->link[i].addr, + link->conf->addr); + else + eth_random_addr(assoc_data->link[i].addr); + } + } else { + memcpy(assoc_data->link[0].addr, sdata->vif.addr, ETH_ALEN); + } + + assoc_data->s1g = cbss->channel->band == NL80211_BAND_S1GHZ; + + memcpy(assoc_data->ap_addr, + req->ap_mld_addr ?: req->bss->bssid, + ETH_ALEN); + + if (ifmgd->associated) { + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + + sdata_info(sdata, + "disconnect from AP %pM for new assoc to %pM\n", + sdata->vif.cfg.ap_addr, assoc_data->ap_addr); + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, + WLAN_REASON_UNSPECIFIED, + false, frame_buf); + + ieee80211_report_disconnect(sdata, frame_buf, + sizeof(frame_buf), true, + WLAN_REASON_UNSPECIFIED, + false); + } + + if (ifmgd->auth_data && !ifmgd->auth_data->done) { + err = -EBUSY; + goto err_free; + } + + if (ifmgd->assoc_data) { + err = -EBUSY; + goto err_free; + } + + if (ifmgd->auth_data) { + bool match; + + /* keep sta info, bssid if matching */ + match = ether_addr_equal(ifmgd->auth_data->ap_addr, + assoc_data->ap_addr) && + ifmgd->auth_data->link_id == req->link_id; + ieee80211_destroy_auth_data(sdata, match); + } + + /* prepare assoc data */ + + bss = (void *)cbss->priv; + assoc_data->wmm = bss->wmm_used && + (local->hw.queues >= IEEE80211_NUM_ACS); + + /* + * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode. + * We still associate in non-HT mode (11a/b/g) if any one of these + * ciphers is configured as pairwise. + * We can set this to true for non-11n hardware, that'll be checked + * separately along with the peer capabilities. 
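+	 * The loop below therefore scans the configured pairwise ciphers and,
+	 * when WEP40, WEP104 or TKIP is found, sets the corresponding
+	 * IEEE80211_CONN_DISABLE_HT/VHT/HE/EHT connection flags.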
+ */ + for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) { + if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 || + req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP || + req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) { + conn_flags |= IEEE80211_CONN_DISABLE_HT; + conn_flags |= IEEE80211_CONN_DISABLE_VHT; + conn_flags |= IEEE80211_CONN_DISABLE_HE; + conn_flags |= IEEE80211_CONN_DISABLE_EHT; + netdev_info(sdata->dev, + "disabling HT/VHT/HE due to WEP/TKIP use\n"); + } + } + + /* also disable HT/VHT/HE/EHT if the AP doesn't use WMM */ + if (!bss->wmm_used) { + conn_flags |= IEEE80211_CONN_DISABLE_HT; + conn_flags |= IEEE80211_CONN_DISABLE_VHT; + conn_flags |= IEEE80211_CONN_DISABLE_HE; + conn_flags |= IEEE80211_CONN_DISABLE_EHT; + netdev_info(sdata->dev, + "disabling HT/VHT/HE as WMM/QoS is not supported by the AP\n"); + } + + if (req->flags & ASSOC_REQ_DISABLE_HT) { + mlme_dbg(sdata, "HT disabled by flag, disabling HT/VHT/HE\n"); + conn_flags |= IEEE80211_CONN_DISABLE_HT; + conn_flags |= IEEE80211_CONN_DISABLE_VHT; + conn_flags |= IEEE80211_CONN_DISABLE_HE; + conn_flags |= IEEE80211_CONN_DISABLE_EHT; + } + + if (req->flags & ASSOC_REQ_DISABLE_VHT) { + mlme_dbg(sdata, "VHT disabled by flag, disabling VHT\n"); + conn_flags |= IEEE80211_CONN_DISABLE_VHT; + } + + if (req->flags & ASSOC_REQ_DISABLE_HE) { + mlme_dbg(sdata, "HE disabled by flag, disabling HE/EHT\n"); + conn_flags |= IEEE80211_CONN_DISABLE_HE; + conn_flags |= IEEE80211_CONN_DISABLE_EHT; + } + + if (req->flags & ASSOC_REQ_DISABLE_EHT) + conn_flags |= IEEE80211_CONN_DISABLE_EHT; + + memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa)); + memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask, + sizeof(ifmgd->ht_capa_mask)); + + memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa)); + memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask, + sizeof(ifmgd->vht_capa_mask)); + + memcpy(&ifmgd->s1g_capa, &req->s1g_capa, sizeof(ifmgd->s1g_capa)); + memcpy(&ifmgd->s1g_capa_mask, &req->s1g_capa_mask, + sizeof(ifmgd->s1g_capa_mask)); + + if (req->ie && req->ie_len) { + memcpy(assoc_data->ie, req->ie, req->ie_len); + assoc_data->ie_len = req->ie_len; + assoc_data->ie_pos = assoc_data->ie + assoc_data->ie_len; + } else { + assoc_data->ie_pos = assoc_data->ie; + } + + if (req->fils_kek) { + /* should already be checked in cfg80211 - so warn */ + if (WARN_ON(req->fils_kek_len > FILS_MAX_KEK_LEN)) { + err = -EINVAL; + goto err_free; + } + memcpy(assoc_data->fils_kek, req->fils_kek, + req->fils_kek_len); + assoc_data->fils_kek_len = req->fils_kek_len; + } + + if (req->fils_nonces) + memcpy(assoc_data->fils_nonces, req->fils_nonces, + 2 * FILS_NONCE_LEN); + + /* default timeout */ + assoc_data->timeout = jiffies; + assoc_data->timeout_started = true; + + assoc_data->assoc_link_id = assoc_link_id; + + if (req->ap_mld_addr) { + for (i = 0; i < ARRAY_SIZE(assoc_data->link); i++) { + assoc_data->link[i].conn_flags = conn_flags; + assoc_data->link[i].bss = req->links[i].bss; + assoc_data->link[i].disabled = req->links[i].disabled; + } + + /* if there was no authentication, set up the link */ + err = ieee80211_vif_set_links(sdata, BIT(assoc_link_id), 0); + if (err) + goto err_clear; + } else { + assoc_data->link[0].conn_flags = conn_flags; + assoc_data->link[0].bss = cbss; + } + + link = sdata_dereference(sdata->link[assoc_link_id], sdata); + if (WARN_ON(!link)) { + err = -EINVAL; + goto err_clear; + } + + /* keep old conn_flags from ieee80211_prep_channel() from auth */ + conn_flags |= 
link->u.mgd.conn_flags; + conn_flags |= ieee80211_setup_assoc_link(sdata, assoc_data, req, + conn_flags, assoc_link_id); + override = link->u.mgd.conn_flags != conn_flags; + link->u.mgd.conn_flags |= conn_flags; + + if (WARN((sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD) && + ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK), + "U-APSD not supported with HW_PS_NULLFUNC_STACK\n")) + sdata->vif.driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; + + if (bss->wmm_used && bss->uapsd_supported && + (sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD)) { + assoc_data->uapsd = true; + ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED; + } else { + assoc_data->uapsd = false; + ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED; + } + + if (req->prev_bssid) + memcpy(assoc_data->prev_ap_addr, req->prev_bssid, ETH_ALEN); + + if (req->use_mfp) { + ifmgd->mfp = IEEE80211_MFP_REQUIRED; + ifmgd->flags |= IEEE80211_STA_MFP_ENABLED; + } else { + ifmgd->mfp = IEEE80211_MFP_DISABLED; + ifmgd->flags &= ~IEEE80211_STA_MFP_ENABLED; + } + + if (req->flags & ASSOC_REQ_USE_RRM) + ifmgd->flags |= IEEE80211_STA_ENABLE_RRM; + else + ifmgd->flags &= ~IEEE80211_STA_ENABLE_RRM; + + if (req->crypto.control_port) + ifmgd->flags |= IEEE80211_STA_CONTROL_PORT; + else + ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT; + + sdata->control_port_protocol = req->crypto.control_port_ethertype; + sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt; + sdata->control_port_over_nl80211 = + req->crypto.control_port_over_nl80211; + sdata->control_port_no_preauth = req->crypto.control_port_no_preauth; + + /* kick off associate process */ + ifmgd->assoc_data = assoc_data; + + for (i = 0; i < ARRAY_SIZE(assoc_data->link); i++) { + if (!assoc_data->link[i].bss) + continue; + if (i == assoc_data->assoc_link_id) + continue; + /* only calculate the flags, hence link == NULL */ + err = ieee80211_prep_channel(sdata, NULL, assoc_data->link[i].bss, + &assoc_data->link[i].conn_flags); + if (err) + goto err_clear; + } + + /* needed for transmitting the assoc frames properly */ + memcpy(sdata->vif.cfg.ap_addr, assoc_data->ap_addr, ETH_ALEN); + + err = ieee80211_prep_connection(sdata, cbss, req->link_id, + req->ap_mld_addr, true, override); + if (err) + goto err_clear; + + assoc_data->link[assoc_data->assoc_link_id].conn_flags = + link->u.mgd.conn_flags; + + if (ieee80211_hw_check(&sdata->local->hw, NEED_DTIM_BEFORE_ASSOC)) { + const struct cfg80211_bss_ies *beacon_ies; + + rcu_read_lock(); + beacon_ies = rcu_dereference(req->bss->beacon_ies); + + if (beacon_ies) { + /* + * Wait up to one beacon interval ... + * should this be more if we miss one? 
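+			 * (The timeout below is set via TU_TO_EXP_TIME(); a TU is
+			 * 1024 us, so a typical 100 TU beacon interval means waiting
+			 * roughly 102 ms. need_beacon additionally makes association
+			 * wait until a beacon has actually been received.)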
+ */ + sdata_info(sdata, "waiting for beacon from %pM\n", + link->u.mgd.bssid); + assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval); + assoc_data->timeout_started = true; + assoc_data->need_beacon = true; + } + rcu_read_unlock(); + } + + run_again(sdata, assoc_data->timeout); + + return 0; + err_clear: + eth_zero_addr(sdata->deflink.u.mgd.bssid); + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_BSSID); + ifmgd->assoc_data = NULL; + err_free: + kfree(assoc_data); + return err; +} + +int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, + struct cfg80211_deauth_request *req) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + bool tx = !req->local_state_change; + struct ieee80211_prep_tx_info info = { + .subtype = IEEE80211_STYPE_DEAUTH, + }; + + if (ifmgd->auth_data && + ether_addr_equal(ifmgd->auth_data->ap_addr, req->bssid)) { + sdata_info(sdata, + "aborting authentication with %pM by local choice (Reason: %u=%s)\n", + req->bssid, req->reason_code, + ieee80211_get_reason_code_string(req->reason_code)); + + drv_mgd_prepare_tx(sdata->local, sdata, &info); + ieee80211_send_deauth_disassoc(sdata, req->bssid, req->bssid, + IEEE80211_STYPE_DEAUTH, + req->reason_code, tx, + frame_buf); + ieee80211_destroy_auth_data(sdata, false); + ieee80211_report_disconnect(sdata, frame_buf, + sizeof(frame_buf), true, + req->reason_code, false); + drv_mgd_complete_tx(sdata->local, sdata, &info); + return 0; + } + + if (ifmgd->assoc_data && + ether_addr_equal(ifmgd->assoc_data->ap_addr, req->bssid)) { + sdata_info(sdata, + "aborting association with %pM by local choice (Reason: %u=%s)\n", + req->bssid, req->reason_code, + ieee80211_get_reason_code_string(req->reason_code)); + + drv_mgd_prepare_tx(sdata->local, sdata, &info); + ieee80211_send_deauth_disassoc(sdata, req->bssid, req->bssid, + IEEE80211_STYPE_DEAUTH, + req->reason_code, tx, + frame_buf); + ieee80211_destroy_assoc_data(sdata, ASSOC_ABANDON); + ieee80211_report_disconnect(sdata, frame_buf, + sizeof(frame_buf), true, + req->reason_code, false); + return 0; + } + + if (ifmgd->associated && + ether_addr_equal(sdata->vif.cfg.ap_addr, req->bssid)) { + sdata_info(sdata, + "deauthenticating from %pM by local choice (Reason: %u=%s)\n", + req->bssid, req->reason_code, + ieee80211_get_reason_code_string(req->reason_code)); + + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, + req->reason_code, tx, frame_buf); + ieee80211_report_disconnect(sdata, frame_buf, + sizeof(frame_buf), true, + req->reason_code, false); + drv_mgd_complete_tx(sdata->local, sdata, &info); + return 0; + } + + return -ENOTCONN; +} + +int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, + struct cfg80211_disassoc_request *req) +{ + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + + if (!sdata->u.mgd.associated || + memcmp(sdata->vif.cfg.ap_addr, req->ap_addr, ETH_ALEN)) + return -ENOTCONN; + + sdata_info(sdata, + "disassociating from %pM by local choice (Reason: %u=%s)\n", + req->ap_addr, req->reason_code, + ieee80211_get_reason_code_string(req->reason_code)); + + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC, + req->reason_code, !req->local_state_change, + frame_buf); + + ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, + req->reason_code, false); + + return 0; +} + +void ieee80211_mgd_stop_link(struct ieee80211_link_data *link) +{ + wiphy_work_cancel(link->sdata->local->hw.wiphy, + &link->u.mgd.request_smps_work); + 
wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy, + &link->u.mgd.chswitch_work); +} + +void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + /* + * Make sure some work items will not run after this, + * they will not do anything but might not have been + * cancelled when disconnecting. + */ + cancel_work_sync(&ifmgd->monitor_work); + wiphy_work_cancel(sdata->local->hw.wiphy, + &ifmgd->beacon_connection_loss_work); + wiphy_work_cancel(sdata->local->hw.wiphy, + &ifmgd->csa_connection_drop_work); + cancel_delayed_work_sync(&ifmgd->tdls_peer_del_work); + wiphy_delayed_work_cancel(sdata->local->hw.wiphy, + &ifmgd->ml_reconf_work); + + sdata_lock(sdata); + if (ifmgd->assoc_data) + ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT); + if (ifmgd->auth_data) + ieee80211_destroy_auth_data(sdata, false); + spin_lock_bh(&ifmgd->teardown_lock); + if (ifmgd->teardown_skb) { + kfree_skb(ifmgd->teardown_skb); + ifmgd->teardown_skb = NULL; + ifmgd->orig_teardown_skb = NULL; + } + kfree(ifmgd->assoc_req_ies); + ifmgd->assoc_req_ies = NULL; + ifmgd->assoc_req_ies_len = 0; + spin_unlock_bh(&ifmgd->teardown_lock); + del_timer_sync(&ifmgd->timer); + sdata_unlock(sdata); +} + +void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, + enum nl80211_cqm_rssi_threshold_event rssi_event, + s32 rssi_level, + gfp_t gfp) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + trace_api_cqm_rssi_notify(sdata, rssi_event, rssi_level); + + cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, rssi_level, gfp); +} +EXPORT_SYMBOL(ieee80211_cqm_rssi_notify); + +void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + trace_api_cqm_beacon_loss_notify(sdata->local, sdata); + + cfg80211_cqm_beacon_loss_notify(sdata->dev, gfp); +} +EXPORT_SYMBOL(ieee80211_cqm_beacon_loss_notify); + +static void _ieee80211_enable_rssi_reports(struct ieee80211_sub_if_data *sdata, + int rssi_min_thold, + int rssi_max_thold) +{ + trace_api_enable_rssi_reports(sdata, rssi_min_thold, rssi_max_thold); + + if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) + return; + + /* + * Scale up threshold values before storing it, as the RSSI averaging + * algorithm uses a scaled up value as well. Change this scaling + * factor if the RSSI averaging algorithm changes. 
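+	 * (The scaling factor used below is 16, so e.g. a -70 dBm threshold
+	 * is stored as -1120.)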
+ */ + sdata->u.mgd.rssi_min_thold = rssi_min_thold*16; + sdata->u.mgd.rssi_max_thold = rssi_max_thold*16; +} + +void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif, + int rssi_min_thold, + int rssi_max_thold) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + WARN_ON(rssi_min_thold == rssi_max_thold || + rssi_min_thold > rssi_max_thold); + + _ieee80211_enable_rssi_reports(sdata, rssi_min_thold, + rssi_max_thold); +} +EXPORT_SYMBOL(ieee80211_enable_rssi_reports); + +void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + _ieee80211_enable_rssi_reports(sdata, 0, 0); +} +EXPORT_SYMBOL(ieee80211_disable_rssi_reports); diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c new file mode 100644 index 0000000000..b44896e145 --- /dev/null +++ b/net/mac80211/ocb.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * OCB mode implementation + * + * Copyright: (c) 2014 Czech Technical University in Prague + * (c) 2014 Volkswagen Group Research + * Copyright (C) 2022 - 2023 Intel Corporation + * Author: Rostislav Lisovy <rostislav.lisovy@fel.cvut.cz> + * Funded by: Volkswagen Group Research + */ + +#include <linux/delay.h> +#include <linux/if_ether.h> +#include <linux/skbuff.h> +#include <linux/if_arp.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <net/mac80211.h> +#include <asm/unaligned.h> + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" + +#define IEEE80211_OCB_HOUSEKEEPING_INTERVAL (60 * HZ) +#define IEEE80211_OCB_PEER_INACTIVITY_LIMIT (240 * HZ) +#define IEEE80211_OCB_MAX_STA_ENTRIES 128 + +/** + * enum ocb_deferred_task_flags - mac80211 OCB deferred tasks + * @OCB_WORK_HOUSEKEEPING: run the periodic OCB housekeeping tasks + * + * These flags are used in @wrkq_flags field of &struct ieee80211_if_ocb + */ +enum ocb_deferred_task_flags { + OCB_WORK_HOUSEKEEPING, +}; + +void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata, + const u8 *bssid, const u8 *addr, + u32 supp_rates) +{ + struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_supported_band *sband; + enum nl80211_bss_scan_width scan_width; + struct sta_info *sta; + int band; + + /* XXX: Consider removing the least recently used entry and + * allow new one to be added. 
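+	 * Until then, new addresses are simply ignored once
+	 * IEEE80211_OCB_MAX_STA_ENTRIES stations exist, see the check below.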
+ */ + if (local->num_sta >= IEEE80211_OCB_MAX_STA_ENTRIES) { + net_info_ratelimited("%s: No room for a new OCB STA entry %pM\n", + sdata->name, addr); + return; + } + + ocb_dbg(sdata, "Adding new OCB station %pM\n", addr); + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + if (WARN_ON_ONCE(!chanctx_conf)) { + rcu_read_unlock(); + return; + } + band = chanctx_conf->def.chan->band; + scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def); + rcu_read_unlock(); + + sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); + if (!sta) + return; + + /* Add only mandatory rates for now */ + sband = local->hw.wiphy->bands[band]; + sta->sta.deflink.supp_rates[band] = + ieee80211_mandatory_rates(sband, scan_width); + + spin_lock(&ifocb->incomplete_lock); + list_add(&sta->list, &ifocb->incomplete_stations); + spin_unlock(&ifocb->incomplete_lock); + wiphy_work_queue(local->hw.wiphy, &sdata->work); +} + +static struct sta_info *ieee80211_ocb_finish_sta(struct sta_info *sta) + __acquires(RCU) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + u8 addr[ETH_ALEN]; + + memcpy(addr, sta->sta.addr, ETH_ALEN); + + ocb_dbg(sdata, "Adding new IBSS station %pM (dev=%s)\n", + addr, sdata->name); + + sta_info_move_state(sta, IEEE80211_STA_AUTH); + sta_info_move_state(sta, IEEE80211_STA_ASSOC); + sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); + + rate_control_rate_init(sta); + + /* If it fails, maybe we raced another insertion? */ + if (sta_info_insert_rcu(sta)) + return sta_info_get(sdata, addr); + return sta; +} + +static void ieee80211_ocb_housekeeping(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; + + ocb_dbg(sdata, "Running ocb housekeeping\n"); + + ieee80211_sta_expire(sdata, IEEE80211_OCB_PEER_INACTIVITY_LIMIT); + + mod_timer(&ifocb->housekeeping_timer, + round_jiffies(jiffies + IEEE80211_OCB_HOUSEKEEPING_INTERVAL)); +} + +void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; + struct sta_info *sta; + + if (ifocb->joined != true) + return; + + sdata_lock(sdata); + + spin_lock_bh(&ifocb->incomplete_lock); + while (!list_empty(&ifocb->incomplete_stations)) { + sta = list_first_entry(&ifocb->incomplete_stations, + struct sta_info, list); + list_del(&sta->list); + spin_unlock_bh(&ifocb->incomplete_lock); + + ieee80211_ocb_finish_sta(sta); + rcu_read_unlock(); + spin_lock_bh(&ifocb->incomplete_lock); + } + spin_unlock_bh(&ifocb->incomplete_lock); + + if (test_and_clear_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags)) + ieee80211_ocb_housekeeping(sdata); + + sdata_unlock(sdata); +} + +static void ieee80211_ocb_housekeeping_timer(struct timer_list *t) +{ + struct ieee80211_sub_if_data *sdata = + from_timer(sdata, t, u.ocb.housekeeping_timer); + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; + + set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags); + + wiphy_work_queue(local->hw.wiphy, &sdata->work); +} + +void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; + + timer_setup(&ifocb->housekeeping_timer, + ieee80211_ocb_housekeeping_timer, 0); + INIT_LIST_HEAD(&ifocb->incomplete_stations); + spin_lock_init(&ifocb->incomplete_lock); +} + +int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata, + struct ocb_setup *setup) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; + u64 changed = BSS_CHANGED_OCB | 
BSS_CHANGED_BSSID; + int err; + + if (ifocb->joined == true) + return -EINVAL; + + sdata->deflink.operating_11g_mode = true; + sdata->deflink.smps_mode = IEEE80211_SMPS_OFF; + sdata->deflink.needed_rx_chains = sdata->local->rx_chains; + + mutex_lock(&sdata->local->mtx); + err = ieee80211_link_use_channel(&sdata->deflink, &setup->chandef, + IEEE80211_CHANCTX_SHARED); + mutex_unlock(&sdata->local->mtx); + if (err) + return err; + + ieee80211_bss_info_change_notify(sdata, changed); + + ifocb->joined = true; + + set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags); + wiphy_work_queue(local->hw.wiphy, &sdata->work); + + netif_carrier_on(sdata->dev); + return 0; +} + +int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + ifocb->joined = false; + sta_info_flush(sdata); + + spin_lock_bh(&ifocb->incomplete_lock); + while (!list_empty(&ifocb->incomplete_stations)) { + sta = list_first_entry(&ifocb->incomplete_stations, + struct sta_info, list); + list_del(&sta->list); + spin_unlock_bh(&ifocb->incomplete_lock); + + sta_info_free(local, sta); + spin_lock_bh(&ifocb->incomplete_lock); + } + spin_unlock_bh(&ifocb->incomplete_lock); + + netif_carrier_off(sdata->dev); + clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_OCB); + + mutex_lock(&sdata->local->mtx); + ieee80211_link_release_channel(&sdata->deflink); + mutex_unlock(&sdata->local->mtx); + + skb_queue_purge(&sdata->skb_queue); + + del_timer_sync(&sdata->u.ocb.housekeeping_timer); + /* If the timer fired while we waited for it, it will have + * requeued the work. Now the work will be running again + * but will not rearm the timer again because it checks + * whether we are connected to the network or not -- at this + * point we shouldn't be anymore. + */ + + return 0; +} diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c new file mode 100644 index 0000000000..5bedd9cef4 --- /dev/null +++ b/net/mac80211/offchannel.c @@ -0,0 +1,1030 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Off-channel operation helpers + * + * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> + * Copyright 2004, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2007, Michael Wu <flamingice@sourmilk.net> + * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> + * Copyright (C) 2019, 2022-2023 Intel Corporation + */ +#include <linux/export.h> +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "driver-ops.h" + +/* + * Tell our hardware to disable PS. + * Optionally inform AP that we will go to sleep so that it will buffer + * the frames while we are doing off-channel work. This is optional + * because we *may* be doing work on-operating channel, and want our + * hardware unconditionally awake, but still let the AP send us normal frames. + */ +static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + bool offchannel_ps_enabled = false; + + /* FIXME: what to do when local->pspolling is true? 
*/ + + del_timer_sync(&local->dynamic_ps_timer); + del_timer_sync(&ifmgd->bcn_mon_timer); + del_timer_sync(&ifmgd->conn_mon_timer); + + cancel_work_sync(&local->dynamic_ps_enable_work); + + if (local->hw.conf.flags & IEEE80211_CONF_PS) { + offchannel_ps_enabled = true; + local->hw.conf.flags &= ~IEEE80211_CONF_PS; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + } + + if (!offchannel_ps_enabled || + !ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) + /* + * If power save was enabled, no need to send a nullfunc + * frame because AP knows that we are sleeping. But if the + * hardware is creating the nullfunc frame for power save + * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not + * enabled) and power save was enabled, the firmware just + * sent a null frame with power save disabled. So we need + * to send a new nullfunc frame to inform the AP that we + * are again sleeping. + */ + ieee80211_send_nullfunc(local, sdata, true); +} + +/* inform AP that we are awake again */ +static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + + if (!local->ps_sdata) + ieee80211_send_nullfunc(local, sdata, false); + else if (local->hw.conf.dynamic_ps_timeout > 0) { + /* + * the dynamic_ps_timer had been running before leaving the + * operating channel, restart the timer now and send a nullfunc + * frame to inform the AP that we are awake so that AP sends + * the buffered packets (if any). + */ + ieee80211_send_nullfunc(local, sdata, false); + mod_timer(&local->dynamic_ps_timer, jiffies + + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); + } + + ieee80211_sta_reset_beacon_monitor(sdata); + ieee80211_sta_reset_conn_monitor(sdata); +} + +void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + + if (WARN_ON(local->use_chanctx)) + return; + + /* + * notify the AP about us leaving the channel and stop all + * STA interfaces. + */ + + /* + * Stop queues and transmit all frames queued by the driver + * before sending nullfunc to enable powersave at the AP. + */ + ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, + IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL, + false); + ieee80211_flush_queues(local, NULL, false); + + mutex_lock(&local->iflist_mtx); + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + + if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE || + sdata->vif.type == NL80211_IFTYPE_NAN) + continue; + + if (sdata->vif.type != NL80211_IFTYPE_MONITOR) + set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); + + /* Check to see if we should disable beaconing. 
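+		 * Beaconing interfaces (e.g. AP, mesh, IBSS) must not keep
+		 * transmitting beacons while off channel; the
+		 * SDATA_STATE_OFFCHANNEL_BEACON_STOPPED bit set below lets
+		 * ieee80211_offchannel_return() re-enable the beacon afterwards.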
*/ + if (sdata->vif.bss_conf.enable_beacon) { + set_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, + &sdata->state); + sdata->vif.bss_conf.enable_beacon = false; + ieee80211_link_info_change_notify( + sdata, &sdata->deflink, + BSS_CHANGED_BEACON_ENABLED); + } + + if (sdata->vif.type == NL80211_IFTYPE_STATION && + sdata->u.mgd.associated) + ieee80211_offchannel_ps_enable(sdata); + } + mutex_unlock(&local->iflist_mtx); +} + +void ieee80211_offchannel_return(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + + if (WARN_ON(local->use_chanctx)) + return; + + mutex_lock(&local->iflist_mtx); + list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) + continue; + + if (sdata->vif.type != NL80211_IFTYPE_MONITOR) + clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); + + if (!ieee80211_sdata_running(sdata)) + continue; + + /* Tell AP we're back */ + if (sdata->vif.type == NL80211_IFTYPE_STATION && + sdata->u.mgd.associated) + ieee80211_offchannel_ps_disable(sdata); + + if (test_and_clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, + &sdata->state)) { + sdata->vif.bss_conf.enable_beacon = true; + ieee80211_link_info_change_notify( + sdata, &sdata->deflink, + BSS_CHANGED_BEACON_ENABLED); + } + } + mutex_unlock(&local->iflist_mtx); + + ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, + IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL, + false); +} + +static void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) +{ + /* was never transmitted */ + if (roc->frame) { + cfg80211_mgmt_tx_status(&roc->sdata->wdev, roc->mgmt_tx_cookie, + roc->frame->data, roc->frame->len, + false, GFP_KERNEL); + ieee80211_free_txskb(&roc->sdata->local->hw, roc->frame); + } + + if (!roc->mgmt_tx_cookie) + cfg80211_remain_on_channel_expired(&roc->sdata->wdev, + roc->cookie, roc->chan, + GFP_KERNEL); + else + cfg80211_tx_mgmt_expired(&roc->sdata->wdev, + roc->mgmt_tx_cookie, + roc->chan, GFP_KERNEL); + + list_del(&roc->list); + kfree(roc); +} + +static unsigned long ieee80211_end_finished_rocs(struct ieee80211_local *local, + unsigned long now) +{ + struct ieee80211_roc_work *roc, *tmp; + long remaining_dur_min = LONG_MAX; + + lockdep_assert_held(&local->mtx); + + list_for_each_entry_safe(roc, tmp, &local->roc_list, list) { + long remaining; + + if (!roc->started) + break; + + remaining = roc->start_time + + msecs_to_jiffies(roc->duration) - + now; + + /* In case of HW ROC, it is possible that the HW finished the + * ROC session before the actual requested time. In such a case + * end the ROC session (disregarding the remaining time). 
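+		 * Hence the roc->hw_begun check below, which ends such an entry
+		 * even though 'remaining' may still be positive.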
+ */ + if (roc->abort || roc->hw_begun || remaining <= 0) + ieee80211_roc_notify_destroy(roc); + else + remaining_dur_min = min(remaining_dur_min, remaining); + } + + return remaining_dur_min; +} + +static bool ieee80211_recalc_sw_work(struct ieee80211_local *local, + unsigned long now) +{ + long dur = ieee80211_end_finished_rocs(local, now); + + if (dur == LONG_MAX) + return false; + + wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, dur); + return true; +} + +static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc, + unsigned long start_time) +{ + if (WARN_ON(roc->notified)) + return; + + roc->start_time = start_time; + roc->started = true; + + if (roc->mgmt_tx_cookie) { + if (!WARN_ON(!roc->frame)) { + ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7, + roc->chan->band); + roc->frame = NULL; + } + } else { + cfg80211_ready_on_channel(&roc->sdata->wdev, roc->cookie, + roc->chan, roc->req_duration, + GFP_KERNEL); + } + + roc->notified = true; +} + +static void ieee80211_hw_roc_start(struct wiphy *wiphy, struct wiphy_work *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, hw_roc_start); + struct ieee80211_roc_work *roc; + + mutex_lock(&local->mtx); + + list_for_each_entry(roc, &local->roc_list, list) { + if (!roc->started) + break; + + roc->hw_begun = true; + ieee80211_handle_roc_started(roc, local->hw_roc_start_time); + } + + mutex_unlock(&local->mtx); +} + +void ieee80211_ready_on_channel(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + local->hw_roc_start_time = jiffies; + + trace_api_ready_on_channel(local); + + wiphy_work_queue(hw->wiphy, &local->hw_roc_start); +} +EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel); + +static void _ieee80211_start_next_roc(struct ieee80211_local *local) +{ + struct ieee80211_roc_work *roc, *tmp; + enum ieee80211_roc_type type; + u32 min_dur, max_dur; + + lockdep_assert_held(&local->mtx); + + if (WARN_ON(list_empty(&local->roc_list))) + return; + + roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work, + list); + + if (WARN_ON(roc->started)) + return; + + min_dur = roc->duration; + max_dur = roc->duration; + type = roc->type; + + list_for_each_entry(tmp, &local->roc_list, list) { + if (tmp == roc) + continue; + if (tmp->sdata != roc->sdata || tmp->chan != roc->chan) + break; + max_dur = max(tmp->duration, max_dur); + min_dur = min(tmp->duration, min_dur); + type = max(tmp->type, type); + } + + if (local->ops->remain_on_channel) { + int ret = drv_remain_on_channel(local, roc->sdata, roc->chan, + max_dur, type); + + if (ret) { + wiphy_warn(local->hw.wiphy, + "failed to start next HW ROC (%d)\n", ret); + /* + * queue the work struct again to avoid recursion + * when multiple failures occur + */ + list_for_each_entry(tmp, &local->roc_list, list) { + if (tmp->sdata != roc->sdata || + tmp->chan != roc->chan) + break; + tmp->started = true; + tmp->abort = true; + } + wiphy_work_queue(local->hw.wiphy, &local->hw_roc_done); + return; + } + + /* we'll notify about the start once the HW calls back */ + list_for_each_entry(tmp, &local->roc_list, list) { + if (tmp->sdata != roc->sdata || tmp->chan != roc->chan) + break; + tmp->started = true; + } + } else { + /* If actually operating on the desired channel (with at least + * 20 MHz channel width) don't stop all the operations but still + * treat it as though the ROC operation started properly, so + * other ROC operations won't interfere with this one. 
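+		 * The 5 and 10 MHz operating widths are excluded by the check
+		 * below as they do not meet the "at least 20 MHz" requirement.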
+ */ + roc->on_channel = roc->chan == local->_oper_chandef.chan && + local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 && + local->_oper_chandef.width != NL80211_CHAN_WIDTH_10; + + /* start this ROC */ + ieee80211_recalc_idle(local); + + if (!roc->on_channel) { + ieee80211_offchannel_stop_vifs(local); + + local->tmp_channel = roc->chan; + ieee80211_hw_config(local, 0); + } + + wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, + msecs_to_jiffies(min_dur)); + + /* tell userspace or send frame(s) */ + list_for_each_entry(tmp, &local->roc_list, list) { + if (tmp->sdata != roc->sdata || tmp->chan != roc->chan) + break; + + tmp->on_channel = roc->on_channel; + ieee80211_handle_roc_started(tmp, jiffies); + } + } +} + +void ieee80211_start_next_roc(struct ieee80211_local *local) +{ + struct ieee80211_roc_work *roc; + + lockdep_assert_held(&local->mtx); + + if (list_empty(&local->roc_list)) { + ieee80211_run_deferred_scan(local); + return; + } + + /* defer roc if driver is not started (i.e. during reconfig) */ + if (local->in_reconfig) + return; + + roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work, + list); + + if (WARN_ON_ONCE(roc->started)) + return; + + if (local->ops->remain_on_channel) { + _ieee80211_start_next_roc(local); + } else { + /* delay it a bit */ + wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, + round_jiffies_relative(HZ / 2)); + } +} + +static void __ieee80211_roc_work(struct ieee80211_local *local) +{ + struct ieee80211_roc_work *roc; + bool on_channel; + + lockdep_assert_held(&local->mtx); + + if (WARN_ON(local->ops->remain_on_channel)) + return; + + roc = list_first_entry_or_null(&local->roc_list, + struct ieee80211_roc_work, list); + if (!roc) + return; + + if (!roc->started) { + WARN_ON(local->use_chanctx); + _ieee80211_start_next_roc(local); + } else { + on_channel = roc->on_channel; + if (ieee80211_recalc_sw_work(local, jiffies)) + return; + + /* careful - roc pointer became invalid during recalc */ + + if (!on_channel) { + ieee80211_flush_queues(local, NULL, false); + + local->tmp_channel = NULL; + ieee80211_hw_config(local, 0); + + ieee80211_offchannel_return(local); + } + + ieee80211_recalc_idle(local); + ieee80211_start_next_roc(local); + } +} + +static void ieee80211_roc_work(struct wiphy *wiphy, struct wiphy_work *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, roc_work.work); + + mutex_lock(&local->mtx); + __ieee80211_roc_work(local); + mutex_unlock(&local->mtx); +} + +static void ieee80211_hw_roc_done(struct wiphy *wiphy, struct wiphy_work *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, hw_roc_done); + + mutex_lock(&local->mtx); + + ieee80211_end_finished_rocs(local, jiffies); + + /* if there's another roc, start it now */ + ieee80211_start_next_roc(local); + + mutex_unlock(&local->mtx); +} + +void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + trace_api_remain_on_channel_expired(local); + + wiphy_work_queue(hw->wiphy, &local->hw_roc_done); +} +EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired); + +static bool +ieee80211_coalesce_hw_started_roc(struct ieee80211_local *local, + struct ieee80211_roc_work *new_roc, + struct ieee80211_roc_work *cur_roc) +{ + unsigned long now = jiffies; + unsigned long remaining; + + if (WARN_ON(!cur_roc->started)) + return false; + + /* if it was scheduled in the hardware, but not started yet, + * we can only combine if the older one had a 
longer duration + */ + if (!cur_roc->hw_begun && new_roc->duration > cur_roc->duration) + return false; + + remaining = cur_roc->start_time + + msecs_to_jiffies(cur_roc->duration) - + now; + + /* if it doesn't fit entirely, schedule a new one */ + if (new_roc->duration > jiffies_to_msecs(remaining)) + return false; + + /* add just after the current one so we combine their finish later */ + list_add(&new_roc->list, &cur_roc->list); + + /* if the existing one has already begun then let this one also + * begin, otherwise they'll both be marked properly by the work + * struct that runs once the driver notifies us of the beginning + */ + if (cur_roc->hw_begun) { + new_roc->hw_begun = true; + ieee80211_handle_roc_started(new_roc, now); + } + + return true; +} + +static int ieee80211_start_roc_work(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel *channel, + unsigned int duration, u64 *cookie, + struct sk_buff *txskb, + enum ieee80211_roc_type type) +{ + struct ieee80211_roc_work *roc, *tmp; + bool queued = false, combine_started = true; + int ret; + + lockdep_assert_held(&local->mtx); + + if (channel->freq_offset) + /* this may work, but is untested */ + return -EOPNOTSUPP; + + if (local->use_chanctx && !local->ops->remain_on_channel) + return -EOPNOTSUPP; + + roc = kzalloc(sizeof(*roc), GFP_KERNEL); + if (!roc) + return -ENOMEM; + + /* + * If the duration is zero, then the driver + * wouldn't actually do anything. Set it to + * 10 for now. + * + * TODO: cancel the off-channel operation + * when we get the SKB's TX status and + * the wait time was zero before. + */ + if (!duration) + duration = 10; + + roc->chan = channel; + roc->duration = duration; + roc->req_duration = duration; + roc->frame = txskb; + roc->type = type; + roc->sdata = sdata; + + /* + * cookie is either the roc cookie (for normal roc) + * or the SKB (for mgmt TX) + */ + if (!txskb) { + roc->cookie = ieee80211_mgmt_tx_cookie(local); + *cookie = roc->cookie; + } else { + roc->mgmt_tx_cookie = *cookie; + } + + /* if there's no need to queue, handle it immediately */ + if (list_empty(&local->roc_list) && + !local->scanning && !ieee80211_is_radar_required(local)) { + /* if not HW assist, just queue & schedule work */ + if (!local->ops->remain_on_channel) { + list_add_tail(&roc->list, &local->roc_list); + wiphy_delayed_work_queue(local->hw.wiphy, + &local->roc_work, 0); + } else { + /* otherwise actually kick it off here + * (for error handling) + */ + ret = drv_remain_on_channel(local, sdata, channel, + duration, type); + if (ret) { + kfree(roc); + return ret; + } + roc->started = true; + list_add_tail(&roc->list, &local->roc_list); + } + + return 0; + } + + /* otherwise handle queueing */ + + list_for_each_entry(tmp, &local->roc_list, list) { + if (tmp->chan != channel || tmp->sdata != sdata) + continue; + + /* + * Extend this ROC if possible: If it hasn't started, add + * just after the new one to combine. + */ + if (!tmp->started) { + list_add(&roc->list, &tmp->list); + queued = true; + break; + } + + if (!combine_started) + continue; + + if (!local->ops->remain_on_channel) { + /* If there's no hardware remain-on-channel, and + * doing so won't push us over the maximum r-o-c + * we allow, then we can just add the new one to + * the list and mark it as having started now. + * If it would push over the limit, don't try to + * combine with other started ones (that haven't + * been running as long) but potentially sort it + * with others that had the same fate. 
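+			 * The check below adds the new duration to the time the
+			 * running ROC has already consumed and compares the sum
+			 * against the wiphy's max_remain_on_channel_duration.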
+ */ + unsigned long now = jiffies; + u32 elapsed = jiffies_to_msecs(now - tmp->start_time); + struct wiphy *wiphy = local->hw.wiphy; + u32 max_roc = wiphy->max_remain_on_channel_duration; + + if (elapsed + roc->duration > max_roc) { + combine_started = false; + continue; + } + + list_add(&roc->list, &tmp->list); + queued = true; + roc->on_channel = tmp->on_channel; + ieee80211_handle_roc_started(roc, now); + ieee80211_recalc_sw_work(local, now); + break; + } + + queued = ieee80211_coalesce_hw_started_roc(local, roc, tmp); + if (queued) + break; + /* if it wasn't queued, perhaps it can be combined with + * another that also couldn't get combined previously, + * but no need to check for already started ones, since + * that can't work. + */ + combine_started = false; + } + + if (!queued) + list_add_tail(&roc->list, &local->roc_list); + + return 0; +} + +int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, + struct ieee80211_channel *chan, + unsigned int duration, u64 *cookie) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + struct ieee80211_local *local = sdata->local; + int ret; + + mutex_lock(&local->mtx); + ret = ieee80211_start_roc_work(local, sdata, chan, + duration, cookie, NULL, + IEEE80211_ROC_TYPE_NORMAL); + mutex_unlock(&local->mtx); + + return ret; +} + +static int ieee80211_cancel_roc(struct ieee80211_local *local, + u64 cookie, bool mgmt_tx) +{ + struct ieee80211_roc_work *roc, *tmp, *found = NULL; + int ret; + + if (!cookie) + return -ENOENT; + + wiphy_work_flush(local->hw.wiphy, &local->hw_roc_start); + + mutex_lock(&local->mtx); + list_for_each_entry_safe(roc, tmp, &local->roc_list, list) { + if (!mgmt_tx && roc->cookie != cookie) + continue; + else if (mgmt_tx && roc->mgmt_tx_cookie != cookie) + continue; + + found = roc; + break; + } + + if (!found) { + mutex_unlock(&local->mtx); + return -ENOENT; + } + + if (!found->started) { + ieee80211_roc_notify_destroy(found); + goto out_unlock; + } + + if (local->ops->remain_on_channel) { + ret = drv_cancel_remain_on_channel(local, roc->sdata); + if (WARN_ON_ONCE(ret)) { + mutex_unlock(&local->mtx); + return ret; + } + + /* TODO: + * if multiple items were combined here then we really shouldn't + * cancel them all - we should wait for as much time as needed + * for the longest remaining one, and only then cancel ... 
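+		 * As implemented below, every started entry is torn down and
+		 * ieee80211_start_next_roc() then schedules whatever remains
+		 * on the list.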
+ */ + list_for_each_entry_safe(roc, tmp, &local->roc_list, list) { + if (!roc->started) + break; + if (roc == found) + found = NULL; + ieee80211_roc_notify_destroy(roc); + } + + /* that really must not happen - it was started */ + WARN_ON(found); + + ieee80211_start_next_roc(local); + } else { + /* go through work struct to return to the operating channel */ + found->abort = true; + wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, 0); + } + + out_unlock: + mutex_unlock(&local->mtx); + + return 0; +} + +int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy, + struct wireless_dev *wdev, u64 cookie) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + struct ieee80211_local *local = sdata->local; + + return ieee80211_cancel_roc(local, cookie, false); +} + +int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_mgmt_tx_params *params, u64 *cookie) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct sta_info *sta = NULL; + const struct ieee80211_mgmt *mgmt = (void *)params->buf; + bool need_offchan = false; + bool mlo_sta = false; + int link_id = -1; + u32 flags; + int ret; + u8 *data; + + if (params->dont_wait_for_ack) + flags = IEEE80211_TX_CTL_NO_ACK; + else + flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX | + IEEE80211_TX_CTL_REQ_TX_STATUS; + + if (params->no_cck) + flags |= IEEE80211_TX_CTL_NO_CCK_RATE; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_ADHOC: + if (!sdata->vif.cfg.ibss_joined) + need_offchan = true; +#ifdef CONFIG_MAC80211_MESH + fallthrough; + case NL80211_IFTYPE_MESH_POINT: + if (ieee80211_vif_is_mesh(&sdata->vif) && + !sdata->u.mesh.mesh_id_len) + need_offchan = true; +#endif + fallthrough; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_P2P_GO: + if (sdata->vif.type != NL80211_IFTYPE_ADHOC && + !ieee80211_vif_is_mesh(&sdata->vif) && + !sdata->bss->active) + need_offchan = true; + + rcu_read_lock(); + sta = sta_info_get_bss(sdata, mgmt->da); + mlo_sta = sta && sta->sta.mlo; + + if (!ieee80211_is_action(mgmt->frame_control) || + mgmt->u.action.category == WLAN_CATEGORY_PUBLIC || + mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED || + mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) { + rcu_read_unlock(); + break; + } + + if (!sta) { + rcu_read_unlock(); + return -ENOLINK; + } + if (params->link_id >= 0 && + !(sta->sta.valid_links & BIT(params->link_id))) { + rcu_read_unlock(); + return -ENOLINK; + } + link_id = params->link_id; + rcu_read_unlock(); + break; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + sdata_lock(sdata); + if (!sdata->u.mgd.associated || + (params->offchan && params->wait && + local->ops->remain_on_channel && + memcmp(sdata->vif.cfg.ap_addr, mgmt->bssid, ETH_ALEN))) + need_offchan = true; + sdata_unlock(sdata); + break; + case NL80211_IFTYPE_P2P_DEVICE: + need_offchan = true; + break; + case NL80211_IFTYPE_NAN: + default: + return -EOPNOTSUPP; + } + + /* configurations requiring offchan cannot work if no channel has been + * specified + */ + if (need_offchan && !params->chan) + return -EINVAL; + + mutex_lock(&local->mtx); + + /* Check if the operating channel is the requested channel */ + if (!params->chan && mlo_sta) { + need_offchan = false; + } else if (!need_offchan) { + struct ieee80211_chanctx_conf *chanctx_conf = NULL; + int i; + + rcu_read_lock(); + /* Check all the links first */ + for (i = 0; i < 
ARRAY_SIZE(sdata->vif.link_conf); i++) { + struct ieee80211_bss_conf *conf; + + conf = rcu_dereference(sdata->vif.link_conf[i]); + if (!conf) + continue; + + chanctx_conf = rcu_dereference(conf->chanctx_conf); + if (!chanctx_conf) + continue; + + if (mlo_sta && params->chan == chanctx_conf->def.chan && + ether_addr_equal(sdata->vif.addr, mgmt->sa)) { + link_id = i; + break; + } + + if (ether_addr_equal(conf->addr, mgmt->sa)) + break; + + chanctx_conf = NULL; + } + + if (chanctx_conf) { + need_offchan = params->chan && + (params->chan != + chanctx_conf->def.chan); + } else { + need_offchan = true; + } + rcu_read_unlock(); + } + + if (need_offchan && !params->offchan) { + ret = -EBUSY; + goto out_unlock; + } + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + params->len); + if (!skb) { + ret = -ENOMEM; + goto out_unlock; + } + skb_reserve(skb, local->hw.extra_tx_headroom); + + data = skb_put_data(skb, params->buf, params->len); + + /* Update CSA counters */ + if (sdata->vif.bss_conf.csa_active && + (sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_MESH_POINT || + sdata->vif.type == NL80211_IFTYPE_ADHOC) && + params->n_csa_offsets) { + int i; + struct beacon_data *beacon = NULL; + + rcu_read_lock(); + + if (sdata->vif.type == NL80211_IFTYPE_AP) + beacon = rcu_dereference(sdata->deflink.u.ap.beacon); + else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) + beacon = rcu_dereference(sdata->u.ibss.presp); + else if (ieee80211_vif_is_mesh(&sdata->vif)) + beacon = rcu_dereference(sdata->u.mesh.beacon); + + if (beacon) + for (i = 0; i < params->n_csa_offsets; i++) + data[params->csa_offsets[i]] = + beacon->cntdwn_current_counter; + + rcu_read_unlock(); + } + + IEEE80211_SKB_CB(skb)->flags = flags; + + skb->dev = sdata->dev; + + if (!params->dont_wait_for_ack) { + /* make a copy to preserve the frame contents + * in case of encryption. + */ + ret = ieee80211_attach_ack_skb(local, skb, cookie, GFP_KERNEL); + if (ret) { + kfree_skb(skb); + goto out_unlock; + } + } else { + /* Assign a dummy non-zero cookie, it's not sent to + * userspace in this case but we rely on its value + * internally in the need_offchan case to distinguish + * mgmt-tx from remain-on-channel. 
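+		 * (Any non-zero value works for that; 0xffffffff is used below.)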
+ */ + *cookie = 0xffffffff; + } + + if (!need_offchan) { + ieee80211_tx_skb_tid(sdata, skb, 7, link_id); + ret = 0; + goto out_unlock; + } + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN | + IEEE80211_TX_INTFL_OFFCHAN_TX_OK; + if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) + IEEE80211_SKB_CB(skb)->hw_queue = + local->hw.offchannel_tx_hw_queue; + + /* This will handle all kinds of coalescing and immediate TX */ + ret = ieee80211_start_roc_work(local, sdata, params->chan, + params->wait, cookie, skb, + IEEE80211_ROC_TYPE_MGMT_TX); + if (ret) + ieee80211_free_txskb(&local->hw, skb); + out_unlock: + mutex_unlock(&local->mtx); + return ret; +} + +int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, + struct wireless_dev *wdev, u64 cookie) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + return ieee80211_cancel_roc(local, cookie, true); +} + +void ieee80211_roc_setup(struct ieee80211_local *local) +{ + wiphy_work_init(&local->hw_roc_start, ieee80211_hw_roc_start); + wiphy_work_init(&local->hw_roc_done, ieee80211_hw_roc_done); + wiphy_delayed_work_init(&local->roc_work, ieee80211_roc_work); + INIT_LIST_HEAD(&local->roc_list); +} + +void ieee80211_roc_purge(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_roc_work *roc, *tmp; + bool work_to_do = false; + + mutex_lock(&local->mtx); + list_for_each_entry_safe(roc, tmp, &local->roc_list, list) { + if (sdata && roc->sdata != sdata) + continue; + + if (roc->started) { + if (local->ops->remain_on_channel) { + /* can race, so ignore return value */ + drv_cancel_remain_on_channel(local, roc->sdata); + ieee80211_roc_notify_destroy(roc); + } else { + roc->abort = true; + work_to_do = true; + } + } else { + ieee80211_roc_notify_destroy(roc); + } + } + if (work_to_do) + __ieee80211_roc_work(local); + mutex_unlock(&local->mtx); +} diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c new file mode 100644 index 0000000000..0ccb5701c7 --- /dev/null +++ b/net/mac80211/pm.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Portions + * Copyright (C) 2020-2021 Intel Corporation + */ +#include <net/mac80211.h> +#include <net/rtnetlink.h> + +#include "ieee80211_i.h" +#include "mesh.h" +#include "driver-ops.h" +#include "led.h" + +static void ieee80211_sched_scan_cancel(struct ieee80211_local *local) +{ + if (ieee80211_request_sched_scan_stop(local)) + return; + cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0); +} + +int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + + if (!local->open_count) + goto suspend; + + local->suspending = true; + mb(); /* make suspending visible before any cancellation */ + + ieee80211_scan_cancel(local); + + ieee80211_dfs_cac_cancel(local); + + ieee80211_roc_purge(local, NULL); + + ieee80211_del_virtual_monitor(local); + + if (ieee80211_hw_check(hw, AMPDU_AGGREGATION) && + !(wowlan && wowlan->any)) { + mutex_lock(&local->sta_mtx); + list_for_each_entry(sta, &local->sta_list, list) { + set_sta_flag(sta, WLAN_STA_BLOCK_BA); + ieee80211_sta_tear_down_BA_sessions( + sta, AGG_STOP_LOCAL_REQUEST); + } + mutex_unlock(&local->sta_mtx); + } + + /* keep sched_scan only in case of 'any' trigger */ + if (!(wowlan && wowlan->any)) + ieee80211_sched_scan_cancel(local); + + ieee80211_stop_queues_by_reason(hw, + IEEE80211_MAX_QUEUE_MAP, + IEEE80211_QUEUE_STOP_REASON_SUSPEND, + false); + + /* flush out all packets */ + 
synchronize_net(); + + ieee80211_flush_queues(local, NULL, true); + + local->quiescing = true; + /* make quiescing visible to timers everywhere */ + mb(); + + flush_workqueue(local->workqueue); + + /* Don't try to run timers while suspended. */ + del_timer_sync(&local->sta_cleanup); + + /* + * Note that this particular timer doesn't need to be + * restarted at resume. + */ + cancel_work_sync(&local->dynamic_ps_enable_work); + del_timer_sync(&local->dynamic_ps_timer); + + local->wowlan = wowlan; + if (local->wowlan) { + int err; + + /* Drivers don't expect to suspend while some operations like + * authenticating or associating are in progress. It doesn't + * make sense anyway to accept that, since the authentication + * or association would never finish since the driver can't do + * that on its own. + * Thus, clean up in-progress auth/assoc first. + */ + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + if (sdata->vif.type != NL80211_IFTYPE_STATION) + continue; + ieee80211_mgd_quiesce(sdata); + /* If suspended during TX in progress, and wowlan + * is enabled (connection will be active) there + * can be a race where the driver is put out + * of power-save due to TX and during suspend + * dynamic_ps_timer is cancelled and TX packet + * is flushed, leaving the driver in ACTIVE even + * after resuming until dynamic_ps_timer puts + * driver back in DOZE. + */ + if (sdata->u.mgd.associated && + sdata->u.mgd.powersave && + !(local->hw.conf.flags & IEEE80211_CONF_PS)) { + local->hw.conf.flags |= IEEE80211_CONF_PS; + ieee80211_hw_config(local, + IEEE80211_CONF_CHANGE_PS); + } + } + + err = drv_suspend(local, wowlan); + if (err < 0) { + local->quiescing = false; + local->wowlan = false; + if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) { + mutex_lock(&local->sta_mtx); + list_for_each_entry(sta, + &local->sta_list, list) { + clear_sta_flag(sta, WLAN_STA_BLOCK_BA); + } + mutex_unlock(&local->sta_mtx); + } + ieee80211_wake_queues_by_reason(hw, + IEEE80211_MAX_QUEUE_MAP, + IEEE80211_QUEUE_STOP_REASON_SUSPEND, + false); + return err; + } else if (err > 0) { + WARN_ON(err != 1); + /* cfg80211 will call back into mac80211 to disconnect + * all interfaces, allow that to proceed properly + */ + ieee80211_wake_queues_by_reason(hw, + IEEE80211_MAX_QUEUE_MAP, + IEEE80211_QUEUE_STOP_REASON_SUSPEND, + false); + return err; + } else { + goto suspend; + } + } + + /* remove all interfaces that were created in the driver */ + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_MONITOR: + continue; + case NL80211_IFTYPE_STATION: + ieee80211_mgd_quiesce(sdata); + break; + default: + break; + } + + flush_delayed_work(&sdata->dec_tailroom_needed_wk); + drv_remove_interface(local, sdata); + } + + /* + * We disconnected on all interfaces before suspend, all channel + * contexts should be released. + */ + WARN_ON(!list_empty(&local->chanctx_list)); + + /* stop hardware - this must stop RX */ + ieee80211_stop_device(local); + + suspend: + local->suspended = true; + /* need suspended to be visible before quiescing is false */ + barrier(); + local->quiescing = false; + local->suspending = false; + + return 0; +} + +/* + * __ieee80211_resume() is a static inline which just calls + * ieee80211_reconfig(), which is also needed for hardware + * hang/firmware failure/etc. recovery. 
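+ * That is why this file defines no resume handler of its own.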
+ */ + +void ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif, + struct cfg80211_wowlan_wakeup *wakeup, + gfp_t gfp) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + cfg80211_report_wowlan_wakeup(&sdata->wdev, wakeup, gfp); +} +EXPORT_SYMBOL(ieee80211_report_wowlan_wakeup); diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c new file mode 100644 index 0000000000..d5ea5f5bcf --- /dev/null +++ b/net/mac80211/rate.c @@ -0,0 +1,1018 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> + * Copyright 2017 Intel Deutschland GmbH + * Copyright (C) 2022 Intel Corporation + */ + +#include <linux/kernel.h> +#include <linux/rtnetlink.h> +#include <linux/module.h> +#include <linux/slab.h> +#include "rate.h" +#include "ieee80211_i.h" +#include "debugfs.h" + +struct rate_control_alg { + struct list_head list; + const struct rate_control_ops *ops; +}; + +static LIST_HEAD(rate_ctrl_algs); +static DEFINE_MUTEX(rate_ctrl_mutex); + +static char *ieee80211_default_rc_algo = CONFIG_MAC80211_RC_DEFAULT; +module_param(ieee80211_default_rc_algo, charp, 0644); +MODULE_PARM_DESC(ieee80211_default_rc_algo, + "Default rate control algorithm for mac80211 to use"); + +void rate_control_rate_init(struct sta_info *sta) +{ + struct ieee80211_local *local = sta->sdata->local; + struct rate_control_ref *ref = sta->rate_ctrl; + struct ieee80211_sta *ista = &sta->sta; + void *priv_sta = sta->rate_ctrl_priv; + struct ieee80211_supported_band *sband; + struct ieee80211_chanctx_conf *chanctx_conf; + + ieee80211_sta_set_rx_nss(&sta->deflink); + + if (!ref) + return; + + rcu_read_lock(); + + chanctx_conf = rcu_dereference(sta->sdata->vif.bss_conf.chanctx_conf); + if (WARN_ON(!chanctx_conf)) { + rcu_read_unlock(); + return; + } + + sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band]; + + /* TODO: check for minstrel_s1g ? 
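(for now, S1G stations bypass the rate control ops entirely and get their rates from ieee80211_s1g_sta_rate_init() below)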
*/ + if (sband->band == NL80211_BAND_S1GHZ) { + ieee80211_s1g_sta_rate_init(sta); + rcu_read_unlock(); + return; + } + + spin_lock_bh(&sta->rate_ctrl_lock); + ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista, + priv_sta); + spin_unlock_bh(&sta->rate_ctrl_lock); + rcu_read_unlock(); + set_sta_flag(sta, WLAN_STA_RATE_CONTROL); +} + +void rate_control_tx_status(struct ieee80211_local *local, + struct ieee80211_tx_status *st) +{ + struct rate_control_ref *ref = local->rate_ctrl; + struct sta_info *sta = container_of(st->sta, struct sta_info, sta); + void *priv_sta = sta->rate_ctrl_priv; + struct ieee80211_supported_band *sband; + + if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) + return; + + sband = local->hw.wiphy->bands[st->info->band]; + + spin_lock_bh(&sta->rate_ctrl_lock); + if (ref->ops->tx_status_ext) + ref->ops->tx_status_ext(ref->priv, sband, priv_sta, st); + else if (st->skb) + ref->ops->tx_status(ref->priv, sband, st->sta, priv_sta, st->skb); + else + WARN_ON_ONCE(1); + + spin_unlock_bh(&sta->rate_ctrl_lock); +} + +void rate_control_rate_update(struct ieee80211_local *local, + struct ieee80211_supported_band *sband, + struct sta_info *sta, unsigned int link_id, + u32 changed) +{ + struct rate_control_ref *ref = local->rate_ctrl; + struct ieee80211_sta *ista = &sta->sta; + void *priv_sta = sta->rate_ctrl_priv; + struct ieee80211_chanctx_conf *chanctx_conf; + + WARN_ON(link_id != 0); + + if (ref && ref->ops->rate_update) { + rcu_read_lock(); + + chanctx_conf = rcu_dereference(sta->sdata->vif.bss_conf.chanctx_conf); + if (WARN_ON(!chanctx_conf)) { + rcu_read_unlock(); + return; + } + + spin_lock_bh(&sta->rate_ctrl_lock); + ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def, + ista, priv_sta, changed); + spin_unlock_bh(&sta->rate_ctrl_lock); + rcu_read_unlock(); + } + + drv_sta_rc_update(local, sta->sdata, &sta->sta, changed); +} + +int ieee80211_rate_control_register(const struct rate_control_ops *ops) +{ + struct rate_control_alg *alg; + + if (!ops->name) + return -EINVAL; + + mutex_lock(&rate_ctrl_mutex); + list_for_each_entry(alg, &rate_ctrl_algs, list) { + if (!strcmp(alg->ops->name, ops->name)) { + /* don't register an algorithm twice */ + WARN_ON(1); + mutex_unlock(&rate_ctrl_mutex); + return -EALREADY; + } + } + + alg = kzalloc(sizeof(*alg), GFP_KERNEL); + if (alg == NULL) { + mutex_unlock(&rate_ctrl_mutex); + return -ENOMEM; + } + alg->ops = ops; + + list_add_tail(&alg->list, &rate_ctrl_algs); + mutex_unlock(&rate_ctrl_mutex); + + return 0; +} +EXPORT_SYMBOL(ieee80211_rate_control_register); + +void ieee80211_rate_control_unregister(const struct rate_control_ops *ops) +{ + struct rate_control_alg *alg; + + mutex_lock(&rate_ctrl_mutex); + list_for_each_entry(alg, &rate_ctrl_algs, list) { + if (alg->ops == ops) { + list_del(&alg->list); + kfree(alg); + break; + } + } + mutex_unlock(&rate_ctrl_mutex); +} +EXPORT_SYMBOL(ieee80211_rate_control_unregister); + +static const struct rate_control_ops * +ieee80211_try_rate_control_ops_get(const char *name) +{ + struct rate_control_alg *alg; + const struct rate_control_ops *ops = NULL; + + if (!name) + return NULL; + + mutex_lock(&rate_ctrl_mutex); + list_for_each_entry(alg, &rate_ctrl_algs, list) { + if (!strcmp(alg->ops->name, name)) { + ops = alg->ops; + break; + } + } + mutex_unlock(&rate_ctrl_mutex); + return ops; +} + +/* Get the rate control algorithm. 
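Lookup order: the explicitly requested name, then the ieee80211_default_rc_algo module parameter, then the built-in CONFIG_MAC80211_RC_DEFAULT.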
*/ +static const struct rate_control_ops * +ieee80211_rate_control_ops_get(const char *name) +{ + const struct rate_control_ops *ops; + const char *alg_name; + + kernel_param_lock(THIS_MODULE); + if (!name) + alg_name = ieee80211_default_rc_algo; + else + alg_name = name; + + ops = ieee80211_try_rate_control_ops_get(alg_name); + if (!ops && name) + /* try default if specific alg requested but not found */ + ops = ieee80211_try_rate_control_ops_get(ieee80211_default_rc_algo); + + /* Note: check for > 0 is intentional to avoid clang warning */ + if (!ops && (strlen(CONFIG_MAC80211_RC_DEFAULT) > 0)) + /* try built-in one if specific alg requested but not found */ + ops = ieee80211_try_rate_control_ops_get(CONFIG_MAC80211_RC_DEFAULT); + + kernel_param_unlock(THIS_MODULE); + + return ops; +} + +#ifdef CONFIG_MAC80211_DEBUGFS +static ssize_t rcname_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct rate_control_ref *ref = file->private_data; + int len = strlen(ref->ops->name); + + return simple_read_from_buffer(userbuf, count, ppos, + ref->ops->name, len); +} + +const struct file_operations rcname_ops = { + .read = rcname_read, + .open = simple_open, + .llseek = default_llseek, +}; +#endif + +static struct rate_control_ref * +rate_control_alloc(const char *name, struct ieee80211_local *local) +{ + struct rate_control_ref *ref; + + ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL); + if (!ref) + return NULL; + ref->ops = ieee80211_rate_control_ops_get(name); + if (!ref->ops) + goto free; + + ref->priv = ref->ops->alloc(&local->hw); + if (!ref->priv) + goto free; + return ref; + +free: + kfree(ref); + return NULL; +} + +static void rate_control_free(struct ieee80211_local *local, + struct rate_control_ref *ctrl_ref) +{ + ctrl_ref->ops->free(ctrl_ref->priv); + +#ifdef CONFIG_MAC80211_DEBUGFS + debugfs_remove_recursive(local->debugfs.rcdir); + local->debugfs.rcdir = NULL; +#endif + + kfree(ctrl_ref); +} + +void ieee80211_check_rate_mask(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; + u32 user_mask, basic_rates = link->conf->basic_rates; + enum nl80211_band band; + + if (WARN_ON(!link->conf->chandef.chan)) + return; + + band = link->conf->chandef.chan->band; + if (band == NL80211_BAND_S1GHZ) { + /* TODO */ + return; + } + + if (WARN_ON_ONCE(!basic_rates)) + return; + + user_mask = sdata->rc_rateidx_mask[band]; + sband = local->hw.wiphy->bands[band]; + + if (user_mask & basic_rates) + return; + + sdata_dbg(sdata, + "no overlap between basic rates (0x%x) and user mask (0x%x on band %d) - clearing the latter", + basic_rates, user_mask, band); + sdata->rc_rateidx_mask[band] = (1 << sband->n_bitrates) - 1; +} + +static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc) +{ + struct sk_buff *skb = txrc->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + return (info->flags & (IEEE80211_TX_CTL_NO_ACK | + IEEE80211_TX_CTL_USE_MINRATE)) || + !ieee80211_is_tx_data(skb); +} + +static void rc_send_low_basicrate(struct ieee80211_tx_rate *rate, + u32 basic_rates, + struct ieee80211_supported_band *sband) +{ + u8 i; + + if (sband->band == NL80211_BAND_S1GHZ) { + /* TODO */ + rate->flags |= IEEE80211_TX_RC_S1G_MCS; + rate->idx = 0; + return; + } + + if (basic_rates == 0) + return; /* assume basic rates unknown and accept rate */ + if (rate->idx < 0) + return; + if (basic_rates & (1 << rate->idx)) + 
return; /* selected rate is a basic rate */ + + for (i = rate->idx + 1; i <= sband->n_bitrates; i++) { + if (basic_rates & (1 << i)) { + rate->idx = i; + return; + } + } + + /* could not find a basic rate; use original selection */ +} + +static void __rate_control_send_low(struct ieee80211_hw *hw, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, + struct ieee80211_tx_info *info, + u32 rate_mask) +{ + int i; + u32 rate_flags = + ieee80211_chandef_rate_flags(&hw->conf.chandef); + + if (sband->band == NL80211_BAND_S1GHZ) { + info->control.rates[0].flags |= IEEE80211_TX_RC_S1G_MCS; + info->control.rates[0].idx = 0; + return; + } + + if ((sband->band == NL80211_BAND_2GHZ) && + (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) + rate_flags |= IEEE80211_RATE_ERP_G; + + info->control.rates[0].idx = 0; + for (i = 0; i < sband->n_bitrates; i++) { + if (!(rate_mask & BIT(i))) + continue; + + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + + if (!rate_supported(sta, sband->band, i)) + continue; + + info->control.rates[0].idx = i; + break; + } + WARN_ONCE(i == sband->n_bitrates, + "no supported rates for sta %pM (0x%x, band %d) in rate_mask 0x%x with flags 0x%x\n", + sta ? sta->addr : NULL, + sta ? sta->deflink.supp_rates[sband->band] : -1, + sband->band, + rate_mask, rate_flags); + + info->control.rates[0].count = + (info->flags & IEEE80211_TX_CTL_NO_ACK) ? + 1 : hw->max_rate_tries; + + info->control.skip_table = 1; +} + + +static bool rate_control_send_low(struct ieee80211_sta *pubsta, + struct ieee80211_tx_rate_control *txrc) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); + struct ieee80211_supported_band *sband = txrc->sband; + struct sta_info *sta; + int mcast_rate; + bool use_basicrate = false; + + if (!pubsta || rc_no_data_or_no_ack_use_min(txrc)) { + __rate_control_send_low(txrc->hw, sband, pubsta, info, + txrc->rate_idx_mask); + + if (!pubsta && txrc->bss) { + mcast_rate = txrc->bss_conf->mcast_rate[sband->band]; + if (mcast_rate > 0) { + info->control.rates[0].idx = mcast_rate - 1; + return true; + } + use_basicrate = true; + } else if (pubsta) { + sta = container_of(pubsta, struct sta_info, sta); + if (ieee80211_vif_is_mesh(&sta->sdata->vif)) + use_basicrate = true; + } + + if (use_basicrate) + rc_send_low_basicrate(&info->control.rates[0], + txrc->bss_conf->basic_rates, + sband); + + return true; + } + return false; +} + +static bool rate_idx_match_legacy_mask(s8 *rate_idx, int n_bitrates, u32 mask) +{ + int j; + + /* See whether the selected rate or anything below it is allowed. */ + for (j = *rate_idx; j >= 0; j--) { + if (mask & (1 << j)) { + /* Okay, found a suitable rate. Use it. */ + *rate_idx = j; + return true; + } + } + + /* Try to find a higher rate that would be allowed */ + for (j = *rate_idx + 1; j < n_bitrates; j++) { + if (mask & (1 << j)) { + /* Okay, found a suitable rate. Use it. */ + *rate_idx = j; + return true; + } + } + return false; +} + +static bool rate_idx_match_mcs_mask(s8 *rate_idx, u8 *mcs_mask) +{ + int i, j; + int ridx, rbit; + + ridx = *rate_idx / 8; + rbit = *rate_idx % 8; + + /* sanity check */ + if (ridx < 0 || ridx >= IEEE80211_HT_MCS_MASK_LEN) + return false; + + /* See whether the selected rate or anything below it is allowed. 
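The HT MCS index is addressed as mcs_mask[mcs / 8] bit (mcs % 8), hence the ridx/rbit split above.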
*/ + for (i = ridx; i >= 0; i--) { + for (j = rbit; j >= 0; j--) + if (mcs_mask[i] & BIT(j)) { + *rate_idx = i * 8 + j; + return true; + } + rbit = 7; + } + + /* Try to find a higher rate that would be allowed */ + ridx = (*rate_idx + 1) / 8; + rbit = (*rate_idx + 1) % 8; + + for (i = ridx; i < IEEE80211_HT_MCS_MASK_LEN; i++) { + for (j = rbit; j < 8; j++) + if (mcs_mask[i] & BIT(j)) { + *rate_idx = i * 8 + j; + return true; + } + rbit = 0; + } + return false; +} + +static bool rate_idx_match_vht_mcs_mask(s8 *rate_idx, u16 *vht_mask) +{ + int i, j; + int ridx, rbit; + + ridx = *rate_idx >> 4; + rbit = *rate_idx & 0xf; + + if (ridx < 0 || ridx >= NL80211_VHT_NSS_MAX) + return false; + + /* See whether the selected rate or anything below it is allowed. */ + for (i = ridx; i >= 0; i--) { + for (j = rbit; j >= 0; j--) { + if (vht_mask[i] & BIT(j)) { + *rate_idx = (i << 4) | j; + return true; + } + } + rbit = 15; + } + + /* Try to find a higher rate that would be allowed */ + ridx = (*rate_idx + 1) >> 4; + rbit = (*rate_idx + 1) & 0xf; + + for (i = ridx; i < NL80211_VHT_NSS_MAX; i++) { + for (j = rbit; j < 16; j++) { + if (vht_mask[i] & BIT(j)) { + *rate_idx = (i << 4) | j; + return true; + } + } + rbit = 0; + } + return false; +} + +static void rate_idx_match_mask(s8 *rate_idx, u16 *rate_flags, + struct ieee80211_supported_band *sband, + enum nl80211_chan_width chan_width, + u32 mask, + u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN], + u16 vht_mask[NL80211_VHT_NSS_MAX]) +{ + if (*rate_flags & IEEE80211_TX_RC_VHT_MCS) { + /* handle VHT rates */ + if (rate_idx_match_vht_mcs_mask(rate_idx, vht_mask)) + return; + + *rate_idx = 0; + /* keep protection flags */ + *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS | + IEEE80211_TX_RC_USE_CTS_PROTECT | + IEEE80211_TX_RC_USE_SHORT_PREAMBLE); + + *rate_flags |= IEEE80211_TX_RC_MCS; + if (chan_width == NL80211_CHAN_WIDTH_40) + *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + + if (rate_idx_match_mcs_mask(rate_idx, mcs_mask)) + return; + + /* also try the legacy rates. */ + *rate_flags &= ~(IEEE80211_TX_RC_MCS | + IEEE80211_TX_RC_40_MHZ_WIDTH); + if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates, + mask)) + return; + } else if (*rate_flags & IEEE80211_TX_RC_MCS) { + /* handle HT rates */ + if (rate_idx_match_mcs_mask(rate_idx, mcs_mask)) + return; + + /* also try the legacy rates. */ + *rate_idx = 0; + /* keep protection flags */ + *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS | + IEEE80211_TX_RC_USE_CTS_PROTECT | + IEEE80211_TX_RC_USE_SHORT_PREAMBLE); + if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates, + mask)) + return; + } else { + /* handle legacy rates */ + if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates, + mask)) + return; + + /* if HT BSS, and we handle a data frame, also try HT rates */ + switch (chan_width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: + return; + default: + break; + } + + *rate_idx = 0; + /* keep protection flags */ + *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS | + IEEE80211_TX_RC_USE_CTS_PROTECT | + IEEE80211_TX_RC_USE_SHORT_PREAMBLE); + + *rate_flags |= IEEE80211_TX_RC_MCS; + + if (chan_width == NL80211_CHAN_WIDTH_40) + *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + + if (rate_idx_match_mcs_mask(rate_idx, mcs_mask)) + return; + } + + /* + * Uh.. No suitable rate exists. This should not really happen with + * sane TX rate mask configurations. 
However, should someone manage to + * configure supported rates and TX rate mask in incompatible way, + * allow the frame to be transmitted with whatever the rate control + * selected. + */ +} + +static void rate_fixup_ratelist(struct ieee80211_vif *vif, + struct ieee80211_supported_band *sband, + struct ieee80211_tx_info *info, + struct ieee80211_tx_rate *rates, + int max_rates) +{ + struct ieee80211_rate *rate; + bool inval = false; + int i; + + /* + * Set up the RTS/CTS rate as the fastest basic rate + * that is not faster than the data rate unless there + * is no basic rate slower than the data rate, in which + * case we pick the slowest basic rate + * + * XXX: Should this check all retry rates? + */ + if (!(rates[0].flags & + (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))) { + u32 basic_rates = vif->bss_conf.basic_rates; + s8 baserate = basic_rates ? ffs(basic_rates) - 1 : 0; + + rate = &sband->bitrates[rates[0].idx]; + + for (i = 0; i < sband->n_bitrates; i++) { + /* must be a basic rate */ + if (!(basic_rates & BIT(i))) + continue; + /* must not be faster than the data rate */ + if (sband->bitrates[i].bitrate > rate->bitrate) + continue; + /* maximum */ + if (sband->bitrates[baserate].bitrate < + sband->bitrates[i].bitrate) + baserate = i; + } + + info->control.rts_cts_rate_idx = baserate; + } + + for (i = 0; i < max_rates; i++) { + /* + * make sure there's no valid rate following + * an invalid one, just in case drivers don't + * take the API seriously to stop at -1. + */ + if (inval) { + rates[i].idx = -1; + continue; + } + if (rates[i].idx < 0) { + inval = true; + continue; + } + + /* + * For now assume MCS is already set up correctly, this + * needs to be fixed. + */ + if (rates[i].flags & IEEE80211_TX_RC_MCS) { + WARN_ON(rates[i].idx > 76); + + if (!(rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) && + info->control.use_cts_prot) + rates[i].flags |= + IEEE80211_TX_RC_USE_CTS_PROTECT; + continue; + } + + if (rates[i].flags & IEEE80211_TX_RC_VHT_MCS) { + WARN_ON(ieee80211_rate_get_vht_mcs(&rates[i]) > 9); + continue; + } + + /* set up RTS protection if desired */ + if (info->control.use_rts) { + rates[i].flags |= IEEE80211_TX_RC_USE_RTS_CTS; + info->control.use_cts_prot = false; + } + + /* RC is busted */ + if (WARN_ON_ONCE(rates[i].idx >= sband->n_bitrates)) { + rates[i].idx = -1; + continue; + } + + rate = &sband->bitrates[rates[i].idx]; + + /* set up short preamble */ + if (info->control.short_preamble && + rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) + rates[i].flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE; + + /* set up G protection */ + if (!(rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) && + info->control.use_cts_prot && + rate->flags & IEEE80211_RATE_ERP_G) + rates[i].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT; + } +} + + +static void rate_control_fill_sta_table(struct ieee80211_sta *sta, + struct ieee80211_tx_info *info, + struct ieee80211_tx_rate *rates, + int max_rates) +{ + struct ieee80211_sta_rates *ratetbl = NULL; + int i; + + if (sta && !info->control.skip_table) + ratetbl = rcu_dereference(sta->rates); + + /* Fill remaining rate slots with data from the sta rate table. 
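Rates already present in info->control.rates[] are kept; the per-sta table only fills the slots the rate control algorithm left empty.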
*/ + max_rates = min_t(int, max_rates, IEEE80211_TX_RATE_TABLE_SIZE); + for (i = 0; i < max_rates; i++) { + if (i < ARRAY_SIZE(info->control.rates) && + info->control.rates[i].idx >= 0 && + info->control.rates[i].count) { + if (rates != info->control.rates) + rates[i] = info->control.rates[i]; + } else if (ratetbl) { + rates[i].idx = ratetbl->rate[i].idx; + rates[i].flags = ratetbl->rate[i].flags; + if (info->control.use_rts) + rates[i].count = ratetbl->rate[i].count_rts; + else if (info->control.use_cts_prot) + rates[i].count = ratetbl->rate[i].count_cts; + else + rates[i].count = ratetbl->rate[i].count; + } else { + rates[i].idx = -1; + rates[i].count = 0; + } + + if (rates[i].idx < 0 || !rates[i].count) + break; + } +} + +static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, u32 *mask, + u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN], + u16 vht_mask[NL80211_VHT_NSS_MAX]) +{ + u32 i, flags; + + *mask = sdata->rc_rateidx_mask[sband->band]; + flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); + for (i = 0; i < sband->n_bitrates; i++) { + if ((flags & sband->bitrates[i].flags) != flags) + *mask &= ~BIT(i); + } + + if (*mask == (1 << sband->n_bitrates) - 1 && + !sdata->rc_has_mcs_mask[sband->band] && + !sdata->rc_has_vht_mcs_mask[sband->band]) + return false; + + if (sdata->rc_has_mcs_mask[sband->band]) + memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[sband->band], + IEEE80211_HT_MCS_MASK_LEN); + else + memset(mcs_mask, 0xff, IEEE80211_HT_MCS_MASK_LEN); + + if (sdata->rc_has_vht_mcs_mask[sband->band]) + memcpy(vht_mask, sdata->rc_rateidx_vht_mcs_mask[sband->band], + sizeof(u16) * NL80211_VHT_NSS_MAX); + else + memset(vht_mask, 0xff, sizeof(u16) * NL80211_VHT_NSS_MAX); + + if (sta) { + __le16 sta_vht_cap; + u16 sta_vht_mask[NL80211_VHT_NSS_MAX]; + + /* Filter out rates that the STA does not support */ + *mask &= sta->deflink.supp_rates[sband->band]; + for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) + mcs_mask[i] &= sta->deflink.ht_cap.mcs.rx_mask[i]; + + sta_vht_cap = sta->deflink.vht_cap.vht_mcs.rx_mcs_map; + ieee80211_get_vht_mask_from_cap(sta_vht_cap, sta_vht_mask); + for (i = 0; i < NL80211_VHT_NSS_MAX; i++) + vht_mask[i] &= sta_vht_mask[i]; + } + + return true; +} + +static void +rate_control_apply_mask_ratetbl(struct sta_info *sta, + struct ieee80211_supported_band *sband, + struct ieee80211_sta_rates *rates) +{ + int i; + u32 mask; + u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN]; + u16 vht_mask[NL80211_VHT_NSS_MAX]; + enum nl80211_chan_width chan_width; + + if (!rate_control_cap_mask(sta->sdata, sband, &sta->sta, &mask, + mcs_mask, vht_mask)) + return; + + chan_width = sta->sdata->vif.bss_conf.chandef.width; + for (i = 0; i < IEEE80211_TX_RATE_TABLE_SIZE; i++) { + if (rates->rate[i].idx < 0) + break; + + rate_idx_match_mask(&rates->rate[i].idx, &rates->rate[i].flags, + sband, chan_width, mask, mcs_mask, + vht_mask); + } +} + +static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + struct ieee80211_supported_band *sband, + struct ieee80211_tx_rate *rates, + int max_rates) +{ + enum nl80211_chan_width chan_width; + u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN]; + u32 mask; + u16 rate_flags, vht_mask[NL80211_VHT_NSS_MAX]; + int i; + + /* + * Try to enforce the rateidx mask the user wanted. skip this if the + * default mask (allow all rates) is used to save some processing for + * the common case. 
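+	 * (rate_control_cap_mask() returns false when neither the legacy, HT
+	 * nor VHT mask restricts anything, so the function bails out early.)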
+ */ + if (!rate_control_cap_mask(sdata, sband, sta, &mask, mcs_mask, + vht_mask)) + return; + + /* + * Make sure the rate index selected for each TX rate is + * included in the configured mask and change the rate indexes + * if needed. + */ + chan_width = sdata->vif.bss_conf.chandef.width; + for (i = 0; i < max_rates; i++) { + /* Skip invalid rates */ + if (rates[i].idx < 0) + break; + + rate_flags = rates[i].flags; + rate_idx_match_mask(&rates[i].idx, &rate_flags, sband, + chan_width, mask, mcs_mask, vht_mask); + rates[i].flags = rate_flags; + } +} + +void ieee80211_get_tx_rates(struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct sk_buff *skb, + struct ieee80211_tx_rate *dest, + int max_rates) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_supported_band *sband; + + rate_control_fill_sta_table(sta, info, dest, max_rates); + + if (!vif) + return; + + sdata = vif_to_sdata(vif); + sband = sdata->local->hw.wiphy->bands[info->band]; + + if (ieee80211_is_tx_data(skb)) + rate_control_apply_mask(sdata, sta, sband, dest, max_rates); + + if (dest[0].idx < 0) + __rate_control_send_low(&sdata->local->hw, sband, sta, info, + sdata->rc_rateidx_mask[info->band]); + + if (sta) + rate_fixup_ratelist(vif, sband, info, dest, max_rates); +} +EXPORT_SYMBOL(ieee80211_get_tx_rates); + +void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee80211_tx_rate_control *txrc) +{ + struct rate_control_ref *ref = sdata->local->rate_ctrl; + void *priv_sta = NULL; + struct ieee80211_sta *ista = NULL; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); + int i; + + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + info->control.rates[i].idx = -1; + info->control.rates[i].flags = 0; + info->control.rates[i].count = 0; + } + + if (rate_control_send_low(sta ? &sta->sta : NULL, txrc)) + return; + + if (ieee80211_hw_check(&sdata->local->hw, HAS_RATE_CONTROL)) + return; + + if (sta && test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) { + ista = &sta->sta; + priv_sta = sta->rate_ctrl_priv; + } + + if (ista) { + spin_lock_bh(&sta->rate_ctrl_lock); + ref->ops->get_rate(ref->priv, ista, priv_sta, txrc); + spin_unlock_bh(&sta->rate_ctrl_lock); + } else { + rate_control_send_low(NULL, txrc); + } + + if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_RC_TABLE)) + return; + + ieee80211_get_tx_rates(&sdata->vif, ista, txrc->skb, + info->control.rates, + ARRAY_SIZE(info->control.rates)); +} + +int rate_control_set_rates(struct ieee80211_hw *hw, + struct ieee80211_sta *pubsta, + struct ieee80211_sta_rates *rates) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_sta_rates *old; + struct ieee80211_supported_band *sband; + + sband = ieee80211_get_sband(sta->sdata); + if (!sband) + return -EINVAL; + rate_control_apply_mask_ratetbl(sta, sband, rates); + /* + * mac80211 guarantees that this function will not be called + * concurrently, so the following RCU access is safe, even without + * extra locking. This can not be checked easily, so we just set + * the condition to true. 
+ */ + old = rcu_dereference_protected(pubsta->rates, true); + rcu_assign_pointer(pubsta->rates, rates); + if (old) + kfree_rcu(old, rcu_head); + + if (sta->uploaded) + drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); + + ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta)); + + return 0; +} +EXPORT_SYMBOL(rate_control_set_rates); + +int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, + const char *name) +{ + struct rate_control_ref *ref; + + ASSERT_RTNL(); + + if (local->open_count) + return -EBUSY; + + if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) { + if (WARN_ON(!local->ops->set_rts_threshold)) + return -EINVAL; + return 0; + } + + ref = rate_control_alloc(name, local); + if (!ref) { + wiphy_warn(local->hw.wiphy, + "Failed to select rate control algorithm\n"); + return -ENOENT; + } + + WARN_ON(local->rate_ctrl); + local->rate_ctrl = ref; + + wiphy_debug(local->hw.wiphy, "Selected rate control algorithm '%s'\n", + ref->ops->name); + + return 0; +} + +void rate_control_deinitialize(struct ieee80211_local *local) +{ + struct rate_control_ref *ref; + + ref = local->rate_ctrl; + + if (!ref) + return; + + local->rate_ctrl = NULL; + rate_control_free(local, ref); +} diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h new file mode 100644 index 0000000000..d6190f10fe --- /dev/null +++ b/net/mac80211/rate.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. + * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> + * Copyright (C) 2022 Intel Corporation + */ + +#ifndef IEEE80211_RATE_H +#define IEEE80211_RATE_H + +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/types.h> +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "sta_info.h" +#include "driver-ops.h" + +struct rate_control_ref { + const struct rate_control_ops *ops; + void *priv; +}; + +void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee80211_tx_rate_control *txrc); + +void rate_control_tx_status(struct ieee80211_local *local, + struct ieee80211_tx_status *st); + +void rate_control_rate_init(struct sta_info *sta); +void rate_control_rate_update(struct ieee80211_local *local, + struct ieee80211_supported_band *sband, + struct sta_info *sta, + unsigned int link_id, + u32 changed); + +static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, + struct sta_info *sta, gfp_t gfp) +{ + spin_lock_init(&sta->rate_ctrl_lock); + return ref->ops->alloc_sta(ref->priv, &sta->sta, gfp); +} + +static inline void rate_control_free_sta(struct sta_info *sta) +{ + struct rate_control_ref *ref = sta->rate_ctrl; + struct ieee80211_sta *ista = &sta->sta; + void *priv_sta = sta->rate_ctrl_priv; + + ref->ops->free_sta(ref->priv, ista, priv_sta); +} + +static inline void rate_control_add_sta_debugfs(struct sta_info *sta) +{ +#ifdef CONFIG_MAC80211_DEBUGFS + struct rate_control_ref *ref = sta->rate_ctrl; + if (ref && sta->debugfs_dir && ref->ops->add_sta_debugfs) + ref->ops->add_sta_debugfs(ref->priv, sta->rate_ctrl_priv, + sta->debugfs_dir); +#endif +} + +extern const struct file_operations rcname_ops; + +static inline void rate_control_add_debugfs(struct ieee80211_local *local) +{ +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *debugfsdir; + + if (!local->rate_ctrl) + return; + + if (!local->rate_ctrl->ops->add_debugfs) + return; + + debugfsdir = debugfs_create_dir("rc", 
local->hw.wiphy->debugfsdir); + local->debugfs.rcdir = debugfsdir; + debugfs_create_file("name", 0400, debugfsdir, + local->rate_ctrl, &rcname_ops); + + local->rate_ctrl->ops->add_debugfs(&local->hw, local->rate_ctrl->priv, + debugfsdir); +#endif +} + +void ieee80211_check_rate_mask(struct ieee80211_link_data *link); + +/* Get a reference to the rate control algorithm. If `name' is NULL, get the + * first available algorithm. */ +int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, + const char *name); +void rate_control_deinitialize(struct ieee80211_local *local); + + +/* Rate control algorithms */ +#ifdef CONFIG_MAC80211_RC_MINSTREL +int rc80211_minstrel_init(void); +void rc80211_minstrel_exit(void); +#else +static inline int rc80211_minstrel_init(void) +{ + return 0; +} +static inline void rc80211_minstrel_exit(void) +{ +} +#endif + + +#endif /* IEEE80211_RATE_H */ diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c new file mode 100644 index 0000000000..b34c805220 --- /dev/null +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -0,0 +1,2052 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org> + * Copyright (C) 2019-2022 Intel Corporation + */ +#include <linux/netdevice.h> +#include <linux/types.h> +#include <linux/skbuff.h> +#include <linux/debugfs.h> +#include <linux/random.h> +#include <linux/moduleparam.h> +#include <linux/ieee80211.h> +#include <linux/minmax.h> +#include <net/mac80211.h> +#include "rate.h" +#include "sta_info.h" +#include "rc80211_minstrel_ht.h" + +#define AVG_AMPDU_SIZE 16 +#define AVG_PKT_SIZE 1200 + +/* Number of bits for an average sized packet */ +#define MCS_NBITS ((AVG_PKT_SIZE * AVG_AMPDU_SIZE) << 3) + +/* Number of symbols for a packet with (bps) bits per symbol */ +#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps)) + +/* Transmission time (nanoseconds) for a packet containing (syms) symbols */ +#define MCS_SYMBOL_TIME(sgi, syms) \ + (sgi ? \ + ((syms) * 18000 + 4000) / 5 : /* syms * 3.6 us */ \ + ((syms) * 1000) << 2 /* syms * 4 us */ \ + ) + +/* Transmit duration for the raw data part of an average sized packet */ +#define MCS_DURATION(streams, sgi, bps) \ + (MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) / AVG_AMPDU_SIZE) + +#define BW_20 0 +#define BW_40 1 +#define BW_80 2 + +/* + * Define group sort order: HT40 -> SGI -> #streams + */ +#define GROUP_IDX(_streams, _sgi, _ht40) \ + MINSTREL_HT_GROUP_0 + \ + MINSTREL_MAX_STREAMS * 2 * _ht40 + \ + MINSTREL_MAX_STREAMS * _sgi + \ + _streams - 1 + +#define _MAX(a, b) (((a)>(b))?(a):(b)) + +#define GROUP_SHIFT(duration) \ + _MAX(0, 16 - __builtin_clz(duration)) + +/* MCS rate information for an MCS group */ +#define __MCS_GROUP(_streams, _sgi, _ht40, _s) \ + [GROUP_IDX(_streams, _sgi, _ht40)] = { \ + .streams = _streams, \ + .shift = _s, \ + .bw = _ht40, \ + .flags = \ + IEEE80211_TX_RC_MCS | \ + (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \ + (_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \ + .duration = { \ + MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26) >> _s, \ + MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52) >> _s, \ + MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78) >> _s, \ + MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104) >> _s, \ + MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156) >> _s, \ + MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208) >> _s, \ + MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234) >> _s, \ + MCS_DURATION(_streams, _sgi, _ht40 ? 
540 : 260) >> _s \ + } \ +} + +#define MCS_GROUP_SHIFT(_streams, _sgi, _ht40) \ + GROUP_SHIFT(MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26)) + +#define MCS_GROUP(_streams, _sgi, _ht40) \ + __MCS_GROUP(_streams, _sgi, _ht40, \ + MCS_GROUP_SHIFT(_streams, _sgi, _ht40)) + +#define VHT_GROUP_IDX(_streams, _sgi, _bw) \ + (MINSTREL_VHT_GROUP_0 + \ + MINSTREL_MAX_STREAMS * 2 * (_bw) + \ + MINSTREL_MAX_STREAMS * (_sgi) + \ + (_streams) - 1) + +#define BW2VBPS(_bw, r3, r2, r1) \ + (_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1) + +#define __VHT_GROUP(_streams, _sgi, _bw, _s) \ + [VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \ + .streams = _streams, \ + .shift = _s, \ + .bw = _bw, \ + .flags = \ + IEEE80211_TX_RC_VHT_MCS | \ + (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \ + (_bw == BW_80 ? IEEE80211_TX_RC_80_MHZ_WIDTH : \ + _bw == BW_40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \ + .duration = { \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 117, 54, 26)) >> _s, \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 234, 108, 52)) >> _s, \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 351, 162, 78)) >> _s, \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 468, 216, 104)) >> _s, \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 702, 324, 156)) >> _s, \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 936, 432, 208)) >> _s, \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 1053, 486, 234)) >> _s, \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 1170, 540, 260)) >> _s, \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 1404, 648, 312)) >> _s, \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 1560, 720, 346)) >> _s \ + } \ +} + +#define VHT_GROUP_SHIFT(_streams, _sgi, _bw) \ + GROUP_SHIFT(MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 117, 54, 26))) + +#define VHT_GROUP(_streams, _sgi, _bw) \ + __VHT_GROUP(_streams, _sgi, _bw, \ + VHT_GROUP_SHIFT(_streams, _sgi, _bw)) + +#define CCK_DURATION(_bitrate, _short) \ + (1000 * (10 /* SIFS */ + \ + (_short ? 
72 + 24 : 144 + 48) + \ + (8 * (AVG_PKT_SIZE + 4) * 10) / (_bitrate))) + +#define CCK_DURATION_LIST(_short, _s) \ + CCK_DURATION(10, _short) >> _s, \ + CCK_DURATION(20, _short) >> _s, \ + CCK_DURATION(55, _short) >> _s, \ + CCK_DURATION(110, _short) >> _s + +#define __CCK_GROUP(_s) \ + [MINSTREL_CCK_GROUP] = { \ + .streams = 1, \ + .flags = 0, \ + .shift = _s, \ + .duration = { \ + CCK_DURATION_LIST(false, _s), \ + CCK_DURATION_LIST(true, _s) \ + } \ + } + +#define CCK_GROUP_SHIFT \ + GROUP_SHIFT(CCK_DURATION(10, false)) + +#define CCK_GROUP __CCK_GROUP(CCK_GROUP_SHIFT) + +#define OFDM_DURATION(_bitrate) \ + (1000 * (16 /* SIFS + signal ext */ + \ + 16 /* T_PREAMBLE */ + \ + 4 /* T_SIGNAL */ + \ + 4 * (((16 + 80 * (AVG_PKT_SIZE + 4) + 6) / \ + ((_bitrate) * 4))))) + +#define OFDM_DURATION_LIST(_s) \ + OFDM_DURATION(60) >> _s, \ + OFDM_DURATION(90) >> _s, \ + OFDM_DURATION(120) >> _s, \ + OFDM_DURATION(180) >> _s, \ + OFDM_DURATION(240) >> _s, \ + OFDM_DURATION(360) >> _s, \ + OFDM_DURATION(480) >> _s, \ + OFDM_DURATION(540) >> _s + +#define __OFDM_GROUP(_s) \ + [MINSTREL_OFDM_GROUP] = { \ + .streams = 1, \ + .flags = 0, \ + .shift = _s, \ + .duration = { \ + OFDM_DURATION_LIST(_s), \ + } \ + } + +#define OFDM_GROUP_SHIFT \ + GROUP_SHIFT(OFDM_DURATION(60)) + +#define OFDM_GROUP __OFDM_GROUP(OFDM_GROUP_SHIFT) + + +static bool minstrel_vht_only = true; +module_param(minstrel_vht_only, bool, 0644); +MODULE_PARM_DESC(minstrel_vht_only, + "Use only VHT rates when VHT is supported by sta."); + +/* + * To enable sufficiently targeted rate sampling, MCS rates are divided into + * groups, based on the number of streams and flags (HT40, SGI) that they + * use. + * + * Sortorder has to be fixed for GROUP_IDX macro to be applicable: + * BW -> SGI -> #streams + */ +const struct mcs_group minstrel_mcs_groups[] = { + MCS_GROUP(1, 0, BW_20), + MCS_GROUP(2, 0, BW_20), + MCS_GROUP(3, 0, BW_20), + MCS_GROUP(4, 0, BW_20), + + MCS_GROUP(1, 1, BW_20), + MCS_GROUP(2, 1, BW_20), + MCS_GROUP(3, 1, BW_20), + MCS_GROUP(4, 1, BW_20), + + MCS_GROUP(1, 0, BW_40), + MCS_GROUP(2, 0, BW_40), + MCS_GROUP(3, 0, BW_40), + MCS_GROUP(4, 0, BW_40), + + MCS_GROUP(1, 1, BW_40), + MCS_GROUP(2, 1, BW_40), + MCS_GROUP(3, 1, BW_40), + MCS_GROUP(4, 1, BW_40), + + CCK_GROUP, + OFDM_GROUP, + + VHT_GROUP(1, 0, BW_20), + VHT_GROUP(2, 0, BW_20), + VHT_GROUP(3, 0, BW_20), + VHT_GROUP(4, 0, BW_20), + + VHT_GROUP(1, 1, BW_20), + VHT_GROUP(2, 1, BW_20), + VHT_GROUP(3, 1, BW_20), + VHT_GROUP(4, 1, BW_20), + + VHT_GROUP(1, 0, BW_40), + VHT_GROUP(2, 0, BW_40), + VHT_GROUP(3, 0, BW_40), + VHT_GROUP(4, 0, BW_40), + + VHT_GROUP(1, 1, BW_40), + VHT_GROUP(2, 1, BW_40), + VHT_GROUP(3, 1, BW_40), + VHT_GROUP(4, 1, BW_40), + + VHT_GROUP(1, 0, BW_80), + VHT_GROUP(2, 0, BW_80), + VHT_GROUP(3, 0, BW_80), + VHT_GROUP(4, 0, BW_80), + + VHT_GROUP(1, 1, BW_80), + VHT_GROUP(2, 1, BW_80), + VHT_GROUP(3, 1, BW_80), + VHT_GROUP(4, 1, BW_80), +}; + +const s16 minstrel_cck_bitrates[4] = { 10, 20, 55, 110 }; +const s16 minstrel_ofdm_bitrates[8] = { 60, 90, 120, 180, 240, 360, 480, 540 }; +static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly; +static const u8 minstrel_sample_seq[] = { + MINSTREL_SAMPLE_TYPE_INC, + MINSTREL_SAMPLE_TYPE_JUMP, + MINSTREL_SAMPLE_TYPE_INC, + MINSTREL_SAMPLE_TYPE_JUMP, + MINSTREL_SAMPLE_TYPE_INC, + MINSTREL_SAMPLE_TYPE_SLOW, +}; + +static void +minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi); + +/* + * Some VHT MCSes are invalid (when Ndbps / Nes is not an integer) + * e.g for MCS9@20MHzx1Nss: 
Ndbps=8x52*(5/6) Nes=1 + * + * Returns the valid mcs map for struct minstrel_mcs_group_data.supported + */ +static u16 +minstrel_get_valid_vht_rates(int bw, int nss, __le16 mcs_map) +{ + u16 mask = 0; + + if (bw == BW_20) { + if (nss != 3 && nss != 6) + mask = BIT(9); + } else if (bw == BW_80) { + if (nss == 3 || nss == 7) + mask = BIT(6); + else if (nss == 6) + mask = BIT(9); + } else { + WARN_ON(bw != BW_40); + } + + switch ((le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 3) { + case IEEE80211_VHT_MCS_SUPPORT_0_7: + mask |= 0x300; + break; + case IEEE80211_VHT_MCS_SUPPORT_0_8: + mask |= 0x200; + break; + case IEEE80211_VHT_MCS_SUPPORT_0_9: + break; + default: + mask = 0x3ff; + } + + return 0x3ff & ~mask; +} + +static bool +minstrel_ht_is_legacy_group(int group) +{ + return group == MINSTREL_CCK_GROUP || + group == MINSTREL_OFDM_GROUP; +} + +/* + * Look up an MCS group index based on mac80211 rate information + */ +static int +minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate) +{ + return GROUP_IDX((rate->idx / 8) + 1, + !!(rate->flags & IEEE80211_TX_RC_SHORT_GI), + !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)); +} + +/* + * Look up an MCS group index based on new cfg80211 rate_info. + */ +static int +minstrel_ht_ri_get_group_idx(struct rate_info *rate) +{ + return GROUP_IDX((rate->mcs / 8) + 1, + !!(rate->flags & RATE_INFO_FLAGS_SHORT_GI), + !!(rate->bw & RATE_INFO_BW_40)); +} + +static int +minstrel_vht_get_group_idx(struct ieee80211_tx_rate *rate) +{ + return VHT_GROUP_IDX(ieee80211_rate_get_vht_nss(rate), + !!(rate->flags & IEEE80211_TX_RC_SHORT_GI), + !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + + 2*!!(rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)); +} + +/* + * Look up an MCS group index based on new cfg80211 rate_info. + */ +static int +minstrel_vht_ri_get_group_idx(struct rate_info *rate) +{ + return VHT_GROUP_IDX(rate->nss, + !!(rate->flags & RATE_INFO_FLAGS_SHORT_GI), + !!(rate->bw & RATE_INFO_BW_40) + + 2*!!(rate->bw & RATE_INFO_BW_80)); +} + +static struct minstrel_rate_stats * +minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, + struct ieee80211_tx_rate *rate) +{ + int group, idx; + + if (rate->flags & IEEE80211_TX_RC_MCS) { + group = minstrel_ht_get_group_idx(rate); + idx = rate->idx % 8; + goto out; + } + + if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { + group = minstrel_vht_get_group_idx(rate); + idx = ieee80211_rate_get_vht_mcs(rate); + goto out; + } + + group = MINSTREL_CCK_GROUP; + for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++) { + if (!(mi->supported[group] & BIT(idx))) + continue; + + if (rate->idx != mp->cck_rates[idx]) + continue; + + /* short preamble */ + if ((mi->supported[group] & BIT(idx + 4)) && + (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)) + idx += 4; + goto out; + } + + group = MINSTREL_OFDM_GROUP; + for (idx = 0; idx < ARRAY_SIZE(mp->ofdm_rates[0]); idx++) + if (rate->idx == mp->ofdm_rates[mi->band][idx]) + goto out; + + idx = 0; +out: + return &mi->groups[group].rates[idx]; +} + +/* + * Get the minstrel rate statistics for specified STA and rate info. 
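+ * The rate_info flags select the HT, VHT, CCK or OFDM group; the MCS value
+ * (or legacy bitrate) then picks the per-rate slot within that group.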
+ */ +static struct minstrel_rate_stats * +minstrel_ht_ri_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, + struct ieee80211_rate_status *rate_status) +{ + int group, idx; + struct rate_info *rate = &rate_status->rate_idx; + + if (rate->flags & RATE_INFO_FLAGS_MCS) { + group = minstrel_ht_ri_get_group_idx(rate); + idx = rate->mcs % 8; + goto out; + } + + if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) { + group = minstrel_vht_ri_get_group_idx(rate); + idx = rate->mcs; + goto out; + } + + group = MINSTREL_CCK_GROUP; + for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++) { + if (rate->legacy != minstrel_cck_bitrates[ mp->cck_rates[idx] ]) + continue; + + /* short preamble */ + if ((mi->supported[group] & BIT(idx + 4)) && + mi->use_short_preamble) + idx += 4; + goto out; + } + + group = MINSTREL_OFDM_GROUP; + for (idx = 0; idx < ARRAY_SIZE(mp->ofdm_rates[0]); idx++) + if (rate->legacy == minstrel_ofdm_bitrates[ mp->ofdm_rates[mi->band][idx] ]) + goto out; + + idx = 0; +out: + return &mi->groups[group].rates[idx]; +} + +static inline struct minstrel_rate_stats * +minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index) +{ + return &mi->groups[MI_RATE_GROUP(index)].rates[MI_RATE_IDX(index)]; +} + +static inline int minstrel_get_duration(int index) +{ + const struct mcs_group *group = &minstrel_mcs_groups[MI_RATE_GROUP(index)]; + unsigned int duration = group->duration[MI_RATE_IDX(index)]; + + return duration << group->shift; +} + +static unsigned int +minstrel_ht_avg_ampdu_len(struct minstrel_ht_sta *mi) +{ + int duration; + + if (mi->avg_ampdu_len) + return MINSTREL_TRUNC(mi->avg_ampdu_len); + + if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(mi->max_tp_rate[0]))) + return 1; + + duration = minstrel_get_duration(mi->max_tp_rate[0]); + + if (duration > 400 * 1000) + return 2; + + if (duration > 250 * 1000) + return 4; + + if (duration > 150 * 1000) + return 8; + + return 16; +} + +/* + * Return current throughput based on the average A-MPDU length, taking into + * account the expected number of retransmissions and their expected length + */ +int +minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate, + int prob_avg) +{ + unsigned int nsecs = 0, overhead = mi->overhead; + unsigned int ampdu_len = 1; + + /* do not account throughput if success prob is below 10% */ + if (prob_avg < MINSTREL_FRAC(10, 100)) + return 0; + + if (minstrel_ht_is_legacy_group(group)) + overhead = mi->overhead_legacy; + else + ampdu_len = minstrel_ht_avg_ampdu_len(mi); + + nsecs = 1000 * overhead / ampdu_len; + nsecs += minstrel_mcs_groups[group].duration[rate] << + minstrel_mcs_groups[group].shift; + + /* + * For the throughput calculation, limit the probability value to 90% to + * account for collision related packet error rate fluctuation + * (prob is scaled - see MINSTREL_FRAC above) + */ + if (prob_avg > MINSTREL_FRAC(90, 100)) + prob_avg = MINSTREL_FRAC(90, 100); + + return MINSTREL_TRUNC(100 * ((prob_avg * 1000000) / nsecs)); +} + +/* + * Find & sort topmost throughput rates + * + * If multiple rates provide equal throughput the sorting is based on their + * current success probability. Higher success probability is preferred among + * MCS groups, CCK rates do not provide aggregation and are therefore at last. 
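+ * tp_list[] stays ordered from fastest to slowest; the insertion below scans
+ * from the tail and memmove()s slower entries down one slot to make room.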
+ */ +static void +minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u16 index, + u16 *tp_list) +{ + int cur_group, cur_idx, cur_tp_avg, cur_prob; + int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob; + int j = MAX_THR_RATES; + + cur_group = MI_RATE_GROUP(index); + cur_idx = MI_RATE_IDX(index); + cur_prob = mi->groups[cur_group].rates[cur_idx].prob_avg; + cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, cur_prob); + + do { + tmp_group = MI_RATE_GROUP(tp_list[j - 1]); + tmp_idx = MI_RATE_IDX(tp_list[j - 1]); + tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg; + tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, + tmp_prob); + if (cur_tp_avg < tmp_tp_avg || + (cur_tp_avg == tmp_tp_avg && cur_prob <= tmp_prob)) + break; + j--; + } while (j > 0); + + if (j < MAX_THR_RATES - 1) { + memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) * + (MAX_THR_RATES - (j + 1)))); + } + if (j < MAX_THR_RATES) + tp_list[j] = index; +} + +/* + * Find and set the topmost probability rate per sta and per group + */ +static void +minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 *dest, u16 index) +{ + struct minstrel_mcs_group_data *mg; + struct minstrel_rate_stats *mrs; + int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob; + int max_tp_group, max_tp_idx, max_tp_prob; + int cur_tp_avg, cur_group, cur_idx; + int max_gpr_group, max_gpr_idx; + int max_gpr_tp_avg, max_gpr_prob; + + cur_group = MI_RATE_GROUP(index); + cur_idx = MI_RATE_IDX(index); + mg = &mi->groups[cur_group]; + mrs = &mg->rates[cur_idx]; + + tmp_group = MI_RATE_GROUP(*dest); + tmp_idx = MI_RATE_IDX(*dest); + tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg; + tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob); + + /* if max_tp_rate[0] is from MCS_GROUP max_prob_rate get selected from + * MCS_GROUP as well as CCK_GROUP rates do not allow aggregation */ + max_tp_group = MI_RATE_GROUP(mi->max_tp_rate[0]); + max_tp_idx = MI_RATE_IDX(mi->max_tp_rate[0]); + max_tp_prob = mi->groups[max_tp_group].rates[max_tp_idx].prob_avg; + + if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(index)) && + !minstrel_ht_is_legacy_group(max_tp_group)) + return; + + /* skip rates faster than max tp rate with lower prob */ + if (minstrel_get_duration(mi->max_tp_rate[0]) > minstrel_get_duration(index) && + mrs->prob_avg < max_tp_prob) + return; + + max_gpr_group = MI_RATE_GROUP(mg->max_group_prob_rate); + max_gpr_idx = MI_RATE_IDX(mg->max_group_prob_rate); + max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_avg; + + if (mrs->prob_avg > MINSTREL_FRAC(75, 100)) { + cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, + mrs->prob_avg); + if (cur_tp_avg > tmp_tp_avg) + *dest = index; + + max_gpr_tp_avg = minstrel_ht_get_tp_avg(mi, max_gpr_group, + max_gpr_idx, + max_gpr_prob); + if (cur_tp_avg > max_gpr_tp_avg) + mg->max_group_prob_rate = index; + } else { + if (mrs->prob_avg > tmp_prob) + *dest = index; + if (mrs->prob_avg > max_gpr_prob) + mg->max_group_prob_rate = index; + } +} + + +/* + * Assign new rate set per sta and use CCK rates only if the fastest + * rate (max_tp_rate[0]) is from CCK group. This prohibits such sorted + * rate sets where MCS and CCK rates are mixed, because CCK rates can + * not use aggregation. 
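+ * Concretely: if the best legacy candidate outperforms the best MCS
+ * candidate, the legacy rates are sorted into the MCS list as well.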
+ */ +static void +minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi, + u16 tmp_mcs_tp_rate[MAX_THR_RATES], + u16 tmp_legacy_tp_rate[MAX_THR_RATES]) +{ + unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp, tmp_prob; + int i; + + tmp_group = MI_RATE_GROUP(tmp_legacy_tp_rate[0]); + tmp_idx = MI_RATE_IDX(tmp_legacy_tp_rate[0]); + tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg; + tmp_cck_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob); + + tmp_group = MI_RATE_GROUP(tmp_mcs_tp_rate[0]); + tmp_idx = MI_RATE_IDX(tmp_mcs_tp_rate[0]); + tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg; + tmp_mcs_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob); + + if (tmp_cck_tp > tmp_mcs_tp) { + for(i = 0; i < MAX_THR_RATES; i++) { + minstrel_ht_sort_best_tp_rates(mi, tmp_legacy_tp_rate[i], + tmp_mcs_tp_rate); + } + } + +} + +/* + * Try to increase robustness of max_prob rate by decrease number of + * streams if possible. + */ +static inline void +minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi) +{ + struct minstrel_mcs_group_data *mg; + int tmp_max_streams, group, tmp_idx, tmp_prob; + int tmp_tp = 0; + + if (!mi->sta->deflink.ht_cap.ht_supported) + return; + + group = MI_RATE_GROUP(mi->max_tp_rate[0]); + tmp_max_streams = minstrel_mcs_groups[group].streams; + for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { + mg = &mi->groups[group]; + if (!mi->supported[group] || group == MINSTREL_CCK_GROUP) + continue; + + tmp_idx = MI_RATE_IDX(mg->max_group_prob_rate); + tmp_prob = mi->groups[group].rates[tmp_idx].prob_avg; + + if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) && + (minstrel_mcs_groups[group].streams < tmp_max_streams)) { + mi->max_prob_rate = mg->max_group_prob_rate; + tmp_tp = minstrel_ht_get_tp_avg(mi, group, + tmp_idx, + tmp_prob); + } + } +} + +static u16 +__minstrel_ht_get_sample_rate(struct minstrel_ht_sta *mi, + enum minstrel_sample_type type) +{ + u16 *rates = mi->sample[type].sample_rates; + u16 cur; + int i; + + for (i = 0; i < MINSTREL_SAMPLE_RATES; i++) { + if (!rates[i]) + continue; + + cur = rates[i]; + rates[i] = 0; + return cur; + } + + return 0; +} + +static inline int +minstrel_ewma(int old, int new, int weight) +{ + int diff, incr; + + diff = new - old; + incr = (EWMA_DIV - weight) * diff / EWMA_DIV; + + return old + incr; +} + +static inline int minstrel_filter_avg_add(u16 *prev_1, u16 *prev_2, s32 in) +{ + s32 out_1 = *prev_1; + s32 out_2 = *prev_2; + s32 val; + + if (!in) + in += 1; + + if (!out_1) { + val = out_1 = in; + goto out; + } + + val = MINSTREL_AVG_COEFF1 * in; + val += MINSTREL_AVG_COEFF2 * out_1; + val += MINSTREL_AVG_COEFF3 * out_2; + val >>= MINSTREL_SCALE; + + if (val > 1 << MINSTREL_SCALE) + val = 1 << MINSTREL_SCALE; + if (val < 0) + val = 1; + +out: + *prev_2 = out_1; + *prev_1 = val; + + return val; +} + +/* +* Recalculate statistics and counters of a given rate +*/ +static void +minstrel_ht_calc_rate_stats(struct minstrel_priv *mp, + struct minstrel_rate_stats *mrs) +{ + unsigned int cur_prob; + + if (unlikely(mrs->attempts > 0)) { + cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts); + minstrel_filter_avg_add(&mrs->prob_avg, + &mrs->prob_avg_1, cur_prob); + mrs->att_hist += mrs->attempts; + mrs->succ_hist += mrs->success; + } + + mrs->last_success = mrs->success; + mrs->last_attempts = mrs->attempts; + mrs->success = 0; + mrs->attempts = 0; +} + +static bool +minstrel_ht_find_sample_rate(struct minstrel_ht_sta *mi, int type, int idx) +{ + int i; + + for 
(i = 0; i < MINSTREL_SAMPLE_RATES; i++) { + u16 cur = mi->sample[type].sample_rates[i]; + + if (cur == idx) + return true; + + if (!cur) + break; + } + + return false; +} + +static int +minstrel_ht_move_sample_rates(struct minstrel_ht_sta *mi, int type, + u32 fast_rate_dur, u32 slow_rate_dur) +{ + u16 *rates = mi->sample[type].sample_rates; + int i, j; + + for (i = 0, j = 0; i < MINSTREL_SAMPLE_RATES; i++) { + u32 duration; + bool valid = false; + u16 cur; + + cur = rates[i]; + if (!cur) + continue; + + duration = minstrel_get_duration(cur); + switch (type) { + case MINSTREL_SAMPLE_TYPE_SLOW: + valid = duration > fast_rate_dur && + duration < slow_rate_dur; + break; + case MINSTREL_SAMPLE_TYPE_INC: + case MINSTREL_SAMPLE_TYPE_JUMP: + valid = duration < fast_rate_dur; + break; + default: + valid = false; + break; + } + + if (!valid) { + rates[i] = 0; + continue; + } + + if (i == j) + continue; + + rates[j++] = cur; + rates[i] = 0; + } + + return j; +} + +static int +minstrel_ht_group_min_rate_offset(struct minstrel_ht_sta *mi, int group, + u32 max_duration) +{ + u16 supported = mi->supported[group]; + int i; + + for (i = 0; i < MCS_GROUP_RATES && supported; i++, supported >>= 1) { + if (!(supported & BIT(0))) + continue; + + if (minstrel_get_duration(MI_RATE(group, i)) >= max_duration) + continue; + + return i; + } + + return -1; +} + +/* + * Incremental update rates: + * Flip through groups and pick the first group rate that is faster than the + * highest currently selected rate + */ +static u16 +minstrel_ht_next_inc_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur) +{ + u8 type = MINSTREL_SAMPLE_TYPE_INC; + int i, index = 0; + u8 group; + + group = mi->sample[type].sample_group; + for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) { + group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups); + + index = minstrel_ht_group_min_rate_offset(mi, group, + fast_rate_dur); + if (index < 0) + continue; + + index = MI_RATE(group, index & 0xf); + if (!minstrel_ht_find_sample_rate(mi, type, index)) + goto out; + } + index = 0; + +out: + mi->sample[type].sample_group = group; + + return index; +} + +static int +minstrel_ht_next_group_sample_rate(struct minstrel_ht_sta *mi, int group, + u16 supported, int offset) +{ + struct minstrel_mcs_group_data *mg = &mi->groups[group]; + u16 idx; + int i; + + for (i = 0; i < MCS_GROUP_RATES; i++) { + idx = sample_table[mg->column][mg->index]; + if (++mg->index >= MCS_GROUP_RATES) { + mg->index = 0; + if (++mg->column >= ARRAY_SIZE(sample_table)) + mg->column = 0; + } + + if (idx < offset) + continue; + + if (!(supported & BIT(idx))) + continue; + + return MI_RATE(group, idx); + } + + return -1; +} + +/* + * Jump rates: + * Sample random rates, use those that are faster than the highest + * currently selected rate. 
Rates between the fastest and the slowest
+ * get sorted into the slow sample bucket, but only if it has room
+ */
+static u16
+minstrel_ht_next_jump_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur,
+ u32 slow_rate_dur, int *slow_rate_ofs)
+{
+ struct minstrel_rate_stats *mrs;
+ u32 max_duration = slow_rate_dur;
+ int i, index, offset;
+ u16 *slow_rates;
+ u16 supported;
+ u32 duration;
+ u8 group;
+
+ if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
+ max_duration = fast_rate_dur;
+
+ slow_rates = mi->sample[MINSTREL_SAMPLE_TYPE_SLOW].sample_rates;
+ group = mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_group;
+ for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
+ u8 type;
+
+ group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups);
+
+ supported = mi->supported[group];
+ if (!supported)
+ continue;
+
+ offset = minstrel_ht_group_min_rate_offset(mi, group,
+ max_duration);
+ if (offset < 0)
+ continue;
+
+ index = minstrel_ht_next_group_sample_rate(mi, group, supported,
+ offset);
+ if (index < 0)
+ continue;
+
+ duration = minstrel_get_duration(index);
+ if (duration < fast_rate_dur)
+ type = MINSTREL_SAMPLE_TYPE_JUMP;
+ else
+ type = MINSTREL_SAMPLE_TYPE_SLOW;
+
+ if (minstrel_ht_find_sample_rate(mi, type, index))
+ continue;
+
+ if (type == MINSTREL_SAMPLE_TYPE_JUMP)
+ goto found;
+
+ if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
+ continue;
+
+ if (duration >= slow_rate_dur)
+ continue;
+
+ /* skip slow rates with high success probability */
+ mrs = minstrel_get_ratestats(mi, index);
+ if (mrs->prob_avg > MINSTREL_FRAC(95, 100))
+ continue;
+
+ slow_rates[(*slow_rate_ofs)++] = index;
+ if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
+ max_duration = fast_rate_dur;
+ }
+ index = 0;
+
+found:
+ mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_group = group;
+
+ return index;
+}
+
+static void
+minstrel_ht_refill_sample_rates(struct minstrel_ht_sta *mi)
+{
+ u32 prob_dur = minstrel_get_duration(mi->max_prob_rate);
+ u32 tp_dur = minstrel_get_duration(mi->max_tp_rate[0]);
+ u32 tp2_dur = minstrel_get_duration(mi->max_tp_rate[1]);
+ u32 fast_rate_dur = min(min(tp_dur, tp2_dur), prob_dur);
+ u32 slow_rate_dur = max(max(tp_dur, tp2_dur), prob_dur);
+ u16 *rates;
+ int i, j;
+
+ rates = mi->sample[MINSTREL_SAMPLE_TYPE_INC].sample_rates;
+ i = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_INC,
+ fast_rate_dur, slow_rate_dur);
+ while (i < MINSTREL_SAMPLE_RATES) {
+ rates[i] = minstrel_ht_next_inc_rate(mi, tp_dur);
+ if (!rates[i])
+ break;
+
+ i++;
+ }
+
+ rates = mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_rates;
+ i = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_JUMP,
+ fast_rate_dur, slow_rate_dur);
+ j = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_SLOW,
+ fast_rate_dur, slow_rate_dur);
+ while (i < MINSTREL_SAMPLE_RATES) {
+ rates[i] = minstrel_ht_next_jump_rate(mi, fast_rate_dur,
+ slow_rate_dur, &j);
+ if (!rates[i])
+ break;
+
+ i++;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mi->sample); i++)
+ memcpy(mi->sample[i].cur_sample_rates, mi->sample[i].sample_rates,
+ sizeof(mi->sample[i].cur_sample_rates));
+}
+
+
+/*
+ * Update rate statistics and select new primary rates
+ *
+ * Rules for rate selection:
+ * - max_prob_rate must use only one stream, as a tradeoff between delivery
+ * probability and throughput during strong fluctuations
+ * - as long as the max prob rate has a probability of more than 75%, pick
+ * higher throughput rates, even if the probability is a bit lower
+ */
+static void
+minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
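/*
 * Illustrative note added in editing, not part of the original mac80211
 * code: the prob_avg values handled below, and thresholds like the 75%
 * mentioned above, are fixed-point fractions scaled by MINSTREL_SCALE
 * (12 bits), so MINSTREL_FRAC(75, 100) == 3072 and a probability of 1.0
 * is 4096.  Rate identifiers are u16 values built with MI_RATE(group, idx)
 * from rc80211_minstrel_ht.h, which packs the group into bits 15..4 and
 * the per-group rate offset into bits 3..0; MI_RATE_GROUP() and
 * MI_RATE_IDX() unpack them again.  The A-MPDU length average uses
 * minstrel_ewma() with EWMA_LEVEL/EWMA_DIV = 96/128, so each update moves
 * the old average 25% of the way toward the new sample, e.g.
 * minstrel_ewma(800, 400, EWMA_LEVEL) == 800 + (400 - 800) * 32 / 128 == 700.
 */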
+{ + struct minstrel_mcs_group_data *mg; + struct minstrel_rate_stats *mrs; + int group, i, j, cur_prob; + u16 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES]; + u16 tmp_legacy_tp_rate[MAX_THR_RATES], tmp_max_prob_rate; + u16 index; + bool ht_supported = mi->sta->deflink.ht_cap.ht_supported; + + if (mi->ampdu_packets > 0) { + if (!ieee80211_hw_check(mp->hw, TX_STATUS_NO_AMPDU_LEN)) + mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len, + MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), + EWMA_LEVEL); + else + mi->avg_ampdu_len = 0; + mi->ampdu_len = 0; + mi->ampdu_packets = 0; + } + + if (mi->supported[MINSTREL_CCK_GROUP]) + group = MINSTREL_CCK_GROUP; + else if (mi->supported[MINSTREL_OFDM_GROUP]) + group = MINSTREL_OFDM_GROUP; + else + group = 0; + + index = MI_RATE(group, 0); + for (j = 0; j < ARRAY_SIZE(tmp_legacy_tp_rate); j++) + tmp_legacy_tp_rate[j] = index; + + if (mi->supported[MINSTREL_VHT_GROUP_0]) + group = MINSTREL_VHT_GROUP_0; + else if (ht_supported) + group = MINSTREL_HT_GROUP_0; + else if (mi->supported[MINSTREL_CCK_GROUP]) + group = MINSTREL_CCK_GROUP; + else + group = MINSTREL_OFDM_GROUP; + + index = MI_RATE(group, 0); + tmp_max_prob_rate = index; + for (j = 0; j < ARRAY_SIZE(tmp_mcs_tp_rate); j++) + tmp_mcs_tp_rate[j] = index; + + /* Find best rate sets within all MCS groups*/ + for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { + u16 *tp_rate = tmp_mcs_tp_rate; + u16 last_prob = 0; + + mg = &mi->groups[group]; + if (!mi->supported[group]) + continue; + + /* (re)Initialize group rate indexes */ + for(j = 0; j < MAX_THR_RATES; j++) + tmp_group_tp_rate[j] = MI_RATE(group, 0); + + if (group == MINSTREL_CCK_GROUP && ht_supported) + tp_rate = tmp_legacy_tp_rate; + + for (i = MCS_GROUP_RATES - 1; i >= 0; i--) { + if (!(mi->supported[group] & BIT(i))) + continue; + + index = MI_RATE(group, i); + + mrs = &mg->rates[i]; + mrs->retry_updated = false; + minstrel_ht_calc_rate_stats(mp, mrs); + + if (mrs->att_hist) + last_prob = max(last_prob, mrs->prob_avg); + else + mrs->prob_avg = max(last_prob, mrs->prob_avg); + cur_prob = mrs->prob_avg; + + if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0) + continue; + + /* Find max throughput rate set */ + minstrel_ht_sort_best_tp_rates(mi, index, tp_rate); + + /* Find max throughput rate set within a group */ + minstrel_ht_sort_best_tp_rates(mi, index, + tmp_group_tp_rate); + } + + memcpy(mg->max_group_tp_rate, tmp_group_tp_rate, + sizeof(mg->max_group_tp_rate)); + } + + /* Assign new rate set per sta */ + minstrel_ht_assign_best_tp_rates(mi, tmp_mcs_tp_rate, + tmp_legacy_tp_rate); + memcpy(mi->max_tp_rate, tmp_mcs_tp_rate, sizeof(mi->max_tp_rate)); + + for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { + if (!mi->supported[group]) + continue; + + mg = &mi->groups[group]; + mg->max_group_prob_rate = MI_RATE(group, 0); + + for (i = 0; i < MCS_GROUP_RATES; i++) { + if (!(mi->supported[group] & BIT(i))) + continue; + + index = MI_RATE(group, i); + + /* Find max probability rate per group and global */ + minstrel_ht_set_best_prob_rate(mi, &tmp_max_prob_rate, + index); + } + } + + mi->max_prob_rate = tmp_max_prob_rate; + + /* Try to increase robustness of max_prob_rate*/ + minstrel_ht_prob_rate_reduce_streams(mi); + minstrel_ht_refill_sample_rates(mi); + +#ifdef CONFIG_MAC80211_DEBUGFS + /* use fixed index if set */ + if (mp->fixed_rate_idx != -1) { + for (i = 0; i < 4; i++) + mi->max_tp_rate[i] = mp->fixed_rate_idx; + mi->max_prob_rate = mp->fixed_rate_idx; + } +#endif + + /* Reset update 
timer */ + mi->last_stats_update = jiffies; + mi->sample_time = jiffies; +} + +static bool +minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, + struct ieee80211_tx_rate *rate) +{ + int i; + + if (rate->idx < 0) + return false; + + if (!rate->count) + return false; + + if (rate->flags & IEEE80211_TX_RC_MCS || + rate->flags & IEEE80211_TX_RC_VHT_MCS) + return true; + + for (i = 0; i < ARRAY_SIZE(mp->cck_rates); i++) + if (rate->idx == mp->cck_rates[i]) + return true; + + for (i = 0; i < ARRAY_SIZE(mp->ofdm_rates[0]); i++) + if (rate->idx == mp->ofdm_rates[mi->band][i]) + return true; + + return false; +} + +/* + * Check whether rate_status contains valid information. + */ +static bool +minstrel_ht_ri_txstat_valid(struct minstrel_priv *mp, + struct minstrel_ht_sta *mi, + struct ieee80211_rate_status *rate_status) +{ + int i; + + if (!rate_status) + return false; + if (!rate_status->try_count) + return false; + + if (rate_status->rate_idx.flags & RATE_INFO_FLAGS_MCS || + rate_status->rate_idx.flags & RATE_INFO_FLAGS_VHT_MCS) + return true; + + for (i = 0; i < ARRAY_SIZE(mp->cck_rates); i++) { + if (rate_status->rate_idx.legacy == + minstrel_cck_bitrates[ mp->cck_rates[i] ]) + return true; + } + + for (i = 0; i < ARRAY_SIZE(mp->ofdm_rates); i++) { + if (rate_status->rate_idx.legacy == + minstrel_ofdm_bitrates[ mp->ofdm_rates[mi->band][i] ]) + return true; + } + + return false; +} + +static void +minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary) +{ + int group, orig_group; + + orig_group = group = MI_RATE_GROUP(*idx); + while (group > 0) { + group--; + + if (!mi->supported[group]) + continue; + + if (minstrel_mcs_groups[group].streams > + minstrel_mcs_groups[orig_group].streams) + continue; + + if (primary) + *idx = mi->groups[group].max_group_tp_rate[0]; + else + *idx = mi->groups[group].max_group_tp_rate[1]; + break; + } +} + +static void +minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, + void *priv_sta, struct ieee80211_tx_status *st) +{ + struct ieee80211_tx_info *info = st->info; + struct minstrel_ht_sta *mi = priv_sta; + struct ieee80211_tx_rate *ar = info->status.rates; + struct minstrel_rate_stats *rate, *rate2; + struct minstrel_priv *mp = priv; + u32 update_interval = mp->update_interval; + bool last, update = false; + int i; + + /* Ignore packet that was sent with noAck flag */ + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + return; + + /* This packet was aggregated but doesn't carry status info */ + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && + !(info->flags & IEEE80211_TX_STAT_AMPDU)) + return; + + if (!(info->flags & IEEE80211_TX_STAT_AMPDU)) { + info->status.ampdu_ack_len = + (info->flags & IEEE80211_TX_STAT_ACK ? 
1 : 0); + info->status.ampdu_len = 1; + } + + /* wraparound */ + if (mi->total_packets >= ~0 - info->status.ampdu_len) { + mi->total_packets = 0; + mi->sample_packets = 0; + } + + mi->total_packets += info->status.ampdu_len; + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + mi->sample_packets += info->status.ampdu_len; + + mi->ampdu_packets++; + mi->ampdu_len += info->status.ampdu_len; + + if (st->rates && st->n_rates) { + last = !minstrel_ht_ri_txstat_valid(mp, mi, &(st->rates[0])); + for (i = 0; !last; i++) { + last = (i == st->n_rates - 1) || + !minstrel_ht_ri_txstat_valid(mp, mi, + &(st->rates[i + 1])); + + rate = minstrel_ht_ri_get_stats(mp, mi, + &(st->rates[i])); + + if (last) + rate->success += info->status.ampdu_ack_len; + + rate->attempts += st->rates[i].try_count * + info->status.ampdu_len; + } + } else { + last = !minstrel_ht_txstat_valid(mp, mi, &ar[0]); + for (i = 0; !last; i++) { + last = (i == IEEE80211_TX_MAX_RATES - 1) || + !minstrel_ht_txstat_valid(mp, mi, &ar[i + 1]); + + rate = minstrel_ht_get_stats(mp, mi, &ar[i]); + if (last) + rate->success += info->status.ampdu_ack_len; + + rate->attempts += ar[i].count * info->status.ampdu_len; + } + } + + if (mp->hw->max_rates > 1) { + /* + * check for sudden death of spatial multiplexing, + * downgrade to a lower number of streams if necessary. + */ + rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]); + if (rate->attempts > 30 && + rate->success < rate->attempts / 4) { + minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true); + update = true; + } + + rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]); + if (rate2->attempts > 30 && + rate2->success < rate2->attempts / 4) { + minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false); + update = true; + } + } + + if (time_after(jiffies, mi->last_stats_update + update_interval)) { + update = true; + minstrel_ht_update_stats(mp, mi); + } + + if (update) + minstrel_ht_update_rates(mp, mi); +} + +static void +minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, + int index) +{ + struct minstrel_rate_stats *mrs; + unsigned int tx_time, tx_time_rtscts, tx_time_data; + unsigned int cw = mp->cw_min; + unsigned int ctime = 0; + unsigned int t_slot = 9; /* FIXME */ + unsigned int ampdu_len = minstrel_ht_avg_ampdu_len(mi); + unsigned int overhead = 0, overhead_rtscts = 0; + + mrs = minstrel_get_ratestats(mi, index); + if (mrs->prob_avg < MINSTREL_FRAC(1, 10)) { + mrs->retry_count = 1; + mrs->retry_count_rtscts = 1; + return; + } + + mrs->retry_count = 2; + mrs->retry_count_rtscts = 2; + mrs->retry_updated = true; + + tx_time_data = minstrel_get_duration(index) * ampdu_len / 1000; + + /* Contention time for first 2 tries */ + ctime = (t_slot * cw) >> 1; + cw = min((cw << 1) | 1, mp->cw_max); + ctime += (t_slot * cw) >> 1; + cw = min((cw << 1) | 1, mp->cw_max); + + if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(index))) { + overhead = mi->overhead_legacy; + overhead_rtscts = mi->overhead_legacy_rtscts; + } else { + overhead = mi->overhead; + overhead_rtscts = mi->overhead_rtscts; + } + + /* Total TX time for data and Contention after first 2 tries */ + tx_time = ctime + 2 * (overhead + tx_time_data); + tx_time_rtscts = ctime + 2 * (overhead_rtscts + tx_time_data); + + /* See how many more tries we can fit inside segment size */ + do { + /* Contention time for this try */ + ctime = (t_slot * cw) >> 1; + cw = min((cw << 1) | 1, mp->cw_max); + + /* Total TX time after this try */ + tx_time += ctime + overhead + tx_time_data; + tx_time_rtscts += ctime + 
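/*
 * Illustrative note added in editing, not part of the original mac80211
 * code: cw starts at mp->cw_min (15, set in minstrel_ht_alloc() further
 * down) and grows via (cw << 1) | 1 up to mp->cw_max (1023), i.e.
 * 15 -> 31 -> 63 -> ... -> 1023, so each extra try adds an average
 * backoff of t_slot * cw / 2 on top of the frame and ACK overhead.
 * Retries keep being added until the accumulated estimate reaches
 * mp->segment_size or mrs->retry_count hits mp->max_retry.
 */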
overhead_rtscts + tx_time_data; + + if (tx_time_rtscts < mp->segment_size) + mrs->retry_count_rtscts++; + } while ((tx_time < mp->segment_size) && + (++mrs->retry_count < mp->max_retry)); +} + + +static void +minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, + struct ieee80211_sta_rates *ratetbl, int offset, int index) +{ + int group_idx = MI_RATE_GROUP(index); + const struct mcs_group *group = &minstrel_mcs_groups[group_idx]; + struct minstrel_rate_stats *mrs; + u8 idx; + u16 flags = group->flags; + + mrs = minstrel_get_ratestats(mi, index); + if (!mrs->retry_updated) + minstrel_calc_retransmit(mp, mi, index); + + if (mrs->prob_avg < MINSTREL_FRAC(20, 100) || !mrs->retry_count) { + ratetbl->rate[offset].count = 2; + ratetbl->rate[offset].count_rts = 2; + ratetbl->rate[offset].count_cts = 2; + } else { + ratetbl->rate[offset].count = mrs->retry_count; + ratetbl->rate[offset].count_cts = mrs->retry_count; + ratetbl->rate[offset].count_rts = mrs->retry_count_rtscts; + } + + index = MI_RATE_IDX(index); + if (group_idx == MINSTREL_CCK_GROUP) + idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)]; + else if (group_idx == MINSTREL_OFDM_GROUP) + idx = mp->ofdm_rates[mi->band][index % + ARRAY_SIZE(mp->ofdm_rates[0])]; + else if (flags & IEEE80211_TX_RC_VHT_MCS) + idx = ((group->streams - 1) << 4) | + (index & 0xF); + else + idx = index + (group->streams - 1) * 8; + + /* enable RTS/CTS if needed: + * - if station is in dynamic SMPS (and streams > 1) + * - for fallback rates, to increase chances of getting through + */ + if (offset > 0 || + (mi->sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC && + group->streams > 1)) { + ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts; + flags |= IEEE80211_TX_RC_USE_RTS_CTS; + } + + ratetbl->rate[offset].idx = idx; + ratetbl->rate[offset].flags = flags; +} + +static inline int +minstrel_ht_get_prob_avg(struct minstrel_ht_sta *mi, int rate) +{ + int group = MI_RATE_GROUP(rate); + rate = MI_RATE_IDX(rate); + return mi->groups[group].rates[rate].prob_avg; +} + +static int +minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi) +{ + int group = MI_RATE_GROUP(mi->max_prob_rate); + const struct mcs_group *g = &minstrel_mcs_groups[group]; + int rate = MI_RATE_IDX(mi->max_prob_rate); + unsigned int duration; + + /* Disable A-MSDU if max_prob_rate is bad */ + if (mi->groups[group].rates[rate].prob_avg < MINSTREL_FRAC(50, 100)) + return 1; + + duration = g->duration[rate]; + duration <<= g->shift; + + /* If the rate is slower than single-stream MCS1, make A-MSDU limit small */ + if (duration > MCS_DURATION(1, 0, 52)) + return 500; + + /* + * If the rate is slower than single-stream MCS4, limit A-MSDU to usual + * data packet size + */ + if (duration > MCS_DURATION(1, 0, 104)) + return 1600; + + /* + * If the rate is slower than single-stream MCS7, or if the max throughput + * rate success probability is less than 75%, limit A-MSDU to twice the usual + * data packet size + */ + if (duration > MCS_DURATION(1, 0, 260) || + (minstrel_ht_get_prob_avg(mi, mi->max_tp_rate[0]) < + MINSTREL_FRAC(75, 100))) + return 3200; + + /* + * HT A-MPDU limits maximum MPDU size under BA agreement to 4095 bytes. + * Since aggregation sessions are started/stopped without txq flush, use + * the limit here to avoid the complexity of having to de-aggregate + * packets in the queue. 
+ */ + if (!mi->sta->deflink.vht_cap.vht_supported) + return IEEE80211_MAX_MPDU_LEN_HT_BA; + + /* unlimited */ + return 0; +} + +static void +minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) +{ + struct ieee80211_sta_rates *rates; + int i = 0; + int max_rates = min_t(int, mp->hw->max_rates, IEEE80211_TX_RATE_TABLE_SIZE); + + rates = kzalloc(sizeof(*rates), GFP_ATOMIC); + if (!rates) + return; + + /* Start with max_tp_rate[0] */ + minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]); + + /* Fill up remaining, keep one entry for max_probe_rate */ + for (; i < (max_rates - 1); i++) + minstrel_ht_set_rate(mp, mi, rates, i, mi->max_tp_rate[i]); + + if (i < max_rates) + minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate); + + if (i < IEEE80211_TX_RATE_TABLE_SIZE) + rates->rate[i].idx = -1; + + mi->sta->deflink.agg.max_rc_amsdu_len = minstrel_ht_get_max_amsdu_len(mi); + ieee80211_sta_recalc_aggregates(mi->sta); + rate_control_set_rates(mp->hw, mi->sta, rates); +} + +static u16 +minstrel_ht_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) +{ + u8 seq; + + if (mp->hw->max_rates > 1) { + seq = mi->sample_seq; + mi->sample_seq = (seq + 1) % ARRAY_SIZE(minstrel_sample_seq); + seq = minstrel_sample_seq[seq]; + } else { + seq = MINSTREL_SAMPLE_TYPE_INC; + } + + return __minstrel_ht_get_sample_rate(mi, seq); +} + +static void +minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, + struct ieee80211_tx_rate_control *txrc) +{ + const struct mcs_group *sample_group; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); + struct ieee80211_tx_rate *rate = &info->status.rates[0]; + struct minstrel_ht_sta *mi = priv_sta; + struct minstrel_priv *mp = priv; + u16 sample_idx; + + info->flags |= mi->tx_flags; + +#ifdef CONFIG_MAC80211_DEBUGFS + if (mp->fixed_rate_idx != -1) + return; +#endif + + /* Don't use EAPOL frames for sampling on non-mrr hw */ + if (mp->hw->max_rates == 1 && + (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) + return; + + if (time_is_after_jiffies(mi->sample_time)) + return; + + mi->sample_time = jiffies + MINSTREL_SAMPLE_INTERVAL; + sample_idx = minstrel_ht_get_sample_rate(mp, mi); + if (!sample_idx) + return; + + sample_group = &minstrel_mcs_groups[MI_RATE_GROUP(sample_idx)]; + sample_idx = MI_RATE_IDX(sample_idx); + + if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP] && + (sample_idx >= 4) != txrc->short_preamble) + return; + + info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; + rate->count = 1; + + if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP]) { + int idx = sample_idx % ARRAY_SIZE(mp->cck_rates); + rate->idx = mp->cck_rates[idx]; + } else if (sample_group == &minstrel_mcs_groups[MINSTREL_OFDM_GROUP]) { + int idx = sample_idx % ARRAY_SIZE(mp->ofdm_rates[0]); + rate->idx = mp->ofdm_rates[mi->band][idx]; + } else if (sample_group->flags & IEEE80211_TX_RC_VHT_MCS) { + ieee80211_rate_set_vht(rate, MI_RATE_IDX(sample_idx), + sample_group->streams); + } else { + rate->idx = sample_idx + (sample_group->streams - 1) * 8; + } + + rate->flags = sample_group->flags; +} + +static void +minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta) +{ + int i; + + if (sband->band != NL80211_BAND_2GHZ) + return; + + if (sta->deflink.ht_cap.ht_supported && + !ieee80211_hw_check(mp->hw, SUPPORTS_HT_CCK_RATES)) + return; + + for (i = 0; i < 4; i++) { + if (mp->cck_rates[i] == 0xff || + 
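/*
 * Illustrative note added in editing, not part of the original mac80211
 * code: in the CCK group, bits 0-3 of mi->supported[] stand for the
 * long-preamble 1/2/5.5/11 Mbit/s rates and bits 4-7 mirror them with
 * short preamble, which is why BIT(i + 4) is also set below for rates
 * flagged IEEE80211_RATE_SHORT_PREAMBLE; an 0xff entry in mp->cck_rates[]
 * means that CCK rate was not found in the band at all.
 */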
!rate_supported(sta, sband->band, mp->cck_rates[i])) + continue; + + mi->supported[MINSTREL_CCK_GROUP] |= BIT(i); + if (sband->bitrates[i].flags & IEEE80211_RATE_SHORT_PREAMBLE) + mi->supported[MINSTREL_CCK_GROUP] |= BIT(i + 4); + } +} + +static void +minstrel_ht_update_ofdm(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta) +{ + const u8 *rates; + int i; + + if (sta->deflink.ht_cap.ht_supported) + return; + + rates = mp->ofdm_rates[sband->band]; + for (i = 0; i < ARRAY_SIZE(mp->ofdm_rates[0]); i++) { + if (rates[i] == 0xff || + !rate_supported(sta, sband->band, rates[i])) + continue; + + mi->supported[MINSTREL_OFDM_GROUP] |= BIT(i); + } +} + +static void +minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, void *priv_sta) +{ + struct minstrel_priv *mp = priv; + struct minstrel_ht_sta *mi = priv_sta; + struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs; + u16 ht_cap = sta->deflink.ht_cap.cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; + const struct ieee80211_rate *ctl_rate; + struct sta_info *sta_info; + bool ldpc, erp; + int use_vht; + int ack_dur; + int stbc; + int i; + + BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != MINSTREL_GROUPS_NB); + + if (vht_cap->vht_supported) + use_vht = vht_cap->vht_mcs.tx_mcs_map != cpu_to_le16(~0); + else + use_vht = 0; + + memset(mi, 0, sizeof(*mi)); + + mi->sta = sta; + mi->band = sband->band; + mi->last_stats_update = jiffies; + + ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0); + mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0); + mi->overhead += ack_dur; + mi->overhead_rtscts = mi->overhead + 2 * ack_dur; + + ctl_rate = &sband->bitrates[rate_lowest_index(sband, sta)]; + erp = ctl_rate->flags & IEEE80211_RATE_ERP_G; + ack_dur = ieee80211_frame_duration(sband->band, 10, + ctl_rate->bitrate, erp, 1, + ieee80211_chandef_get_shift(chandef)); + mi->overhead_legacy = ack_dur; + mi->overhead_legacy_rtscts = mi->overhead_legacy + 2 * ack_dur; + + mi->avg_ampdu_len = MINSTREL_FRAC(1, 1); + + if (!use_vht) { + stbc = (ht_cap & IEEE80211_HT_CAP_RX_STBC) >> + IEEE80211_HT_CAP_RX_STBC_SHIFT; + + ldpc = ht_cap & IEEE80211_HT_CAP_LDPC_CODING; + } else { + stbc = (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK) >> + IEEE80211_VHT_CAP_RXSTBC_SHIFT; + + ldpc = vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC; + } + + mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT; + if (ldpc) + mi->tx_flags |= IEEE80211_TX_CTL_LDPC; + + for (i = 0; i < ARRAY_SIZE(mi->groups); i++) { + u32 gflags = minstrel_mcs_groups[i].flags; + int bw, nss; + + mi->supported[i] = 0; + if (minstrel_ht_is_legacy_group(i)) + continue; + + if (gflags & IEEE80211_TX_RC_SHORT_GI) { + if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) { + if (!(ht_cap & IEEE80211_HT_CAP_SGI_40)) + continue; + } else { + if (!(ht_cap & IEEE80211_HT_CAP_SGI_20)) + continue; + } + } + + if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH && + sta->deflink.bandwidth < IEEE80211_STA_RX_BW_40) + continue; + + nss = minstrel_mcs_groups[i].streams; + + /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */ + if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC && nss > 1) + continue; + + /* HT rate */ + if (gflags & IEEE80211_TX_RC_MCS) { + if (use_vht && minstrel_vht_only) + continue; + + mi->supported[i] = mcs->rx_mask[nss - 1]; + continue; + } + + /* VHT rate */ + if (!vht_cap->vht_supported || + WARN_ON(!(gflags & 
IEEE80211_TX_RC_VHT_MCS)) || + WARN_ON(gflags & IEEE80211_TX_RC_160_MHZ_WIDTH)) + continue; + + if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) { + if (sta->deflink.bandwidth < IEEE80211_STA_RX_BW_80 || + ((gflags & IEEE80211_TX_RC_SHORT_GI) && + !(vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80))) { + continue; + } + } + + if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) + bw = BW_40; + else if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) + bw = BW_80; + else + bw = BW_20; + + mi->supported[i] = minstrel_get_valid_vht_rates(bw, nss, + vht_cap->vht_mcs.tx_mcs_map); + } + + sta_info = container_of(sta, struct sta_info, sta); + mi->use_short_preamble = test_sta_flag(sta_info, WLAN_STA_SHORT_PREAMBLE) && + sta_info->sdata->vif.bss_conf.use_short_preamble; + + minstrel_ht_update_cck(mp, mi, sband, sta); + minstrel_ht_update_ofdm(mp, mi, sband, sta); + + /* create an initial rate table with the lowest supported rates */ + minstrel_ht_update_stats(mp, mi); + minstrel_ht_update_rates(mp, mi); +} + +static void +minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, void *priv_sta) +{ + minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta); +} + +static void +minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, void *priv_sta, + u32 changed) +{ + minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta); +} + +static void * +minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) +{ + struct ieee80211_supported_band *sband; + struct minstrel_ht_sta *mi; + struct minstrel_priv *mp = priv; + struct ieee80211_hw *hw = mp->hw; + int max_rates = 0; + int i; + + for (i = 0; i < NUM_NL80211_BANDS; i++) { + sband = hw->wiphy->bands[i]; + if (sband && sband->n_bitrates > max_rates) + max_rates = sband->n_bitrates; + } + + return kzalloc(sizeof(*mi), gfp); +} + +static void +minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta) +{ + kfree(priv_sta); +} + +static void +minstrel_ht_fill_rate_array(u8 *dest, struct ieee80211_supported_band *sband, + const s16 *bitrates, int n_rates, u32 rate_flags) +{ + int i, j; + + for (i = 0; i < sband->n_bitrates; i++) { + struct ieee80211_rate *rate = &sband->bitrates[i]; + + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + + for (j = 0; j < n_rates; j++) { + if (rate->bitrate != bitrates[j]) + continue; + + dest[j] = i; + break; + } + } +} + +static void +minstrel_ht_init_cck_rates(struct minstrel_priv *mp) +{ + static const s16 bitrates[4] = { 10, 20, 55, 110 }; + struct ieee80211_supported_band *sband; + u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef); + + memset(mp->cck_rates, 0xff, sizeof(mp->cck_rates)); + sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ]; + if (!sband) + return; + + BUILD_BUG_ON(ARRAY_SIZE(mp->cck_rates) != ARRAY_SIZE(bitrates)); + minstrel_ht_fill_rate_array(mp->cck_rates, sband, + minstrel_cck_bitrates, + ARRAY_SIZE(minstrel_cck_bitrates), + rate_flags); +} + +static void +minstrel_ht_init_ofdm_rates(struct minstrel_priv *mp, enum nl80211_band band) +{ + static const s16 bitrates[8] = { 60, 90, 120, 180, 240, 360, 480, 540 }; + struct ieee80211_supported_band *sband; + u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef); + + memset(mp->ofdm_rates[band], 0xff, sizeof(mp->ofdm_rates[band])); + sband = mp->hw->wiphy->bands[band]; + if (!sband) + return; + + 
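/*
 * Illustrative note added in editing, not part of the original mac80211
 * code: the bitrate tables used here (minstrel_cck_bitrates,
 * minstrel_ofdm_bitrates and the local bitrates[] arrays used only for
 * the size checks) are in units of 100 kbit/s, so 10 means 1 Mbit/s and
 * 540 means 54 Mbit/s.  minstrel_ht_fill_rate_array() maps each entry to
 * the index of the matching rate in sband->bitrates, leaving the 0xff
 * written by the memset above for rates the band does not offer; that
 * 0xff is what minstrel_ht_update_cck()/_ofdm() test against.
 */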
BUILD_BUG_ON(ARRAY_SIZE(mp->ofdm_rates[band]) != ARRAY_SIZE(bitrates)); + minstrel_ht_fill_rate_array(mp->ofdm_rates[band], sband, + minstrel_ofdm_bitrates, + ARRAY_SIZE(minstrel_ofdm_bitrates), + rate_flags); +} + +static void * +minstrel_ht_alloc(struct ieee80211_hw *hw) +{ + struct minstrel_priv *mp; + int i; + + mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC); + if (!mp) + return NULL; + + /* contention window settings + * Just an approximation. Using the per-queue values would complicate + * the calculations and is probably unnecessary */ + mp->cw_min = 15; + mp->cw_max = 1023; + + /* maximum time that the hw is allowed to stay in one MRR segment */ + mp->segment_size = 6000; + + if (hw->max_rate_tries > 0) + mp->max_retry = hw->max_rate_tries; + else + /* safe default, does not necessarily have to match hw properties */ + mp->max_retry = 7; + + mp->hw = hw; + mp->update_interval = HZ / 20; + + minstrel_ht_init_cck_rates(mp); + for (i = 0; i < ARRAY_SIZE(mp->hw->wiphy->bands); i++) + minstrel_ht_init_ofdm_rates(mp, i); + + return mp; +} + +#ifdef CONFIG_MAC80211_DEBUGFS +static void minstrel_ht_add_debugfs(struct ieee80211_hw *hw, void *priv, + struct dentry *debugfsdir) +{ + struct minstrel_priv *mp = priv; + + mp->fixed_rate_idx = (u32) -1; + debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir, + &mp->fixed_rate_idx); +} +#endif + +static void +minstrel_ht_free(void *priv) +{ + kfree(priv); +} + +static u32 minstrel_ht_get_expected_throughput(void *priv_sta) +{ + struct minstrel_ht_sta *mi = priv_sta; + int i, j, prob, tp_avg; + + i = MI_RATE_GROUP(mi->max_tp_rate[0]); + j = MI_RATE_IDX(mi->max_tp_rate[0]); + prob = mi->groups[i].rates[j].prob_avg; + + /* convert tp_avg from pkt per second in kbps */ + tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10; + tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024; + + return tp_avg; +} + +static const struct rate_control_ops mac80211_minstrel_ht = { + .name = "minstrel_ht", + .capa = RATE_CTRL_CAPA_AMPDU_TRIGGER, + .tx_status_ext = minstrel_ht_tx_status, + .get_rate = minstrel_ht_get_rate, + .rate_init = minstrel_ht_rate_init, + .rate_update = minstrel_ht_rate_update, + .alloc_sta = minstrel_ht_alloc_sta, + .free_sta = minstrel_ht_free_sta, + .alloc = minstrel_ht_alloc, + .free = minstrel_ht_free, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_debugfs = minstrel_ht_add_debugfs, + .add_sta_debugfs = minstrel_ht_add_sta_debugfs, +#endif + .get_expected_throughput = minstrel_ht_get_expected_throughput, +}; + + +static void __init init_sample_table(void) +{ + int col, i, new_idx; + u8 rnd[MCS_GROUP_RATES]; + + memset(sample_table, 0xff, sizeof(sample_table)); + for (col = 0; col < SAMPLE_COLUMNS; col++) { + get_random_bytes(rnd, sizeof(rnd)); + for (i = 0; i < MCS_GROUP_RATES; i++) { + new_idx = (i + rnd[i]) % MCS_GROUP_RATES; + while (sample_table[col][new_idx] != 0xff) + new_idx = (new_idx + 1) % MCS_GROUP_RATES; + + sample_table[col][new_idx] = i; + } + } +} + +int __init +rc80211_minstrel_init(void) +{ + init_sample_table(); + return ieee80211_rate_control_register(&mac80211_minstrel_ht); +} + +void +rc80211_minstrel_exit(void) +{ + ieee80211_rate_control_unregister(&mac80211_minstrel_ht); +} diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h new file mode 100644 index 0000000000..4be0401f77 --- /dev/null +++ b/net/mac80211/rc80211_minstrel_ht.h @@ -0,0 +1,202 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org> + */ + +#ifndef __RC_MINSTREL_HT_H 
+#define __RC_MINSTREL_HT_H + +#include <linux/bitfield.h> + +/* number of highest throughput rates to consider*/ +#define MAX_THR_RATES 4 +#define SAMPLE_COLUMNS 10 /* number of columns in sample table */ + +/* scaled fraction values */ +#define MINSTREL_SCALE 12 +#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div) +#define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE) + +#define EWMA_LEVEL 96 /* ewma weighting factor [/EWMA_DIV] */ +#define EWMA_DIV 128 + +/* + * Coefficients for moving average with noise filter (period=16), + * scaled by 10 bits + * + * a1 = exp(-pi * sqrt(2) / period) + * coeff2 = 2 * a1 * cos(sqrt(2) * 2 * pi / period) + * coeff3 = -sqr(a1) + * coeff1 = 1 - coeff2 - coeff3 + */ +#define MINSTREL_AVG_COEFF1 (MINSTREL_FRAC(1, 1) - \ + MINSTREL_AVG_COEFF2 - \ + MINSTREL_AVG_COEFF3) +#define MINSTREL_AVG_COEFF2 0x00001499 +#define MINSTREL_AVG_COEFF3 -0x0000092e + +/* + * The number of streams can be changed to 2 to reduce code + * size and memory footprint. + */ +#define MINSTREL_MAX_STREAMS 4 +#define MINSTREL_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */ +#define MINSTREL_VHT_STREAM_GROUPS 6 /* BW(=3) * SGI(=2) */ + +#define MINSTREL_HT_GROUPS_NB (MINSTREL_MAX_STREAMS * \ + MINSTREL_HT_STREAM_GROUPS) +#define MINSTREL_VHT_GROUPS_NB (MINSTREL_MAX_STREAMS * \ + MINSTREL_VHT_STREAM_GROUPS) +#define MINSTREL_LEGACY_GROUPS_NB 2 +#define MINSTREL_GROUPS_NB (MINSTREL_HT_GROUPS_NB + \ + MINSTREL_VHT_GROUPS_NB + \ + MINSTREL_LEGACY_GROUPS_NB) + +#define MINSTREL_HT_GROUP_0 0 +#define MINSTREL_CCK_GROUP (MINSTREL_HT_GROUP_0 + MINSTREL_HT_GROUPS_NB) +#define MINSTREL_OFDM_GROUP (MINSTREL_CCK_GROUP + 1) +#define MINSTREL_VHT_GROUP_0 (MINSTREL_OFDM_GROUP + 1) + +#define MCS_GROUP_RATES 10 + +#define MI_RATE_IDX_MASK GENMASK(3, 0) +#define MI_RATE_GROUP_MASK GENMASK(15, 4) + +#define MI_RATE(_group, _idx) \ + (FIELD_PREP(MI_RATE_GROUP_MASK, _group) | \ + FIELD_PREP(MI_RATE_IDX_MASK, _idx)) + +#define MI_RATE_IDX(_rate) FIELD_GET(MI_RATE_IDX_MASK, _rate) +#define MI_RATE_GROUP(_rate) FIELD_GET(MI_RATE_GROUP_MASK, _rate) + +#define MINSTREL_SAMPLE_RATES 5 /* rates per sample type */ +#define MINSTREL_SAMPLE_INTERVAL (HZ / 50) + +struct minstrel_priv { + struct ieee80211_hw *hw; + unsigned int cw_min; + unsigned int cw_max; + unsigned int max_retry; + unsigned int segment_size; + unsigned int update_interval; + + u8 cck_rates[4]; + u8 ofdm_rates[NUM_NL80211_BANDS][8]; + +#ifdef CONFIG_MAC80211_DEBUGFS + /* + * enable fixed rate processing per RC + * - write static index to debugfs:ieee80211/phyX/rc/fixed_rate_idx + * - write -1 to enable RC processing again + * - setting will be applied on next update + */ + u32 fixed_rate_idx; +#endif +}; + + +struct mcs_group { + u16 flags; + u8 streams; + u8 shift; + u8 bw; + u16 duration[MCS_GROUP_RATES]; +}; + +extern const s16 minstrel_cck_bitrates[4]; +extern const s16 minstrel_ofdm_bitrates[8]; +extern const struct mcs_group minstrel_mcs_groups[]; + +struct minstrel_rate_stats { + /* current / last sampling period attempts/success counters */ + u16 attempts, last_attempts; + u16 success, last_success; + + /* total attempts/success counters */ + u32 att_hist, succ_hist; + + /* prob_avg - moving average of prob */ + u16 prob_avg; + u16 prob_avg_1; + + /* maximum retry counts */ + u8 retry_count; + u8 retry_count_rtscts; + + bool retry_updated; +}; + +enum minstrel_sample_type { + MINSTREL_SAMPLE_TYPE_INC, + MINSTREL_SAMPLE_TYPE_JUMP, + MINSTREL_SAMPLE_TYPE_SLOW, + __MINSTREL_SAMPLE_TYPE_MAX +}; + +struct minstrel_mcs_group_data { + u8 
index; + u8 column; + + /* sorted rate set within a MCS group*/ + u16 max_group_tp_rate[MAX_THR_RATES]; + u16 max_group_prob_rate; + + /* MCS rate statistics */ + struct minstrel_rate_stats rates[MCS_GROUP_RATES]; +}; + +struct minstrel_sample_category { + u8 sample_group; + u16 sample_rates[MINSTREL_SAMPLE_RATES]; + u16 cur_sample_rates[MINSTREL_SAMPLE_RATES]; +}; + +struct minstrel_ht_sta { + struct ieee80211_sta *sta; + + /* ampdu length (average, per sampling interval) */ + unsigned int ampdu_len; + unsigned int ampdu_packets; + + /* ampdu length (EWMA) */ + unsigned int avg_ampdu_len; + + /* overall sorted rate set */ + u16 max_tp_rate[MAX_THR_RATES]; + u16 max_prob_rate; + + /* time of last status update */ + unsigned long last_stats_update; + + /* overhead time in usec for each frame */ + unsigned int overhead; + unsigned int overhead_rtscts; + unsigned int overhead_legacy; + unsigned int overhead_legacy_rtscts; + + unsigned int total_packets; + unsigned int sample_packets; + + /* tx flags to add for frames for this sta */ + u32 tx_flags; + bool use_short_preamble; + u8 band; + + u8 sample_seq; + u16 sample_rate; + + unsigned long sample_time; + struct minstrel_sample_category sample[__MINSTREL_SAMPLE_TYPE_MAX]; + + /* Bitfield of supported MCS rates of all groups */ + u16 supported[MINSTREL_GROUPS_NB]; + + /* MCS rate group info and statistics */ + struct minstrel_mcs_group_data groups[MINSTREL_GROUPS_NB]; +}; + +void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir); +int minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate, + int prob_avg); + +#endif diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c new file mode 100644 index 0000000000..25b8a67a63 --- /dev/null +++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c @@ -0,0 +1,336 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org> + */ +#include <linux/netdevice.h> +#include <linux/types.h> +#include <linux/skbuff.h> +#include <linux/debugfs.h> +#include <linux/ieee80211.h> +#include <linux/export.h> +#include <net/mac80211.h> +#include "rc80211_minstrel_ht.h" + +struct minstrel_debugfs_info { + size_t len; + char buf[]; +}; + +static ssize_t +minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) +{ + struct minstrel_debugfs_info *ms; + + ms = file->private_data; + return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len); +} + +static int +minstrel_stats_release(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + return 0; +} + +static bool +minstrel_ht_is_sample_rate(struct minstrel_ht_sta *mi, int idx) +{ + int type, i; + + for (type = 0; type < ARRAY_SIZE(mi->sample); type++) + for (i = 0; i < MINSTREL_SAMPLE_RATES; i++) + if (mi->sample[type].cur_sample_rates[i] == idx) + return true; + return false; +} + +static char * +minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) +{ + const struct mcs_group *mg; + unsigned int j, tp_max, tp_avg, eprob, tx_time; + char htmode = '2'; + char gimode = 'L'; + u32 gflags; + + if (!mi->supported[i]) + return p; + + mg = &minstrel_mcs_groups[i]; + gflags = mg->flags; + + if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) + htmode = '4'; + else if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) + htmode = '8'; + if (gflags & IEEE80211_TX_RC_SHORT_GI) + gimode = 'S'; + + for (j = 0; j < MCS_GROUP_RATES; j++) { + struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j]; + int idx = MI_RATE(i, j); 
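/*
 * Illustrative note added in editing, not part of the original mac80211
 * code: mg->duration[] stores per-frame transmit times right-shifted by
 * mg->shift so they fit the u16 table; the code below shifts the value
 * back and divides by 1000 to print the "airtime" column in microseconds
 * (see the "tx_time[rate(i)] in usec" comment), and the "avg(prob)"
 * column comes from MINSTREL_TRUNC(mrs->prob_avg * 1000), so a prob_avg
 * of 4096 (i.e. 100%) prints as "100.0".
 */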
+ unsigned int duration; + + if (!(mi->supported[i] & BIT(j))) + continue; + + if (gflags & IEEE80211_TX_RC_MCS) { + p += sprintf(p, "HT%c0 ", htmode); + p += sprintf(p, "%cGI ", gimode); + p += sprintf(p, "%d ", mg->streams); + } else if (gflags & IEEE80211_TX_RC_VHT_MCS) { + p += sprintf(p, "VHT%c0 ", htmode); + p += sprintf(p, "%cGI ", gimode); + p += sprintf(p, "%d ", mg->streams); + } else if (i == MINSTREL_OFDM_GROUP) { + p += sprintf(p, "OFDM "); + p += sprintf(p, "1 "); + } else { + p += sprintf(p, "CCK "); + p += sprintf(p, "%cP ", j < 4 ? 'L' : 'S'); + p += sprintf(p, "1 "); + } + + *(p++) = (idx == mi->max_tp_rate[0]) ? 'A' : ' '; + *(p++) = (idx == mi->max_tp_rate[1]) ? 'B' : ' '; + *(p++) = (idx == mi->max_tp_rate[2]) ? 'C' : ' '; + *(p++) = (idx == mi->max_tp_rate[3]) ? 'D' : ' '; + *(p++) = (idx == mi->max_prob_rate) ? 'P' : ' '; + *(p++) = minstrel_ht_is_sample_rate(mi, idx) ? 'S' : ' '; + + if (gflags & IEEE80211_TX_RC_MCS) { + p += sprintf(p, " MCS%-2u", (mg->streams - 1) * 8 + j); + } else if (gflags & IEEE80211_TX_RC_VHT_MCS) { + p += sprintf(p, " MCS%-1u/%1u", j, mg->streams); + } else { + int r; + + if (i == MINSTREL_OFDM_GROUP) + r = minstrel_ofdm_bitrates[j % 8]; + else + r = minstrel_cck_bitrates[j % 4]; + + p += sprintf(p, " %2u.%1uM", r / 10, r % 10); + } + + p += sprintf(p, " %3u ", idx); + + /* tx_time[rate(i)] in usec */ + duration = mg->duration[j]; + duration <<= mg->shift; + tx_time = DIV_ROUND_CLOSEST(duration, 1000); + p += sprintf(p, "%6u ", tx_time); + + tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100)); + tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_avg); + eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000); + + p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u" + " %3u %3u %-3u " + "%9llu %-9llu\n", + tp_max / 10, tp_max % 10, + tp_avg / 10, tp_avg % 10, + eprob / 10, eprob % 10, + mrs->retry_count, + mrs->last_success, + mrs->last_attempts, + (unsigned long long)mrs->succ_hist, + (unsigned long long)mrs->att_hist); + } + + return p; +} + +static int +minstrel_ht_stats_open(struct inode *inode, struct file *file) +{ + struct minstrel_ht_sta *mi = inode->i_private; + struct minstrel_debugfs_info *ms; + unsigned int i; + char *p; + + ms = kmalloc(32768, GFP_KERNEL); + if (!ms) + return -ENOMEM; + + file->private_data = ms; + p = ms->buf; + + p += sprintf(p, "\n"); + p += sprintf(p, + " best ____________rate__________ ____statistics___ _____last____ ______sum-of________\n"); + p += sprintf(p, + "mode guard # rate [name idx airtime max_tp] [avg(tp) avg(prob)] [retry|suc|att] [#success | #attempts]\n"); + + p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p); + for (i = 0; i < MINSTREL_CCK_GROUP; i++) + p = minstrel_ht_stats_dump(mi, i, p); + for (i++; i < ARRAY_SIZE(mi->groups); i++) + p = minstrel_ht_stats_dump(mi, i, p); + + p += sprintf(p, "\nTotal packet count:: ideal %d " + "lookaround %d\n", + max(0, (int) mi->total_packets - (int) mi->sample_packets), + mi->sample_packets); + if (mi->avg_ampdu_len) + p += sprintf(p, "Average # of aggregated frames per A-MPDU: %d.%d\n", + MINSTREL_TRUNC(mi->avg_ampdu_len), + MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10); + ms->len = p - ms->buf; + WARN_ON(ms->len + sizeof(*ms) > 32768); + + return nonseekable_open(inode, file); +} + +static const struct file_operations minstrel_ht_stat_fops = { + .owner = THIS_MODULE, + .open = minstrel_ht_stats_open, + .read = minstrel_stats_read, + .release = minstrel_stats_release, + .llseek = no_llseek, +}; + +static char * +minstrel_ht_stats_csv_dump(struct 
minstrel_ht_sta *mi, int i, char *p) +{ + const struct mcs_group *mg; + unsigned int j, tp_max, tp_avg, eprob, tx_time; + char htmode = '2'; + char gimode = 'L'; + u32 gflags; + + if (!mi->supported[i]) + return p; + + mg = &minstrel_mcs_groups[i]; + gflags = mg->flags; + + if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) + htmode = '4'; + else if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) + htmode = '8'; + if (gflags & IEEE80211_TX_RC_SHORT_GI) + gimode = 'S'; + + for (j = 0; j < MCS_GROUP_RATES; j++) { + struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j]; + int idx = MI_RATE(i, j); + unsigned int duration; + + if (!(mi->supported[i] & BIT(j))) + continue; + + if (gflags & IEEE80211_TX_RC_MCS) { + p += sprintf(p, "HT%c0,", htmode); + p += sprintf(p, "%cGI,", gimode); + p += sprintf(p, "%d,", mg->streams); + } else if (gflags & IEEE80211_TX_RC_VHT_MCS) { + p += sprintf(p, "VHT%c0,", htmode); + p += sprintf(p, "%cGI,", gimode); + p += sprintf(p, "%d,", mg->streams); + } else if (i == MINSTREL_OFDM_GROUP) { + p += sprintf(p, "OFDM,,1,"); + } else { + p += sprintf(p, "CCK,"); + p += sprintf(p, "%cP,", j < 4 ? 'L' : 'S'); + p += sprintf(p, "1,"); + } + + p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[0]) ? "A" : "")); + p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[1]) ? "B" : "")); + p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[2]) ? "C" : "")); + p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[3]) ? "D" : "")); + p += sprintf(p, "%s" ,((idx == mi->max_prob_rate) ? "P" : "")); + p += sprintf(p, "%s", (minstrel_ht_is_sample_rate(mi, idx) ? "S" : "")); + + if (gflags & IEEE80211_TX_RC_MCS) { + p += sprintf(p, ",MCS%-2u,", (mg->streams - 1) * 8 + j); + } else if (gflags & IEEE80211_TX_RC_VHT_MCS) { + p += sprintf(p, ",MCS%-1u/%1u,", j, mg->streams); + } else { + int r; + + if (i == MINSTREL_OFDM_GROUP) + r = minstrel_ofdm_bitrates[j % 8]; + else + r = minstrel_cck_bitrates[j % 4]; + + p += sprintf(p, ",%2u.%1uM,", r / 10, r % 10); + } + + p += sprintf(p, "%u,", idx); + + duration = mg->duration[j]; + duration <<= mg->shift; + tx_time = DIV_ROUND_CLOSEST(duration, 1000); + p += sprintf(p, "%u,", tx_time); + + tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100)); + tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_avg); + eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000); + + p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u," + "%u,%llu,%llu,", + tp_max / 10, tp_max % 10, + tp_avg / 10, tp_avg % 10, + eprob / 10, eprob % 10, + mrs->retry_count, + mrs->last_success, + mrs->last_attempts, + (unsigned long long)mrs->succ_hist, + (unsigned long long)mrs->att_hist); + p += sprintf(p, "%d,%d,%d.%d\n", + max(0, (int) mi->total_packets - + (int) mi->sample_packets), + mi->sample_packets, + MINSTREL_TRUNC(mi->avg_ampdu_len), + MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10); + } + + return p; +} + +static int +minstrel_ht_stats_csv_open(struct inode *inode, struct file *file) +{ + struct minstrel_ht_sta *mi = inode->i_private; + struct minstrel_debugfs_info *ms; + unsigned int i; + char *p; + + ms = kmalloc(32768, GFP_KERNEL); + if (!ms) + return -ENOMEM; + + file->private_data = ms; + + p = ms->buf; + + p = minstrel_ht_stats_csv_dump(mi, MINSTREL_CCK_GROUP, p); + for (i = 0; i < MINSTREL_CCK_GROUP; i++) + p = minstrel_ht_stats_csv_dump(mi, i, p); + for (i++; i < ARRAY_SIZE(mi->groups); i++) + p = minstrel_ht_stats_csv_dump(mi, i, p); + + ms->len = p - ms->buf; + WARN_ON(ms->len + sizeof(*ms) > 32768); + + return nonseekable_open(inode, file); +} + +static const struct file_operations 
minstrel_ht_stat_csv_fops = { + .owner = THIS_MODULE, + .open = minstrel_ht_stats_csv_open, + .read = minstrel_stats_read, + .release = minstrel_stats_release, + .llseek = no_llseek, +}; + +void +minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir) +{ + debugfs_create_file("rc_stats", 0444, dir, priv_sta, + &minstrel_ht_stat_fops); + debugfs_create_file("rc_stats_csv", 0444, dir, priv_sta, + &minstrel_ht_stat_csv_fops); +} diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c new file mode 100644 index 0000000000..26ca2f5dc5 --- /dev/null +++ b/net/mac80211/rx.c @@ -0,0 +1,5444 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018-2023 Intel Corporation + */ + +#include <linux/jiffies.h> +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/rcupdate.h> +#include <linux/export.h> +#include <linux/kcov.h> +#include <linux/bitops.h> +#include <net/mac80211.h> +#include <net/ieee80211_radiotap.h> +#include <asm/unaligned.h> + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "led.h" +#include "mesh.h" +#include "wep.h" +#include "wpa.h" +#include "tkip.h" +#include "wme.h" +#include "rate.h" + +/* + * monitor mode reception + * + * This function cleans up the SKB, i.e. it removes all the stuff + * only useful for monitoring. + */ +static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb, + unsigned int present_fcs_len, + unsigned int rtap_space) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr; + unsigned int hdrlen; + __le16 fc; + + if (present_fcs_len) + __pskb_trim(skb, skb->len - present_fcs_len); + pskb_pull(skb, rtap_space); + + /* After pulling radiotap header, clear all flags that indicate + * info in skb->data. + */ + status->flag &= ~(RX_FLAG_RADIOTAP_TLV_AT_END | + RX_FLAG_RADIOTAP_LSIG | + RX_FLAG_RADIOTAP_HE_MU | + RX_FLAG_RADIOTAP_HE); + + hdr = (void *)skb->data; + fc = hdr->frame_control; + + /* + * Remove the HT-Control field (if present) on management + * frames after we've sent the frame to monitoring. We + * (currently) don't need it, and don't properly parse + * frames with it present, due to the assumption of a + * fixed management header length. 
+ */ + if (likely(!ieee80211_is_mgmt(fc) || !ieee80211_has_order(fc))) + return skb; + + hdrlen = ieee80211_hdrlen(fc); + hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_ORDER); + + if (!pskb_may_pull(skb, hdrlen)) { + dev_kfree_skb(skb); + return NULL; + } + + memmove(skb->data + IEEE80211_HT_CTL_LEN, skb->data, + hdrlen - IEEE80211_HT_CTL_LEN); + pskb_pull(skb, IEEE80211_HT_CTL_LEN); + + return skb; +} + +static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len, + unsigned int rtap_space) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr; + + hdr = (void *)(skb->data + rtap_space); + + if (status->flag & (RX_FLAG_FAILED_FCS_CRC | + RX_FLAG_FAILED_PLCP_CRC | + RX_FLAG_ONLY_MONITOR | + RX_FLAG_NO_PSDU)) + return true; + + if (unlikely(skb->len < 16 + present_fcs_len + rtap_space)) + return true; + + if (ieee80211_is_ctl(hdr->frame_control) && + !ieee80211_is_pspoll(hdr->frame_control) && + !ieee80211_is_back_req(hdr->frame_control)) + return true; + + return false; +} + +static int +ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, + struct ieee80211_rx_status *status, + struct sk_buff *skb) +{ + int len; + + /* always present fields */ + len = sizeof(struct ieee80211_radiotap_header) + 8; + + /* allocate extra bitmaps */ + if (status->chains) + len += 4 * hweight8(status->chains); + + if (ieee80211_have_rx_timestamp(status)) { + len = ALIGN(len, 8); + len += 8; + } + if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) + len += 1; + + /* antenna field, if we don't have per-chain info */ + if (!status->chains) + len += 1; + + /* padding for RX_FLAGS if necessary */ + len = ALIGN(len, 2); + + if (status->encoding == RX_ENC_HT) /* HT info */ + len += 3; + + if (status->flag & RX_FLAG_AMPDU_DETAILS) { + len = ALIGN(len, 4); + len += 8; + } + + if (status->encoding == RX_ENC_VHT) { + len = ALIGN(len, 2); + len += 12; + } + + if (local->hw.radiotap_timestamp.units_pos >= 0) { + len = ALIGN(len, 8); + len += 12; + } + + if (status->encoding == RX_ENC_HE && + status->flag & RX_FLAG_RADIOTAP_HE) { + len = ALIGN(len, 2); + len += 12; + BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12); + } + + if (status->encoding == RX_ENC_HE && + status->flag & RX_FLAG_RADIOTAP_HE_MU) { + len = ALIGN(len, 2); + len += 12; + BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12); + } + + if (status->flag & RX_FLAG_NO_PSDU) + len += 1; + + if (status->flag & RX_FLAG_RADIOTAP_LSIG) { + len = ALIGN(len, 2); + len += 4; + BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4); + } + + if (status->chains) { + /* antenna and antenna signal fields */ + len += 2 * hweight8(status->chains); + } + + if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) { + int tlv_offset = 0; + + /* + * The position to look at depends on the existence (or non- + * existence) of other elements, so take that into account... 
+ */ + if (status->flag & RX_FLAG_RADIOTAP_HE) + tlv_offset += + sizeof(struct ieee80211_radiotap_he); + if (status->flag & RX_FLAG_RADIOTAP_HE_MU) + tlv_offset += + sizeof(struct ieee80211_radiotap_he_mu); + if (status->flag & RX_FLAG_RADIOTAP_LSIG) + tlv_offset += + sizeof(struct ieee80211_radiotap_lsig); + + /* ensure 4 byte alignment for TLV */ + len = ALIGN(len, 4); + + /* TLVs until the mac header */ + len += skb_mac_header(skb) - &skb->data[tlv_offset]; + } + + return len; +} + +static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, + int link_id, + struct sta_info *sta, + struct sk_buff *skb) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + + if (link_id >= 0) { + status->link_valid = 1; + status->link_id = link_id; + } else { + status->link_valid = 0; + } + + skb_queue_tail(&sdata->skb_queue, skb); + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); + if (sta) + sta->deflink.rx_stats.packets++; +} + +static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, + int link_id, + struct sta_info *sta, + struct sk_buff *skb) +{ + skb->protocol = 0; + __ieee80211_queue_skb_to_iface(sdata, link_id, sta, skb); +} + +static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, + int rtap_space) +{ + struct { + struct ieee80211_hdr_3addr hdr; + u8 category; + u8 action_code; + } __packed __aligned(2) action; + + if (!sdata) + return; + + BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1); + + if (skb->len < rtap_space + sizeof(action) + + VHT_MUMIMO_GROUPS_DATA_LEN) + return; + + if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr)) + return; + + skb_copy_bits(skb, rtap_space, &action, sizeof(action)); + + if (!ieee80211_is_action(action.hdr.frame_control)) + return; + + if (action.category != WLAN_CATEGORY_VHT) + return; + + if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT) + return; + + if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr)) + return; + + skb = skb_copy(skb, GFP_ATOMIC); + if (!skb) + return; + + ieee80211_queue_skb_to_iface(sdata, -1, NULL, skb); +} + +/* + * ieee80211_add_rx_radiotap_header - add radiotap header + * + * add a radiotap header containing all the fields which the hardware provided. 
+ */ +static void +ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, + struct sk_buff *skb, + struct ieee80211_rate *rate, + int rtap_len, bool has_fcs) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_radiotap_header *rthdr; + unsigned char *pos; + __le32 *it_present; + u32 it_present_val; + u16 rx_flags = 0; + u16 channel_flags = 0; + u32 tlvs_len = 0; + int mpdulen, chain; + unsigned long chains = status->chains; + struct ieee80211_radiotap_he he = {}; + struct ieee80211_radiotap_he_mu he_mu = {}; + struct ieee80211_radiotap_lsig lsig = {}; + + if (status->flag & RX_FLAG_RADIOTAP_HE) { + he = *(struct ieee80211_radiotap_he *)skb->data; + skb_pull(skb, sizeof(he)); + WARN_ON_ONCE(status->encoding != RX_ENC_HE); + } + + if (status->flag & RX_FLAG_RADIOTAP_HE_MU) { + he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data; + skb_pull(skb, sizeof(he_mu)); + } + + if (status->flag & RX_FLAG_RADIOTAP_LSIG) { + lsig = *(struct ieee80211_radiotap_lsig *)skb->data; + skb_pull(skb, sizeof(lsig)); + } + + if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) { + /* data is pointer at tlv all other info was pulled off */ + tlvs_len = skb_mac_header(skb) - skb->data; + } + + mpdulen = skb->len; + if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))) + mpdulen += FCS_LEN; + + rthdr = skb_push(skb, rtap_len - tlvs_len); + memset(rthdr, 0, rtap_len - tlvs_len); + it_present = &rthdr->it_present; + + /* radiotap header, set always present flags */ + rthdr->it_len = cpu_to_le16(rtap_len); + it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) | + BIT(IEEE80211_RADIOTAP_CHANNEL) | + BIT(IEEE80211_RADIOTAP_RX_FLAGS); + + if (!status->chains) + it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA); + + for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { + it_present_val |= + BIT(IEEE80211_RADIOTAP_EXT) | + BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE); + put_unaligned_le32(it_present_val, it_present); + it_present++; + it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) | + BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL); + } + + if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) + it_present_val |= BIT(IEEE80211_RADIOTAP_TLV); + + put_unaligned_le32(it_present_val, it_present); + + /* This references through an offset into it_optional[] rather + * than via it_present otherwise later uses of pos will cause + * the compiler to think we have walked past the end of the + * struct member. + */ + pos = (void *)&rthdr->it_optional[it_present + 1 - rthdr->it_optional]; + + /* the order of the following fields is important */ + + /* IEEE80211_RADIOTAP_TSFT */ + if (ieee80211_have_rx_timestamp(status)) { + /* padding */ + while ((pos - (u8 *)rthdr) & 7) + *pos++ = 0; + put_unaligned_le64( + ieee80211_calculate_rx_timestamp(local, status, + mpdulen, 0), + pos); + rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_TSFT)); + pos += 8; + } + + /* IEEE80211_RADIOTAP_FLAGS */ + if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) + *pos |= IEEE80211_RADIOTAP_F_FCS; + if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) + *pos |= IEEE80211_RADIOTAP_F_BADFCS; + if (status->enc_flags & RX_ENC_FLAG_SHORTPRE) + *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; + pos++; + + /* IEEE80211_RADIOTAP_RATE */ + if (!rate || status->encoding != RX_ENC_LEGACY) { + /* + * Without rate information don't add it. If we have, + * MCS information is a separate field in radiotap, + * added below. The byte here is needed as padding + * for the channel though, so initialise it to 0. 
+ */ + *pos = 0; + } else { + int shift = 0; + rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_RATE)); + if (status->bw == RATE_INFO_BW_10) + shift = 1; + else if (status->bw == RATE_INFO_BW_5) + shift = 2; + *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift)); + } + pos++; + + /* IEEE80211_RADIOTAP_CHANNEL */ + /* TODO: frequency offset in KHz */ + put_unaligned_le16(status->freq, pos); + pos += 2; + if (status->bw == RATE_INFO_BW_10) + channel_flags |= IEEE80211_CHAN_HALF; + else if (status->bw == RATE_INFO_BW_5) + channel_flags |= IEEE80211_CHAN_QUARTER; + + if (status->band == NL80211_BAND_5GHZ || + status->band == NL80211_BAND_6GHZ) + channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ; + else if (status->encoding != RX_ENC_LEGACY) + channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; + else if (rate && rate->flags & IEEE80211_RATE_ERP_G) + channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; + else if (rate) + channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ; + else + channel_flags |= IEEE80211_CHAN_2GHZ; + put_unaligned_le16(channel_flags, pos); + pos += 2; + + /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ + if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) && + !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { + *pos = status->signal; + rthdr->it_present |= + cpu_to_le32(BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL)); + pos++; + } + + /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ + + if (!status->chains) { + /* IEEE80211_RADIOTAP_ANTENNA */ + *pos = status->antenna; + pos++; + } + + /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ + + /* IEEE80211_RADIOTAP_RX_FLAGS */ + /* ensure 2 byte alignment for the 2 byte field as required */ + if ((pos - (u8 *)rthdr) & 1) + *pos++ = 0; + if (status->flag & RX_FLAG_FAILED_PLCP_CRC) + rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; + put_unaligned_le16(rx_flags, pos); + pos += 2; + + if (status->encoding == RX_ENC_HT) { + unsigned int stbc; + + rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS)); + *pos = local->hw.radiotap_mcs_details; + if (status->enc_flags & RX_ENC_FLAG_HT_GF) + *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FMT; + if (status->enc_flags & RX_ENC_FLAG_LDPC) + *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FEC; + pos++; + *pos = 0; + if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) + *pos |= IEEE80211_RADIOTAP_MCS_SGI; + if (status->bw == RATE_INFO_BW_40) + *pos |= IEEE80211_RADIOTAP_MCS_BW_40; + if (status->enc_flags & RX_ENC_FLAG_HT_GF) + *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF; + if (status->enc_flags & RX_ENC_FLAG_LDPC) + *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC; + stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT; + *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT; + pos++; + *pos++ = status->rate_idx; + } + + if (status->flag & RX_FLAG_AMPDU_DETAILS) { + u16 flags = 0; + + /* ensure 4 byte alignment */ + while ((pos - (u8 *)rthdr) & 3) + pos++; + rthdr->it_present |= + cpu_to_le32(BIT(IEEE80211_RADIOTAP_AMPDU_STATUS)); + put_unaligned_le32(status->ampdu_reference, pos); + pos += 4; + if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN) + flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN; + if (status->flag & RX_FLAG_AMPDU_IS_LAST) + flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST; + if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR) + flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR; + if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) + flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN; + if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN) + flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN; + if (status->flag & 
RX_FLAG_AMPDU_EOF_BIT) + flags |= IEEE80211_RADIOTAP_AMPDU_EOF; + put_unaligned_le16(flags, pos); + pos += 2; + if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) + *pos++ = status->ampdu_delimiter_crc; + else + *pos++ = 0; + *pos++ = 0; + } + + if (status->encoding == RX_ENC_VHT) { + u16 known = local->hw.radiotap_vht_details; + + rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_VHT)); + put_unaligned_le16(known, pos); + pos += 2; + /* flags */ + if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) + *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; + /* in VHT, STBC is binary */ + if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) + *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; + if (status->enc_flags & RX_ENC_FLAG_BF) + *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED; + pos++; + /* bandwidth */ + switch (status->bw) { + case RATE_INFO_BW_80: + *pos++ = 4; + break; + case RATE_INFO_BW_160: + *pos++ = 11; + break; + case RATE_INFO_BW_40: + *pos++ = 1; + break; + default: + *pos++ = 0; + } + /* MCS/NSS */ + *pos = (status->rate_idx << 4) | status->nss; + pos += 4; + /* coding field */ + if (status->enc_flags & RX_ENC_FLAG_LDPC) + *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; + pos++; + /* group ID */ + pos++; + /* partial_aid */ + pos += 2; + } + + if (local->hw.radiotap_timestamp.units_pos >= 0) { + u16 accuracy = 0; + u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT; + + rthdr->it_present |= + cpu_to_le32(BIT(IEEE80211_RADIOTAP_TIMESTAMP)); + + /* ensure 8 byte alignment */ + while ((pos - (u8 *)rthdr) & 7) + pos++; + + put_unaligned_le64(status->device_timestamp, pos); + pos += sizeof(u64); + + if (local->hw.radiotap_timestamp.accuracy >= 0) { + accuracy = local->hw.radiotap_timestamp.accuracy; + flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY; + } + put_unaligned_le16(accuracy, pos); + pos += sizeof(u16); + + *pos++ = local->hw.radiotap_timestamp.units_pos; + *pos++ = flags; + } + + if (status->encoding == RX_ENC_HE && + status->flag & RX_FLAG_RADIOTAP_HE) { +#define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f) + + if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) { + he.data6 |= HE_PREP(DATA6_NSTS, + FIELD_GET(RX_ENC_FLAG_STBC_MASK, + status->enc_flags)); + he.data3 |= HE_PREP(DATA3_STBC, 1); + } else { + he.data6 |= HE_PREP(DATA6_NSTS, status->nss); + } + +#define CHECK_GI(s) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \ + (int)NL80211_RATE_INFO_HE_GI_##s) + + CHECK_GI(0_8); + CHECK_GI(1_6); + CHECK_GI(3_2); + + he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx); + he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm); + he.data3 |= HE_PREP(DATA3_CODING, + !!(status->enc_flags & RX_ENC_FLAG_LDPC)); + + he.data5 |= HE_PREP(DATA5_GI, status->he_gi); + + switch (status->bw) { + case RATE_INFO_BW_20: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ); + break; + case RATE_INFO_BW_40: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ); + break; + case RATE_INFO_BW_80: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ); + break; + case RATE_INFO_BW_160: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ); + break; + case RATE_INFO_BW_HE_RU: +#define CHECK_RU_ALLOC(s) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \ + NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4) + + CHECK_RU_ALLOC(26); + CHECK_RU_ALLOC(52); + CHECK_RU_ALLOC(106); + CHECK_RU_ALLOC(242); + 
CHECK_RU_ALLOC(484); + CHECK_RU_ALLOC(996); + CHECK_RU_ALLOC(2x996); + + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + status->he_ru + 4); + break; + default: + WARN_ONCE(1, "Invalid SU BW %d\n", status->bw); + } + + /* ensure 2 byte alignment */ + while ((pos - (u8 *)rthdr) & 1) + pos++; + rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE)); + memcpy(pos, &he, sizeof(he)); + pos += sizeof(he); + } + + if (status->encoding == RX_ENC_HE && + status->flag & RX_FLAG_RADIOTAP_HE_MU) { + /* ensure 2 byte alignment */ + while ((pos - (u8 *)rthdr) & 1) + pos++; + rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE_MU)); + memcpy(pos, &he_mu, sizeof(he_mu)); + pos += sizeof(he_mu); + } + + if (status->flag & RX_FLAG_NO_PSDU) { + rthdr->it_present |= + cpu_to_le32(BIT(IEEE80211_RADIOTAP_ZERO_LEN_PSDU)); + *pos++ = status->zero_length_psdu_type; + } + + if (status->flag & RX_FLAG_RADIOTAP_LSIG) { + /* ensure 2 byte alignment */ + while ((pos - (u8 *)rthdr) & 1) + pos++; + rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_LSIG)); + memcpy(pos, &lsig, sizeof(lsig)); + pos += sizeof(lsig); + } + + for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { + *pos++ = status->chain_signal[chain]; + *pos++ = chain; + } +} + +static struct sk_buff * +ieee80211_make_monitor_skb(struct ieee80211_local *local, + struct sk_buff **origskb, + struct ieee80211_rate *rate, + int rtap_space, bool use_origskb) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb); + int rt_hdrlen, needed_headroom; + struct sk_buff *skb; + + /* room for the radiotap header based on driver features */ + rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb); + needed_headroom = rt_hdrlen - rtap_space; + + if (use_origskb) { + /* only need to expand headroom if necessary */ + skb = *origskb; + *origskb = NULL; + + /* + * This shouldn't trigger often because most devices have an + * RX header they pull before we get here, and that should + * be big enough for our radiotap information. We should + * probably export the length to drivers so that we can have + * them allocate enough headroom to start with. + */ + if (skb_headroom(skb) < needed_headroom && + pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { + dev_kfree_skb(skb); + return NULL; + } + } else { + /* + * Need to make a copy and possibly remove radiotap header + * and FCS from the original. + */ + skb = skb_copy_expand(*origskb, needed_headroom + NET_SKB_PAD, + 0, GFP_ATOMIC); + + if (!skb) + return NULL; + } + + /* prepend radiotap information */ + ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true); + + skb_reset_mac_header(skb); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + + return skb; +} + +/* + * This function copies a received frame to all monitor interfaces and + * returns a cleaned-up SKB that no longer includes the FCS nor the + * radiotap header the driver might have added. 
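+ *
+ * When only monitor interfaces want the frame (e.g. it failed the FCS
+ * check) and there is a single such interface, the original skb is handed
+ * to it directly and NULL is returned; otherwise each interface on
+ * local->mon_list receives a copy and the cleaned-up original is returned
+ * for further RX processing.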
+ */ +static struct sk_buff * +ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, + struct ieee80211_rate *rate) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); + struct ieee80211_sub_if_data *sdata; + struct sk_buff *monskb = NULL; + int present_fcs_len = 0; + unsigned int rtap_space = 0; + struct ieee80211_sub_if_data *monitor_sdata = + rcu_dereference(local->monitor_sdata); + bool only_monitor = false; + unsigned int min_head_len; + + if (WARN_ON_ONCE(status->flag & RX_FLAG_RADIOTAP_TLV_AT_END && + !skb_mac_header_was_set(origskb))) { + /* with this skb no way to know where frame payload starts */ + dev_kfree_skb(origskb); + return NULL; + } + + if (status->flag & RX_FLAG_RADIOTAP_HE) + rtap_space += sizeof(struct ieee80211_radiotap_he); + + if (status->flag & RX_FLAG_RADIOTAP_HE_MU) + rtap_space += sizeof(struct ieee80211_radiotap_he_mu); + + if (status->flag & RX_FLAG_RADIOTAP_LSIG) + rtap_space += sizeof(struct ieee80211_radiotap_lsig); + + if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) + rtap_space += skb_mac_header(origskb) - &origskb->data[rtap_space]; + + min_head_len = rtap_space; + + /* + * First, we may need to make a copy of the skb because + * (1) we need to modify it for radiotap (if not present), and + * (2) the other RX handlers will modify the skb we got. + * + * We don't need to, of course, if we aren't going to return + * the SKB because it has a bad FCS/PLCP checksum. + */ + + if (!(status->flag & RX_FLAG_NO_PSDU)) { + if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) { + if (unlikely(origskb->len <= FCS_LEN + rtap_space)) { + /* driver bug */ + WARN_ON(1); + dev_kfree_skb(origskb); + return NULL; + } + present_fcs_len = FCS_LEN; + } + + /* also consider the hdr->frame_control */ + min_head_len += 2; + } + + /* ensure that the expected data elements are in skb head */ + if (!pskb_may_pull(origskb, min_head_len)) { + dev_kfree_skb(origskb); + return NULL; + } + + only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space); + + if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) { + if (only_monitor) { + dev_kfree_skb(origskb); + return NULL; + } + + return ieee80211_clean_skb(origskb, present_fcs_len, + rtap_space); + } + + ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space); + + list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) { + bool last_monitor = list_is_last(&sdata->u.mntr.list, + &local->mon_list); + + if (!monskb) + monskb = ieee80211_make_monitor_skb(local, &origskb, + rate, rtap_space, + only_monitor && + last_monitor); + + if (monskb) { + struct sk_buff *skb; + + if (last_monitor) { + skb = monskb; + monskb = NULL; + } else { + skb = skb_clone(monskb, GFP_ATOMIC); + } + + if (skb) { + skb->dev = sdata->dev; + dev_sw_netstats_rx_add(skb->dev, skb->len); + netif_receive_skb(skb); + } + } + + if (last_monitor) + break; + } + + /* this happens if last_monitor was erroneously false */ + dev_kfree_skb(monskb); + + /* ditto */ + if (!origskb) + return NULL; + + return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space); +} + +static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + int tid, seqno_idx, security_idx; + + /* does the frame have a qos control field? 
 */
+	if (ieee80211_is_data_qos(hdr->frame_control)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		/* frame has qos control */
+		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
+		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
+			status->rx_flags |= IEEE80211_RX_AMSDU;
+
+		seqno_idx = tid;
+		security_idx = tid;
+	} else {
+		/*
+		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
+		 *
+		 *	Sequence numbers for management frames, QoS data
+		 *	frames with a broadcast/multicast address in the
+		 *	Address 1 field, and all non-QoS data frames sent
+		 *	by QoS STAs are assigned using an additional single
+		 *	modulo-4096 counter, [...]
+		 *
+		 * We also use that counter for non-QoS STAs.
+		 */
+		seqno_idx = IEEE80211_NUM_TIDS;
+		security_idx = 0;
+		if (ieee80211_is_mgmt(hdr->frame_control))
+			security_idx = IEEE80211_NUM_TIDS;
+		tid = 0;
+	}
+
+	rx->seqno_idx = seqno_idx;
+	rx->security_idx = security_idx;
+	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
+	 * For now, set skb->priority to 0 for other cases. */
+	rx->skb->priority = (tid > 7) ? 0 : tid;
+}
+
+/**
+ * DOC: Packet alignment
+ *
+ * Drivers always need to pass packets that are aligned to two-byte boundaries
+ * to the stack.
+ *
+ * Additionally, drivers should, if possible, align the payload data in a way
+ * that guarantees that the contained IP header is aligned to a four-byte
+ * boundary. In the case of regular frames, this simply means aligning the
+ * payload to a four-byte boundary (because either the IP header is directly
+ * contained, or IV/RFC1042 headers that have a length divisible by four are
+ * in front of it). If the payload data is not properly aligned and the
+ * architecture doesn't support efficient unaligned operations, mac80211
+ * will align the data.
+ *
+ * With A-MSDU frames, however, the payload data address must yield two modulo
+ * four because there are 14-byte 802.3 headers within the A-MSDU frames that
+ * push the IP header further back to a multiple of four again. Thankfully, the
+ * specs were sane enough this time around to require padding each A-MSDU
+ * subframe to a length that is a multiple of four.
+ *
+ * Padding like Atheros hardware adds, which sits between the 802.11 header and
+ * the payload, is not supported; the driver is required to move the 802.11
+ * header to be directly in front of the payload in that case.
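+ *
+ * As a purely illustrative sketch (not a mac80211 API), a driver can at
+ * least sanity-check the first rule before handing a frame up, mirroring
+ * ieee80211_verify_alignment() below:
+ *
+ *	if (WARN_ON_ONCE((unsigned long)skb->data & 1))
+ *		goto drop;	/* hypothetical driver-side drop path */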
+ */ +static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx) +{ +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + WARN_ON_ONCE((unsigned long)rx->skb->data & 1); +#endif +} + + +/* rx handlers */ + +static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + + if (is_multicast_ether_addr(hdr->addr1)) + return 0; + + return ieee80211_is_robust_mgmt_frame(skb); +} + + +static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + + if (!is_multicast_ether_addr(hdr->addr1)) + return 0; + + return ieee80211_is_robust_mgmt_frame(skb); +} + + +/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */ +static int ieee80211_get_mmie_keyidx(struct sk_buff *skb) +{ + struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data; + struct ieee80211_mmie *mmie; + struct ieee80211_mmie_16 *mmie16; + + if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da)) + return -1; + + if (!ieee80211_is_robust_mgmt_frame(skb) && + !ieee80211_is_beacon(hdr->frame_control)) + return -1; /* not a robust management frame */ + + mmie = (struct ieee80211_mmie *) + (skb->data + skb->len - sizeof(*mmie)); + if (mmie->element_id == WLAN_EID_MMIE && + mmie->length == sizeof(*mmie) - 2) + return le16_to_cpu(mmie->key_id); + + mmie16 = (struct ieee80211_mmie_16 *) + (skb->data + skb->len - sizeof(*mmie16)); + if (skb->len >= 24 + sizeof(*mmie16) && + mmie16->element_id == WLAN_EID_MMIE && + mmie16->length == sizeof(*mmie16) - 2) + return le16_to_cpu(mmie16->key_id); + + return -1; +} + +static int ieee80211_get_keyid(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + __le16 fc = hdr->frame_control; + int hdrlen = ieee80211_hdrlen(fc); + u8 keyid; + + /* WEP, TKIP, CCMP and GCMP */ + if (unlikely(skb->len < hdrlen + IEEE80211_WEP_IV_LEN)) + return -EINVAL; + + skb_copy_bits(skb, hdrlen + 3, &keyid, 1); + + keyid >>= 6; + + return keyid; +} + +static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + char *dev_addr = rx->sdata->vif.addr; + + if (ieee80211_is_data(hdr->frame_control)) { + if (is_multicast_ether_addr(hdr->addr1)) { + if (ieee80211_has_tods(hdr->frame_control) || + !ieee80211_has_fromds(hdr->frame_control)) + return RX_DROP_MONITOR; + if (ether_addr_equal(hdr->addr3, dev_addr)) + return RX_DROP_MONITOR; + } else { + if (!ieee80211_has_a4(hdr->frame_control)) + return RX_DROP_MONITOR; + if (ether_addr_equal(hdr->addr4, dev_addr)) + return RX_DROP_MONITOR; + } + } + + /* If there is not an established peer link and this is not a peer link + * establisment frame, beacon or probe, drop the frame. 
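+	 * Peer link establishment uses SELF_PROTECTED action frames; mesh
+	 * path selection and other mesh housekeeping use MESH_ACTION frames,
+	 * and authentication frames are needed for secure peering, so those
+	 * are allowed through as well - see the checks below.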
+ */ + + if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) { + struct ieee80211_mgmt *mgmt; + + if (!ieee80211_is_mgmt(hdr->frame_control)) + return RX_DROP_MONITOR; + + if (ieee80211_is_action(hdr->frame_control)) { + u8 category; + + /* make sure category field is present */ + if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE) + return RX_DROP_MONITOR; + + mgmt = (struct ieee80211_mgmt *)hdr; + category = mgmt->u.action.category; + if (category != WLAN_CATEGORY_MESH_ACTION && + category != WLAN_CATEGORY_SELF_PROTECTED) + return RX_DROP_MONITOR; + return RX_CONTINUE; + } + + if (ieee80211_is_probe_req(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control) || + ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_auth(hdr->frame_control)) + return RX_CONTINUE; + + return RX_DROP_MONITOR; + } + + return RX_CONTINUE; +} + +static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx, + int index) +{ + struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index]; + struct sk_buff *tail = skb_peek_tail(frames); + struct ieee80211_rx_status *status; + + if (tid_agg_rx->reorder_buf_filtered && + tid_agg_rx->reorder_buf_filtered & BIT_ULL(index)) + return true; + + if (!tail) + return false; + + status = IEEE80211_SKB_RXCB(tail); + if (status->flag & RX_FLAG_AMSDU_MORE) + return false; + + return true; +} + +static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, + struct tid_ampdu_rx *tid_agg_rx, + int index, + struct sk_buff_head *frames) +{ + struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index]; + struct sk_buff *skb; + struct ieee80211_rx_status *status; + + lockdep_assert_held(&tid_agg_rx->reorder_lock); + + if (skb_queue_empty(skb_list)) + goto no_frame; + + if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) { + __skb_queue_purge(skb_list); + goto no_frame; + } + + /* release frames from the reorder ring buffer */ + tid_agg_rx->stored_mpdu_num--; + while ((skb = __skb_dequeue(skb_list))) { + status = IEEE80211_SKB_RXCB(skb); + status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE; + __skb_queue_tail(frames, skb); + } + +no_frame: + if (tid_agg_rx->reorder_buf_filtered) + tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); + tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num); +} + +static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata, + struct tid_ampdu_rx *tid_agg_rx, + u16 head_seq_num, + struct sk_buff_head *frames) +{ + int index; + + lockdep_assert_held(&tid_agg_rx->reorder_lock); + + while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) { + index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; + ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, + frames); + } +} + +/* + * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If + * the skb was added to the buffer longer than this time ago, the earlier + * frames that have not yet been received are assumed to be lost and the skb + * can be released for processing. This may also release other skb's from the + * reorder buffer if there are no additional gaps between the frames. + * + * Callers must hold tid_agg_rx->reorder_lock. 
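+ *
+ * HZ / 10 jiffies corresponds to 100 ms, independent of the configured
+ * CONFIG_HZ value.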
+ */ +#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) + +static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, + struct tid_ampdu_rx *tid_agg_rx, + struct sk_buff_head *frames) +{ + int index, i, j; + + lockdep_assert_held(&tid_agg_rx->reorder_lock); + + /* release the buffer until next missing frame */ + index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; + if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) && + tid_agg_rx->stored_mpdu_num) { + /* + * No buffers ready to be released, but check whether any + * frames in the reorder buffer have timed out. + */ + int skipped = 1; + for (j = (index + 1) % tid_agg_rx->buf_size; j != index; + j = (j + 1) % tid_agg_rx->buf_size) { + if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) { + skipped++; + continue; + } + if (skipped && + !time_after(jiffies, tid_agg_rx->reorder_time[j] + + HT_RX_REORDER_BUF_TIMEOUT)) + goto set_release_timer; + + /* don't leave incomplete A-MSDUs around */ + for (i = (index + 1) % tid_agg_rx->buf_size; i != j; + i = (i + 1) % tid_agg_rx->buf_size) + __skb_queue_purge(&tid_agg_rx->reorder_buf[i]); + + ht_dbg_ratelimited(sdata, + "release an RX reorder frame due to timeout on earlier frames\n"); + ieee80211_release_reorder_frame(sdata, tid_agg_rx, j, + frames); + + /* + * Increment the head seq# also for the skipped slots. + */ + tid_agg_rx->head_seq_num = + (tid_agg_rx->head_seq_num + + skipped) & IEEE80211_SN_MASK; + skipped = 0; + } + } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) { + ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, + frames); + index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; + } + + if (tid_agg_rx->stored_mpdu_num) { + j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; + + for (; j != (index - 1) % tid_agg_rx->buf_size; + j = (j + 1) % tid_agg_rx->buf_size) { + if (ieee80211_rx_reorder_ready(tid_agg_rx, j)) + break; + } + + set_release_timer: + + if (!tid_agg_rx->removed) + mod_timer(&tid_agg_rx->reorder_timer, + tid_agg_rx->reorder_time[j] + 1 + + HT_RX_REORDER_BUF_TIMEOUT); + } else { + del_timer(&tid_agg_rx->reorder_timer); + } +} + +/* + * As this function belongs to the RX path it must be under + * rcu_read_lock protection. It returns false if the frame + * can be processed immediately, true if it was consumed. + */ +static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata, + struct tid_ampdu_rx *tid_agg_rx, + struct sk_buff *skb, + struct sk_buff_head *frames) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + u16 sc = le16_to_cpu(hdr->seq_ctrl); + u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; + u16 head_seq_num, buf_size; + int index; + bool ret = true; + + spin_lock(&tid_agg_rx->reorder_lock); + + /* + * Offloaded BA sessions have no known starting sequence number so pick + * one from first Rxed frame for this tid after BA was started. + */ + if (unlikely(tid_agg_rx->auto_seq)) { + tid_agg_rx->auto_seq = false; + tid_agg_rx->ssn = mpdu_seq_num; + tid_agg_rx->head_seq_num = mpdu_seq_num; + } + + buf_size = tid_agg_rx->buf_size; + head_seq_num = tid_agg_rx->head_seq_num; + + /* + * If the current MPDU's SN is smaller than the SSN, it shouldn't + * be reordered. 
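+	 *
+	 * All sequence number arithmetic here is modulo 4096. As a worked
+	 * example of the window handling further down: with head_seq_num 100
+	 * and buf_size 64, an incoming SN of 170 falls outside the window
+	 * [100, 163], so stored frames up to the new head 170 - 64 + 1 = 107
+	 * are released first and the MPDU is then stored in slot 170 % 64.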
+ */ + if (unlikely(!tid_agg_rx->started)) { + if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { + ret = false; + goto out; + } + tid_agg_rx->started = true; + } + + /* frame with out of date sequence number */ + if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { + dev_kfree_skb(skb); + goto out; + } + + /* + * If frame the sequence number exceeds our buffering window + * size release some previous frames to make room for this one. + */ + if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) { + head_seq_num = ieee80211_sn_inc( + ieee80211_sn_sub(mpdu_seq_num, buf_size)); + /* release stored frames up to new head to stack */ + ieee80211_release_reorder_frames(sdata, tid_agg_rx, + head_seq_num, frames); + } + + /* Now the new frame is always in the range of the reordering buffer */ + + index = mpdu_seq_num % tid_agg_rx->buf_size; + + /* check if we already stored this frame */ + if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) { + dev_kfree_skb(skb); + goto out; + } + + /* + * If the current MPDU is in the right order and nothing else + * is stored we can process it directly, no need to buffer it. + * If it is first but there's something stored, we may be able + * to release frames after this one. + */ + if (mpdu_seq_num == tid_agg_rx->head_seq_num && + tid_agg_rx->stored_mpdu_num == 0) { + if (!(status->flag & RX_FLAG_AMSDU_MORE)) + tid_agg_rx->head_seq_num = + ieee80211_sn_inc(tid_agg_rx->head_seq_num); + ret = false; + goto out; + } + + /* put the frame in the reordering buffer */ + __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb); + if (!(status->flag & RX_FLAG_AMSDU_MORE)) { + tid_agg_rx->reorder_time[index] = jiffies; + tid_agg_rx->stored_mpdu_num++; + ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames); + } + + out: + spin_unlock(&tid_agg_rx->reorder_lock); + return ret; +} + +/* + * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns + * true if the MPDU was buffered, false if it should be processed. 
+ */ +static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, + struct sk_buff_head *frames) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct sta_info *sta = rx->sta; + struct tid_ampdu_rx *tid_agg_rx; + u16 sc; + u8 tid, ack_policy; + + if (!ieee80211_is_data_qos(hdr->frame_control) || + is_multicast_ether_addr(hdr->addr1)) + goto dont_reorder; + + /* + * filter the QoS data rx stream according to + * STA/TID and check if this STA/TID is on aggregation + */ + + if (!sta) + goto dont_reorder; + + ack_policy = *ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_ACK_POLICY_MASK; + tid = ieee80211_get_tid(hdr); + + tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); + if (!tid_agg_rx) { + if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && + !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && + !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) + ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, + WLAN_BACK_RECIPIENT, + WLAN_REASON_QSTA_REQUIRE_SETUP); + goto dont_reorder; + } + + /* qos null data frames are excluded */ + if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) + goto dont_reorder; + + /* not part of a BA session */ + if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK) + goto dont_reorder; + + /* new, potentially un-ordered, ampdu frame - process it */ + + /* reset session timer */ + if (tid_agg_rx->timeout) + tid_agg_rx->last_rx = jiffies; + + /* if this mpdu is fragmented - terminate rx aggregation session */ + sc = le16_to_cpu(hdr->seq_ctrl); + if (sc & IEEE80211_SCTL_FRAG) { + ieee80211_queue_skb_to_iface(rx->sdata, rx->link_id, NULL, skb); + return; + } + + /* + * No locking needed -- we will only ever process one + * RX packet at a time, and thus own tid_agg_rx. All + * other code manipulating it needs to (and does) make + * sure that we cannot get to it any more before doing + * anything with it. + */ + if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb, + frames)) + return; + + dont_reorder: + __skb_queue_tail(frames, skb); +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + + if (status->flag & RX_FLAG_DUP_VALIDATED) + return RX_CONTINUE; + + /* + * Drop duplicate 802.11 retransmissions + * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") + */ + + if (rx->skb->len < 24) + return RX_CONTINUE; + + if (ieee80211_is_ctl(hdr->frame_control) || + ieee80211_is_any_nullfunc(hdr->frame_control) || + is_multicast_ether_addr(hdr->addr1)) + return RX_CONTINUE; + + if (!rx->sta) + return RX_CONTINUE; + + if (unlikely(ieee80211_has_retry(hdr->frame_control) && + rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) { + I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount); + rx->link_sta->rx_stats.num_duplicates++; + return RX_DROP_UNUSABLE; + } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) { + rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl; + } + + return RX_CONTINUE; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_check(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + + /* Drop disallowed frame classes based on STA auth/assoc state; + * IEEE 802.11, Chap 5.5. + * + * mac80211 filters only based on association state, i.e. 
it drops
+	 * Class 3 frames from not associated stations. hostapd sends
+	 * deauth/disassoc frames when needed. In addition, hostapd is
+	 * responsible for filtering on both auth and assoc states.
+	 */
+
+	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
+		return ieee80211_rx_mesh_check(rx);
+
+	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
+		      ieee80211_is_pspoll(hdr->frame_control)) &&
+		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
+		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
+		/*
+		 * accept port control frames from the AP even when it's not
+		 * yet marked ASSOC to prevent a race where we don't set the
+		 * assoc bit quickly enough before it sends the first frame
+		 */
+		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
+		    ieee80211_is_data_present(hdr->frame_control)) {
+			unsigned int hdrlen;
+			__be16 ethertype;
+
+			hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+			if (rx->skb->len < hdrlen + 8)
+				return RX_DROP_MONITOR;
+
+			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
+			if (ethertype == rx->sdata->control_port_protocol)
+				return RX_CONTINUE;
+		}
+
+		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
+		    cfg80211_rx_spurious_frame(rx->sdata->dev,
+					       hdr->addr2,
+					       GFP_ATOMIC))
+			return RX_DROP_UNUSABLE;
+
+		return RX_DROP_MONITOR;
+	}
+
+	return RX_CONTINUE;
+}
+
+
+static ieee80211_rx_result debug_noinline
+ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
+{
+	struct ieee80211_local *local;
+	struct ieee80211_hdr *hdr;
+	struct sk_buff *skb;
+
+	local = rx->local;
+	skb = rx->skb;
+	hdr = (struct ieee80211_hdr *) skb->data;
+
+	if (!local->pspolling)
+		return RX_CONTINUE;
+
+	if (!ieee80211_has_fromds(hdr->frame_control))
+		/* this is not from AP */
+		return RX_CONTINUE;
+
+	if (!ieee80211_is_data(hdr->frame_control))
+		return RX_CONTINUE;
+
+	if (!ieee80211_has_moredata(hdr->frame_control)) {
+		/* AP has no more frames buffered for us */
+		local->pspolling = false;
+		return RX_CONTINUE;
+	}
+
+	/* more data bit is set, let's request a new frame from the AP */
+	ieee80211_send_pspoll(local, rx->sdata);
+
+	return RX_CONTINUE;
+}
+
+static void sta_ps_start(struct sta_info *sta)
+{
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct ieee80211_local *local = sdata->local;
+	struct ps_data *ps;
+	int tid;
+
+	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
+	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+		ps = &sdata->bss->ps;
+	else
+		return;
+
+	atomic_inc(&ps->num_sta_ps);
+	set_sta_flag(sta, WLAN_STA_PS_STA);
+	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
+		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
+	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
+	       sta->sta.addr, sta->sta.aid);
+
+	ieee80211_clear_fast_xmit(sta);
+
+	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
+		struct ieee80211_txq *txq = sta->sta.txq[tid];
+		struct txq_info *txqi = to_txq_info(txq);
+
+		spin_lock(&local->active_txq_lock[txq->ac]);
+		if (!list_empty(&txqi->schedule_order))
+			list_del_init(&txqi->schedule_order);
+		spin_unlock(&local->active_txq_lock[txq->ac]);
+
+		if (txq_has_queue(txq))
+			set_bit(tid, &sta->txq_buffered_tids);
+		else
+			clear_bit(tid, &sta->txq_buffered_tids);
+	}
+}
+
+static void sta_ps_end(struct sta_info *sta)
+{
+	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
+	       sta->sta.addr, sta->sta.aid);
+
+	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
+		/*
+		 * Clear the flag only if the other one is still set
+		 * so that the TX path won't start TX'ing new
frames + * directly ... In the case that the driver flag isn't + * set ieee80211_sta_ps_deliver_wakeup() will clear it. + */ + clear_sta_flag(sta, WLAN_STA_PS_STA); + ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n", + sta->sta.addr, sta->sta.aid); + return; + } + + set_sta_flag(sta, WLAN_STA_PS_DELIVER); + clear_sta_flag(sta, WLAN_STA_PS_STA); + ieee80211_sta_ps_deliver_wakeup(sta); +} + +int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + bool in_ps; + + WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS)); + + /* Don't let the same PS state be set twice */ + in_ps = test_sta_flag(sta, WLAN_STA_PS_STA); + if ((start && in_ps) || (!start && !in_ps)) + return -EINVAL; + + if (start) + sta_ps_start(sta); + else + sta_ps_end(sta); + + return 0; +} +EXPORT_SYMBOL(ieee80211_sta_ps_transition); + +void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + + if (test_sta_flag(sta, WLAN_STA_SP)) + return; + + if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) + ieee80211_sta_ps_deliver_poll_response(sta); + else + set_sta_flag(sta, WLAN_STA_PSPOLL); +} +EXPORT_SYMBOL(ieee80211_sta_pspoll); + +void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + int ac = ieee80211_ac_from_tid(tid); + + /* + * If this AC is not trigger-enabled do nothing unless the + * driver is calling us after it already checked. + * + * NB: This could/should check a separate bitmap of trigger- + * enabled queues, but for now we only implement uAPSD w/o + * TSPEC changes to the ACs, so they're always the same. + */ + if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) && + tid != IEEE80211_NUM_TIDS) + return; + + /* if we are in a service period, do nothing */ + if (test_sta_flag(sta, WLAN_STA_SP)) + return; + + if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) + ieee80211_sta_ps_deliver_uapsd(sta); + else + set_sta_flag(sta, WLAN_STA_UAPSD); +} +EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger); + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx) +{ + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct ieee80211_hdr *hdr = (void *)rx->skb->data; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + + if (!rx->sta) + return RX_CONTINUE; + + if (sdata->vif.type != NL80211_IFTYPE_AP && + sdata->vif.type != NL80211_IFTYPE_AP_VLAN) + return RX_CONTINUE; + + /* + * The device handles station powersave, so don't do anything about + * uAPSD and PS-Poll frames (the latter shouldn't even come up from + * it to mac80211 since they're handled.) + */ + if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS)) + return RX_CONTINUE; + + /* + * Don't do anything if the station isn't already asleep. In + * the uAPSD case, the station will probably be marked asleep, + * in the PS-Poll case the station must be confused ... + */ + if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA)) + return RX_CONTINUE; + + if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) { + ieee80211_sta_pspoll(&rx->sta->sta); + + /* Free PS Poll skb here instead of returning RX_DROP that would + * count as an dropped frame. 
*/ + dev_kfree_skb(rx->skb); + + return RX_QUEUED; + } else if (!ieee80211_has_morefrags(hdr->frame_control) && + !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && + ieee80211_has_pm(hdr->frame_control) && + (ieee80211_is_data_qos(hdr->frame_control) || + ieee80211_is_qos_nullfunc(hdr->frame_control))) { + u8 tid = ieee80211_get_tid(hdr); + + ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid); + } + + return RX_CONTINUE; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) +{ + struct sta_info *sta = rx->sta; + struct link_sta_info *link_sta = rx->link_sta; + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int i; + + if (!sta || !link_sta) + return RX_CONTINUE; + + /* + * Update last_rx only for IBSS packets which are for the current + * BSSID and for station already AUTHORIZED to avoid keeping the + * current IBSS network alive in cases where other STAs start + * using different BSSID. This will also give the station another + * chance to restart the authentication/authorization in case + * something went wrong the first time. + */ + if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { + u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, + NL80211_IFTYPE_ADHOC); + if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) && + test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { + link_sta->rx_stats.last_rx = jiffies; + if (ieee80211_is_data_present(hdr->frame_control) && + !is_multicast_ether_addr(hdr->addr1)) + link_sta->rx_stats.last_rate = + sta_stats_encode_rate(status); + } + } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) { + link_sta->rx_stats.last_rx = jiffies; + } else if (!ieee80211_is_s1g_beacon(hdr->frame_control) && + !is_multicast_ether_addr(hdr->addr1)) { + /* + * Mesh beacons will update last_rx when if they are found to + * match the current local configuration when processed. 
+ */ + link_sta->rx_stats.last_rx = jiffies; + if (ieee80211_is_data_present(hdr->frame_control)) + link_sta->rx_stats.last_rate = sta_stats_encode_rate(status); + } + + link_sta->rx_stats.fragments++; + + u64_stats_update_begin(&link_sta->rx_stats.syncp); + link_sta->rx_stats.bytes += rx->skb->len; + u64_stats_update_end(&link_sta->rx_stats.syncp); + + if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { + link_sta->rx_stats.last_signal = status->signal; + ewma_signal_add(&link_sta->rx_stats_avg.signal, + -status->signal); + } + + if (status->chains) { + link_sta->rx_stats.chains = status->chains; + for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { + int signal = status->chain_signal[i]; + + if (!(status->chains & BIT(i))) + continue; + + link_sta->rx_stats.chain_signal_last[i] = signal; + ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i], + -signal); + } + } + + if (ieee80211_is_s1g_beacon(hdr->frame_control)) + return RX_CONTINUE; + + /* + * Change STA power saving mode only at the end of a frame + * exchange sequence, and only for a data or management + * frame as specified in IEEE 802.11-2016 11.2.3.2 + */ + if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && + !ieee80211_has_morefrags(hdr->frame_control) && + !is_multicast_ether_addr(hdr->addr1) && + (ieee80211_is_mgmt(hdr->frame_control) || + ieee80211_is_data(hdr->frame_control)) && + !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && + (rx->sdata->vif.type == NL80211_IFTYPE_AP || + rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) { + if (test_sta_flag(sta, WLAN_STA_PS_STA)) { + if (!ieee80211_has_pm(hdr->frame_control)) + sta_ps_end(sta); + } else { + if (ieee80211_has_pm(hdr->frame_control)) + sta_ps_start(sta); + } + } + + /* mesh power save support */ + if (ieee80211_vif_is_mesh(&rx->sdata->vif)) + ieee80211_mps_rx_h_sta_process(sta, hdr); + + /* + * Drop (qos-)data::nullfunc frames silently, since they + * are used only to control station power saving mode. + */ + if (ieee80211_is_any_nullfunc(hdr->frame_control)) { + I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); + + /* + * If we receive a 4-addr nullfunc frame from a STA + * that was not moved to a 4-addr STA vlan yet send + * the event to userspace and for older hostapd drop + * the frame to the monitor interface. + */ + if (ieee80211_has_a4(hdr->frame_control) && + (rx->sdata->vif.type == NL80211_IFTYPE_AP || + (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + !rx->sdata->u.vlan.sta))) { + if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT)) + cfg80211_rx_unexpected_4addr_frame( + rx->sdata->dev, sta->sta.addr, + GFP_ATOMIC); + return RX_DROP_M_UNEXPECTED_4ADDR_FRAME; + } + /* + * Update counter and free packet here to avoid + * counting this as a dropped packed. + */ + link_sta->rx_stats.packets++; + dev_kfree_skb(rx->skb); + return RX_QUEUED; + } + + return RX_CONTINUE; +} /* ieee80211_rx_h_sta_process */ + +static struct ieee80211_key * +ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx) +{ + struct ieee80211_key *key = NULL; + int idx2; + + /* Make sure key gets set if either BIGTK key index is set so that + * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected + * Beacon frames and Beacon frames that claim to use another BIGTK key + * index (i.e., a key that we do not have). 
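+	 *
+	 * With idx < 0 both configured BIGTK slots (indices
+	 * NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS and the one after it) are
+	 * tried; with a specific idx the other BIGTK slot is used as the
+	 * fallback (idx2) below.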
+ */ + + if (idx < 0) { + idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS; + idx2 = idx + 1; + } else { + if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) + idx2 = idx + 1; + else + idx2 = idx - 1; + } + + if (rx->link_sta) + key = rcu_dereference(rx->link_sta->gtk[idx]); + if (!key) + key = rcu_dereference(rx->link->gtk[idx]); + if (!key && rx->link_sta) + key = rcu_dereference(rx->link_sta->gtk[idx2]); + if (!key) + key = rcu_dereference(rx->link->gtk[idx2]); + + return key; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int keyidx; + ieee80211_rx_result result = RX_DROP_UNUSABLE; + struct ieee80211_key *sta_ptk = NULL; + struct ieee80211_key *ptk_idx = NULL; + int mmie_keyidx = -1; + __le16 fc; + + if (ieee80211_is_ext(hdr->frame_control)) + return RX_CONTINUE; + + /* + * Key selection 101 + * + * There are five types of keys: + * - GTK (group keys) + * - IGTK (group keys for management frames) + * - BIGTK (group keys for Beacon frames) + * - PTK (pairwise keys) + * - STK (station-to-station pairwise keys) + * + * When selecting a key, we have to distinguish between multicast + * (including broadcast) and unicast frames, the latter can only + * use PTKs and STKs while the former always use GTKs, IGTKs, and + * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used, + * then unicast frames can also use key indices like GTKs. Hence, if we + * don't have a PTK/STK we check the key index for a WEP key. + * + * Note that in a regular BSS, multicast frames are sent by the + * AP only, associated stations unicast the frame to the AP first + * which then multicasts it on their behalf. + * + * There is also a slight problem in IBSS mode: GTKs are negotiated + * with each station, that is something we don't currently handle. + * The spec seems to expect that one negotiates the same key with + * every station but there's no such requirement; VLANs could be + * possible. + */ + + /* start without a key */ + rx->key = NULL; + fc = hdr->frame_control; + + if (rx->sta) { + int keyid = rx->sta->ptk_idx; + sta_ptk = rcu_dereference(rx->sta->ptk[keyid]); + + if (ieee80211_has_protected(fc) && + !(status->flag & RX_FLAG_IV_STRIPPED)) { + keyid = ieee80211_get_keyid(rx->skb); + + if (unlikely(keyid < 0)) + return RX_DROP_UNUSABLE; + + ptk_idx = rcu_dereference(rx->sta->ptk[keyid]); + } + } + + if (!ieee80211_has_protected(fc)) + mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); + + if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { + rx->key = ptk_idx ? ptk_idx : sta_ptk; + if ((status->flag & RX_FLAG_DECRYPTED) && + (status->flag & RX_FLAG_IV_STRIPPED)) + return RX_CONTINUE; + /* Skip decryption if the frame is not protected. 
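+		 * rx->key stays set, so such an unprotected data frame can
+		 * still be dropped later by ieee80211_drop_unencrypted() if
+		 * protection was expected.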
*/ + if (!ieee80211_has_protected(fc)) + return RX_CONTINUE; + } else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) { + /* Broadcast/multicast robust management frame / BIP */ + if ((status->flag & RX_FLAG_DECRYPTED) && + (status->flag & RX_FLAG_IV_STRIPPED)) + return RX_CONTINUE; + + if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS || + mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + + NUM_DEFAULT_BEACON_KEYS) { + if (rx->sdata->dev) + cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, + skb->data, + skb->len); + return RX_DROP_M_BAD_BCN_KEYIDX; + } + + rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx); + if (!rx->key) + return RX_CONTINUE; /* Beacon protection not in use */ + } else if (mmie_keyidx >= 0) { + /* Broadcast/multicast robust management frame / BIP */ + if ((status->flag & RX_FLAG_DECRYPTED) && + (status->flag & RX_FLAG_IV_STRIPPED)) + return RX_CONTINUE; + + if (mmie_keyidx < NUM_DEFAULT_KEYS || + mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) + return RX_DROP_M_BAD_MGMT_KEYIDX; /* unexpected BIP keyidx */ + if (rx->link_sta) { + if (ieee80211_is_group_privacy_action(skb) && + test_sta_flag(rx->sta, WLAN_STA_MFP)) + return RX_DROP_MONITOR; + + rx->key = rcu_dereference(rx->link_sta->gtk[mmie_keyidx]); + } + if (!rx->key) + rx->key = rcu_dereference(rx->link->gtk[mmie_keyidx]); + } else if (!ieee80211_has_protected(fc)) { + /* + * The frame was not protected, so skip decryption. However, we + * need to set rx->key if there is a key that could have been + * used so that the frame may be dropped if encryption would + * have been expected. + */ + struct ieee80211_key *key = NULL; + int i; + + if (ieee80211_is_beacon(fc)) { + key = ieee80211_rx_get_bigtk(rx, -1); + } else if (ieee80211_is_mgmt(fc) && + is_multicast_ether_addr(hdr->addr1)) { + key = rcu_dereference(rx->link->default_mgmt_key); + } else { + if (rx->link_sta) { + for (i = 0; i < NUM_DEFAULT_KEYS; i++) { + key = rcu_dereference(rx->link_sta->gtk[i]); + if (key) + break; + } + } + if (!key) { + for (i = 0; i < NUM_DEFAULT_KEYS; i++) { + key = rcu_dereference(rx->link->gtk[i]); + if (key) + break; + } + } + } + if (key) + rx->key = key; + return RX_CONTINUE; + } else { + /* + * The device doesn't give us the IV so we won't be + * able to look up the key. That's ok though, we + * don't need to decrypt the frame, we just won't + * be able to keep statistics accurate. + * Except for key threshold notifications, should + * we somehow allow the driver to tell us which key + * the hardware used if this flag is set? + */ + if ((status->flag & RX_FLAG_DECRYPTED) && + (status->flag & RX_FLAG_IV_STRIPPED)) + return RX_CONTINUE; + + keyidx = ieee80211_get_keyid(rx->skb); + + if (unlikely(keyidx < 0)) + return RX_DROP_UNUSABLE; + + /* check per-station GTK first, if multicast packet */ + if (is_multicast_ether_addr(hdr->addr1) && rx->link_sta) + rx->key = rcu_dereference(rx->link_sta->gtk[keyidx]); + + /* if not found, try default key */ + if (!rx->key) { + if (is_multicast_ether_addr(hdr->addr1)) + rx->key = rcu_dereference(rx->link->gtk[keyidx]); + if (!rx->key) + rx->key = rcu_dereference(rx->sdata->keys[keyidx]); + + /* + * RSNA-protected unicast frames should always be + * sent with pairwise or station-to-station keys, + * but for WEP we allow using a key index as well. 
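+			 * If the index resolved to a non-WEP group key for a
+			 * unicast frame, the key is cleared again below and
+			 * the frame ends up being dropped to monitor for lack
+			 * of a usable key.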
+ */ + if (rx->key && + rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && + rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && + !is_multicast_ether_addr(hdr->addr1)) + rx->key = NULL; + } + } + + if (rx->key) { + if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) + return RX_DROP_MONITOR; + + /* TODO: add threshold stuff again */ + } else { + return RX_DROP_MONITOR; + } + + switch (rx->key->conf.cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + result = ieee80211_crypto_wep_decrypt(rx); + break; + case WLAN_CIPHER_SUITE_TKIP: + result = ieee80211_crypto_tkip_decrypt(rx); + break; + case WLAN_CIPHER_SUITE_CCMP: + result = ieee80211_crypto_ccmp_decrypt( + rx, IEEE80211_CCMP_MIC_LEN); + break; + case WLAN_CIPHER_SUITE_CCMP_256: + result = ieee80211_crypto_ccmp_decrypt( + rx, IEEE80211_CCMP_256_MIC_LEN); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + result = ieee80211_crypto_aes_cmac_decrypt(rx); + break; + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + result = ieee80211_crypto_aes_cmac_256_decrypt(rx); + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + result = ieee80211_crypto_aes_gmac_decrypt(rx); + break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + result = ieee80211_crypto_gcmp_decrypt(rx); + break; + default: + result = RX_DROP_UNUSABLE; + } + + /* the hdr variable is invalid after the decrypt handlers */ + + /* either the frame has been decrypted or will be dropped */ + status->flag |= RX_FLAG_DECRYPTED; + + if (unlikely(ieee80211_is_beacon(fc) && RX_RES_IS_UNUSABLE(result) && + rx->sdata->dev)) + cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, + skb->data, skb->len); + + return result; +} + +void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(cache->entries); i++) + skb_queue_head_init(&cache->entries[i].skb_list); +} + +void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(cache->entries); i++) + __skb_queue_purge(&cache->entries[i].skb_list); +} + +static inline struct ieee80211_fragment_entry * +ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache, + unsigned int frag, unsigned int seq, int rx_queue, + struct sk_buff **skb) +{ + struct ieee80211_fragment_entry *entry; + + entry = &cache->entries[cache->next++]; + if (cache->next >= IEEE80211_FRAGMENT_MAX) + cache->next = 0; + + __skb_queue_purge(&entry->skb_list); + + __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ + *skb = NULL; + entry->first_frag_time = jiffies; + entry->seq = seq; + entry->rx_queue = rx_queue; + entry->last_frag = frag; + entry->check_sequential_pn = false; + entry->extra_len = 0; + + return entry; +} + +static inline struct ieee80211_fragment_entry * +ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache, + unsigned int frag, unsigned int seq, + int rx_queue, struct ieee80211_hdr *hdr) +{ + struct ieee80211_fragment_entry *entry; + int i, idx; + + idx = cache->next; + for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { + struct ieee80211_hdr *f_hdr; + struct sk_buff *f_skb; + + idx--; + if (idx < 0) + idx = IEEE80211_FRAGMENT_MAX - 1; + + entry = &cache->entries[idx]; + if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || + entry->rx_queue != rx_queue || + entry->last_frag + 1 != frag) + continue; + + f_skb = __skb_peek(&entry->skb_list); + f_hdr = (struct ieee80211_hdr *) f_skb->data; + + /* + * Check ftype and addresses are equal, else check next fragment + */ + 
if (((hdr->frame_control ^ f_hdr->frame_control) & + cpu_to_le16(IEEE80211_FCTL_FTYPE)) || + !ether_addr_equal(hdr->addr1, f_hdr->addr1) || + !ether_addr_equal(hdr->addr2, f_hdr->addr2)) + continue; + + if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { + __skb_queue_purge(&entry->skb_list); + continue; + } + return entry; + } + + return NULL; +} + +static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc) +{ + return rx->key && + (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || + rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || + rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || + rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && + ieee80211_has_protected(fc); +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) +{ + struct ieee80211_fragment_cache *cache = &rx->sdata->frags; + struct ieee80211_hdr *hdr; + u16 sc; + __le16 fc; + unsigned int frag, seq; + struct ieee80211_fragment_entry *entry; + struct sk_buff *skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + + hdr = (struct ieee80211_hdr *)rx->skb->data; + fc = hdr->frame_control; + + if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc)) + return RX_CONTINUE; + + sc = le16_to_cpu(hdr->seq_ctrl); + frag = sc & IEEE80211_SCTL_FRAG; + + if (rx->sta) + cache = &rx->sta->frags; + + if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) + goto out; + + if (is_multicast_ether_addr(hdr->addr1)) + return RX_DROP_MONITOR; + + I802_DEBUG_INC(rx->local->rx_handlers_fragments); + + if (skb_linearize(rx->skb)) + return RX_DROP_UNUSABLE; + + /* + * skb_linearize() might change the skb->data and + * previously cached variables (in this case, hdr) need to + * be refreshed with the new data. + */ + hdr = (struct ieee80211_hdr *)rx->skb->data; + seq = (sc & IEEE80211_SCTL_SEQ) >> 4; + + if (frag == 0) { + /* This is the first fragment of a new frame. */ + entry = ieee80211_reassemble_add(cache, frag, seq, + rx->seqno_idx, &(rx->skb)); + if (requires_sequential_pn(rx, fc)) { + int queue = rx->security_idx; + + /* Store CCMP/GCMP PN so that we can verify that the + * next fragment has a sequential PN value. + */ + entry->check_sequential_pn = true; + entry->is_protected = true; + entry->key_color = rx->key->color; + memcpy(entry->last_pn, + rx->key->u.ccmp.rx_pn[queue], + IEEE80211_CCMP_PN_LEN); + BUILD_BUG_ON(offsetof(struct ieee80211_key, + u.ccmp.rx_pn) != + offsetof(struct ieee80211_key, + u.gcmp.rx_pn)); + BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) != + sizeof(rx->key->u.gcmp.rx_pn[queue])); + BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != + IEEE80211_GCMP_PN_LEN); + } else if (rx->key && + (ieee80211_has_protected(fc) || + (status->flag & RX_FLAG_DECRYPTED))) { + entry->is_protected = true; + entry->key_color = rx->key->color; + } + return RX_QUEUED; + } + + /* This is a fragment for a frame that should already be pending in + * fragment cache. Add this fragment to the end of the pending entry. + */ + entry = ieee80211_reassemble_find(cache, frag, seq, + rx->seqno_idx, hdr); + if (!entry) { + I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); + return RX_DROP_MONITOR; + } + + /* "The receiver shall discard MSDUs and MMPDUs whose constituent + * MPDU PN values are not incrementing in steps of 1." 
+ * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP) + * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP) + */ + if (entry->check_sequential_pn) { + int i; + u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; + + if (!requires_sequential_pn(rx, fc)) + return RX_DROP_UNUSABLE; + + /* Prevent mixed key and fragment cache attacks */ + if (entry->key_color != rx->key->color) + return RX_DROP_UNUSABLE; + + memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); + for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { + pn[i]++; + if (pn[i]) + break; + } + + rpn = rx->ccm_gcm.pn; + if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) + return RX_DROP_UNUSABLE; + memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); + } else if (entry->is_protected && + (!rx->key || + (!ieee80211_has_protected(fc) && + !(status->flag & RX_FLAG_DECRYPTED)) || + rx->key->color != entry->key_color)) { + /* Drop this as a mixed key or fragment cache attack, even + * if for TKIP Michael MIC should protect us, and WEP is a + * lost cause anyway. + */ + return RX_DROP_UNUSABLE; + } else if (entry->is_protected && rx->key && + entry->key_color != rx->key->color && + (status->flag & RX_FLAG_DECRYPTED)) { + return RX_DROP_UNUSABLE; + } + + skb_pull(rx->skb, ieee80211_hdrlen(fc)); + __skb_queue_tail(&entry->skb_list, rx->skb); + entry->last_frag = frag; + entry->extra_len += rx->skb->len; + if (ieee80211_has_morefrags(fc)) { + rx->skb = NULL; + return RX_QUEUED; + } + + rx->skb = __skb_dequeue(&entry->skb_list); + if (skb_tailroom(rx->skb) < entry->extra_len) { + I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag); + if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, + GFP_ATOMIC))) { + I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); + __skb_queue_purge(&entry->skb_list); + return RX_DROP_UNUSABLE; + } + } + while ((skb = __skb_dequeue(&entry->skb_list))) { + skb_put_data(rx->skb, skb->data, skb->len); + dev_kfree_skb(skb); + } + + out: + ieee80211_led_rx(rx->local); + if (rx->sta) + rx->link_sta->rx_stats.packets++; + return RX_CONTINUE; +} + +static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) +{ + if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) + return -EACCES; + + return 0; +} + +static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + + /* + * Pass through unencrypted frames if the hardware has + * decrypted them already. + */ + if (status->flag & RX_FLAG_DECRYPTED) + return 0; + + /* Drop unencrypted frames if key is set. */ + if (unlikely(!ieee80211_has_protected(fc) && + !ieee80211_is_any_nullfunc(fc) && + ieee80211_is_data(fc) && rx->key)) + return -EACCES; + + return 0; +} + +static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; + __le16 fc = mgmt->frame_control; + + /* + * Pass through unencrypted frames if the hardware has + * decrypted them already. 
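+	 *
+	 * For MFP stations, unprotected robust management frames are
+	 * rejected below; unprotected deauth/disassoc (once keys are
+	 * installed) and unprotected Beacon frames are additionally reported
+	 * to userspace via cfg80211_rx_unprot_mlme_mgmt() before being
+	 * dropped.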
+ */ + if (status->flag & RX_FLAG_DECRYPTED) + return 0; + + /* drop unicast protected dual (that wasn't protected) */ + if (ieee80211_is_action(fc) && + mgmt->u.action.category == WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION) + return -EACCES; + + if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { + if (unlikely(!ieee80211_has_protected(fc) && + ieee80211_is_unicast_robust_mgmt_frame(rx->skb))) { + if (ieee80211_is_deauth(fc) || + ieee80211_is_disassoc(fc)) { + /* + * Permit unprotected deauth/disassoc frames + * during 4-way-HS (key is installed after HS). + */ + if (!rx->key) + return 0; + + cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, + rx->skb->data, + rx->skb->len); + } + return -EACCES; + } + /* BIP does not use Protected field, so need to check MMIE */ + if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && + ieee80211_get_mmie_keyidx(rx->skb) < 0)) { + if (ieee80211_is_deauth(fc) || + ieee80211_is_disassoc(fc)) + cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, + rx->skb->data, + rx->skb->len); + return -EACCES; + } + if (unlikely(ieee80211_is_beacon(fc) && rx->key && + ieee80211_get_mmie_keyidx(rx->skb) < 0)) { + cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, + rx->skb->data, + rx->skb->len); + return -EACCES; + } + /* + * When using MFP, Action frames are not allowed prior to + * having configured keys. + */ + if (unlikely(ieee80211_is_action(fc) && !rx->key && + ieee80211_is_robust_mgmt_frame(rx->skb))) + return -EACCES; + + /* drop unicast public action frames when using MPF */ + if (is_unicast_ether_addr(mgmt->da) && + ieee80211_is_protected_dual_of_public_action(rx->skb)) + return -EACCES; + } + + return 0; +} + +static int +__ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) +{ + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + bool check_port_control = false; + struct ethhdr *ehdr; + int ret; + + *port_control = false; + if (ieee80211_has_a4(hdr->frame_control) && + sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) + return -1; + + if (sdata->vif.type == NL80211_IFTYPE_STATION && + !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { + + if (!sdata->u.mgd.use_4addr) + return -1; + else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr)) + check_port_control = true; + } + + if (is_multicast_ether_addr(hdr->addr1) && + sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) + return -1; + + ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); + if (ret < 0) + return ret; + + ehdr = (struct ethhdr *) rx->skb->data; + if (ehdr->h_proto == rx->sdata->control_port_protocol) + *port_control = true; + else if (check_port_control) + return -1; + + return 0; +} + +bool ieee80211_is_our_addr(struct ieee80211_sub_if_data *sdata, + const u8 *addr, int *out_link_id) +{ + unsigned int link_id; + + /* non-MLO, or MLD address replaced by hardware */ + if (ether_addr_equal(sdata->vif.addr, addr)) + return true; + + if (!ieee80211_vif_is_mld(&sdata->vif)) + return false; + + for (link_id = 0; link_id < ARRAY_SIZE(sdata->vif.link_conf); link_id++) { + struct ieee80211_bss_conf *conf; + + conf = rcu_dereference(sdata->vif.link_conf[link_id]); + + if (!conf) + continue; + if (ether_addr_equal(conf->addr, addr)) { + if (out_link_id) + *out_link_id = link_id; + return true; + } + } + + return false; +} + +/* + * requires that rx->skb is a frame with ethernet header + */ +static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, 
__le16 fc) +{ + static const u8 pae_group_addr[ETH_ALEN] __aligned(2) + = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; + struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; + + /* + * Allow EAPOL frames to us/the PAE group address regardless of + * whether the frame was encrypted or not, and always disallow + * all other destination addresses for them. + */ + if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol)) + return ieee80211_is_our_addr(rx->sdata, ehdr->h_dest, NULL) || + ether_addr_equal(ehdr->h_dest, pae_group_addr); + + if (ieee80211_802_1x_port_control(rx) || + ieee80211_drop_unencrypted(rx, fc)) + return false; + + return true; +} + +static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, + struct ieee80211_rx_data *rx) +{ + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct net_device *dev = sdata->dev; + + if (unlikely((skb->protocol == sdata->control_port_protocol || + (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) && + !sdata->control_port_no_preauth)) && + sdata->control_port_over_nl80211)) { + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED); + + cfg80211_rx_control_port(dev, skb, noencrypt, rx->link_id); + dev_kfree_skb(skb); + } else { + struct ethhdr *ehdr = (void *)skb_mac_header(skb); + + memset(skb->cb, 0, sizeof(skb->cb)); + + /* + * 802.1X over 802.11 requires that the authenticator address + * be used for EAPOL frames. However, 802.1X allows the use of + * the PAE group address instead. If the interface is part of + * a bridge and we pass the frame with the PAE group address, + * then the bridge will forward it to the network (even if the + * client was not associated yet), which isn't supposed to + * happen. + * To avoid that, rewrite the destination address to our own + * address, so that the authenticator (e.g. hostapd) will see + * the frame, but bridge won't forward it anywhere else. Note + * that due to earlier filtering, the only other address can + * be the PAE group address, unless the hardware allowed them + * through in 802.3 offloaded mode. + */ + if (unlikely(skb->protocol == sdata->control_port_protocol && + !ether_addr_equal(ehdr->h_dest, sdata->vif.addr))) + ether_addr_copy(ehdr->h_dest, sdata->vif.addr); + + /* deliver to local stack */ + if (rx->list) + list_add_tail(&skb->list, rx->list); + else + netif_receive_skb(skb); + } +} + +/* + * requires that rx->skb is a frame with ethernet header + */ +static void +ieee80211_deliver_skb(struct ieee80211_rx_data *rx) +{ + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct net_device *dev = sdata->dev; + struct sk_buff *skb, *xmit_skb; + struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; + struct sta_info *dsta; + + skb = rx->skb; + xmit_skb = NULL; + + dev_sw_netstats_rx_add(dev, skb->len); + + if (rx->sta) { + /* The seqno index has the same property as needed + * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS + * for non-QoS-data frames. Here we know it's a data + * frame, so count MSDUs. 
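Put differently: QoS data frames are counted per TID (0..15) while all non-QoS data frames share one extra slot, so the MSDU counter array needs IEEE80211_NUM_TIDS + 1 entries. A small sketch of that indexing scheme, assuming the usual 16 TIDs and the 4-bit TID subfield of the QoS Control word (the names below are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define NUM_TIDS 16	/* mirrors IEEE80211_NUM_TIDS */

struct msdu_stats_sketch {
	uint64_t msdu[NUM_TIDS + 1];	/* slot 16 counts non-QoS data */
};

/* has_qos_ctrl: the frame carried a QoS Control field */
static unsigned int msdu_index(bool has_qos_ctrl, uint16_t qos_ctrl)
{
	return has_qos_ctrl ? (qos_ctrl & 0xf) : NUM_TIDS;
}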
+ */ + u64_stats_update_begin(&rx->link_sta->rx_stats.syncp); + rx->link_sta->rx_stats.msdu[rx->seqno_idx]++; + u64_stats_update_end(&rx->link_sta->rx_stats.syncp); + } + + if ((sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && + !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && + ehdr->h_proto != rx->sdata->control_port_protocol && + (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { + if (is_multicast_ether_addr(ehdr->h_dest) && + ieee80211_vif_get_num_mcast_if(sdata) != 0) { + /* + * send multicast frames both to higher layers in + * local net stack and back to the wireless medium + */ + xmit_skb = skb_copy(skb, GFP_ATOMIC); + if (!xmit_skb) + net_info_ratelimited("%s: failed to clone multicast frame\n", + dev->name); + } else if (!is_multicast_ether_addr(ehdr->h_dest) && + !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) { + dsta = sta_info_get(sdata, ehdr->h_dest); + if (dsta) { + /* + * The destination station is associated to + * this AP (in this VLAN), so send the frame + * directly to it and do not pass it to local + * net stack. + */ + xmit_skb = skb; + skb = NULL; + } + } + } + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (skb) { + /* 'align' will only take the values 0 or 2 here since all + * frames are required to be aligned to 2-byte boundaries + * when being passed to mac80211; the code here works just + * as well if that isn't true, but mac80211 assumes it can + * access fields as 2-byte aligned (e.g. for ether_addr_equal) + */ + int align; + + align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3; + if (align) { + if (WARN_ON(skb_headroom(skb) < 3)) { + dev_kfree_skb(skb); + skb = NULL; + } else { + u8 *data = skb->data; + size_t len = skb_headlen(skb); + skb->data -= align; + memmove(skb->data, data, len); + skb_set_tail_pointer(skb, len); + } + } + } +#endif + + if (skb) { + skb->protocol = eth_type_trans(skb, dev); + ieee80211_deliver_skb_to_local_stack(skb, rx); + } + + if (xmit_skb) { + /* + * Send to wireless media and increase priority by 256 to + * keep the received priority instead of reclassifying + * the frame (see cfg80211_classify8021d). 
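The offset of 256 works because the transmit-side classifier treats priorities in that range as an already-known 802.1d tag instead of re-deriving one from DSCP. A hedged sketch of that convention (this is an assumption about how cfg80211_classify8021d behaves, reduced to the branch that matters here; the DSCP-based path is stubbed out and the function name is made up):

/* Sketch only: values 256..263 are assumed to encode "802.1d tag + 256",
 * everything else would go through normal DSCP classification.
 */
static unsigned int classify_802_1d_sketch(unsigned int skb_priority)
{
	if (skb_priority >= 256 && skb_priority <= 263)
		return skb_priority - 256;

	return 0;	/* placeholder for the DSCP-derived priority */
}

So a frame received with priority 5 and retransmitted with priority 5 + 256 keeps its user priority instead of being reclassified.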
+ */ + xmit_skb->priority += 256; + xmit_skb->protocol = htons(ETH_P_802_3); + skb_reset_network_header(xmit_skb); + skb_reset_mac_header(xmit_skb); + dev_queue_xmit(xmit_skb); + } +} + +#ifdef CONFIG_MAC80211_MESH +static bool +ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, int hdrlen) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_mesh_fast_tx *entry = NULL; + struct ieee80211s_hdr *mesh_hdr; + struct tid_ampdu_tx *tid_tx; + struct sta_info *sta; + struct ethhdr eth; + u8 tid; + + mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(eth)); + if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) + entry = mesh_fast_tx_get(sdata, mesh_hdr->eaddr1); + else if (!(mesh_hdr->flags & MESH_FLAGS_AE)) + entry = mesh_fast_tx_get(sdata, skb->data); + if (!entry) + return false; + + sta = rcu_dereference(entry->mpath->next_hop); + if (!sta) + return false; + + if (skb_linearize(skb)) + return false; + + tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; + tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); + if (tid_tx) { + if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) + return false; + + if (tid_tx->timeout) + tid_tx->last_tx = jiffies; + } + + ieee80211_aggr_check(sdata, sta, skb); + + if (ieee80211_get_8023_tunnel_proto(skb->data + hdrlen, + &skb->protocol)) + hdrlen += ETH_ALEN; + else + skb->protocol = htons(skb->len - hdrlen); + skb_set_network_header(skb, hdrlen + 2); + + skb->dev = sdata->dev; + memcpy(&eth, skb->data, ETH_HLEN - 2); + skb_pull(skb, 2); + __ieee80211_xmit_fast(sdata, sta, &entry->fast_tx, skb, tid_tx, + eth.h_dest, eth.h_source); + IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); + IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); + + return true; +} +#endif + +static ieee80211_rx_result +ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, + struct sk_buff *skb) +{ +#ifdef CONFIG_MAC80211_MESH + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_local *local = sdata->local; + uint16_t fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA; + struct ieee80211_hdr hdr = { + .frame_control = cpu_to_le16(fc) + }; + struct ieee80211_hdr *fwd_hdr; + struct ieee80211s_hdr *mesh_hdr; + struct ieee80211_tx_info *info; + struct sk_buff *fwd_skb; + struct ethhdr *eth; + bool multicast; + int tailroom = 0; + int hdrlen, mesh_hdrlen; + u8 *qos; + + if (!ieee80211_vif_is_mesh(&sdata->vif)) + return RX_CONTINUE; + + if (!pskb_may_pull(skb, sizeof(*eth) + 6)) + return RX_DROP_MONITOR; + + mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(*eth)); + mesh_hdrlen = ieee80211_get_mesh_hdrlen(mesh_hdr); + + if (!pskb_may_pull(skb, sizeof(*eth) + mesh_hdrlen)) + return RX_DROP_MONITOR; + + eth = (struct ethhdr *)skb->data; + multicast = is_multicast_ether_addr(eth->h_dest); + + mesh_hdr = (struct ieee80211s_hdr *)(eth + 1); + if (!mesh_hdr->ttl) + return RX_DROP_MONITOR; + + /* frame is in RMC, don't forward */ + if (is_multicast_ether_addr(eth->h_dest) && + mesh_rmc_check(sdata, eth->h_source, mesh_hdr)) + return RX_DROP_MONITOR; + + /* forward packet */ + if (sdata->crypto_tx_tailroom_needed_cnt) + tailroom = IEEE80211_ENCRYPT_TAILROOM; + + if (mesh_hdr->flags & MESH_FLAGS_AE) { + struct mesh_path *mppath; + char *proxied_addr; + bool update = false; + + if (multicast) + proxied_addr = mesh_hdr->eaddr1; + else if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) + /* has_a4 already checked in ieee80211_rx_mesh_check */ + proxied_addr =
mesh_hdr->eaddr2; + else + return RX_DROP_MONITOR; + + rcu_read_lock(); + mppath = mpp_path_lookup(sdata, proxied_addr); + if (!mppath) { + mpp_path_add(sdata, proxied_addr, eth->h_source); + } else { + spin_lock_bh(&mppath->state_lock); + if (!ether_addr_equal(mppath->mpp, eth->h_source)) { + memcpy(mppath->mpp, eth->h_source, ETH_ALEN); + update = true; + } + mppath->exp_time = jiffies; + spin_unlock_bh(&mppath->state_lock); + } + + /* flush fast xmit cache if the address path changed */ + if (update) + mesh_fast_tx_flush_addr(sdata, proxied_addr); + + rcu_read_unlock(); + } + + /* Frame has reached destination. Don't forward */ + if (ether_addr_equal(sdata->vif.addr, eth->h_dest)) + goto rx_accept; + + if (!--mesh_hdr->ttl) { + if (multicast) + goto rx_accept; + + IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); + return RX_DROP_MONITOR; + } + + if (!ifmsh->mshcfg.dot11MeshForwarding) { + if (is_multicast_ether_addr(eth->h_dest)) + goto rx_accept; + + return RX_DROP_MONITOR; + } + + skb_set_queue_mapping(skb, ieee802_1d_to_ac[skb->priority]); + + if (!multicast && + ieee80211_rx_mesh_fast_forward(sdata, skb, mesh_hdrlen)) + return RX_QUEUED; + + ieee80211_fill_mesh_addresses(&hdr, &hdr.frame_control, + eth->h_dest, eth->h_source); + hdrlen = ieee80211_hdrlen(hdr.frame_control); + if (multicast) { + int extra_head = sizeof(struct ieee80211_hdr) - sizeof(*eth); + + fwd_skb = skb_copy_expand(skb, local->tx_headroom + extra_head + + IEEE80211_ENCRYPT_HEADROOM, + tailroom, GFP_ATOMIC); + if (!fwd_skb) + goto rx_accept; + } else { + fwd_skb = skb; + skb = NULL; + + if (skb_cow_head(fwd_skb, hdrlen - sizeof(struct ethhdr))) + return RX_DROP_UNUSABLE; + + if (skb_linearize(fwd_skb)) + return RX_DROP_UNUSABLE; + } + + fwd_hdr = skb_push(fwd_skb, hdrlen - sizeof(struct ethhdr)); + memcpy(fwd_hdr, &hdr, hdrlen - 2); + qos = ieee80211_get_qos_ctl(fwd_hdr); + qos[0] = qos[1] = 0; + + skb_reset_mac_header(fwd_skb); + hdrlen += mesh_hdrlen; + if (ieee80211_get_8023_tunnel_proto(fwd_skb->data + hdrlen, + &fwd_skb->protocol)) + hdrlen += ETH_ALEN; + else + fwd_skb->protocol = htons(fwd_skb->len - hdrlen); + skb_set_network_header(fwd_skb, hdrlen + 2); + + info = IEEE80211_SKB_CB(fwd_skb); + memset(info, 0, sizeof(*info)); + info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING; + info->control.vif = &sdata->vif; + info->control.jiffies = jiffies; + fwd_skb->dev = sdata->dev; + if (multicast) { + IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); + memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); + /* update power mode indication when forwarding */ + ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr); + } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) { + /* mesh power mode flags updated in mesh_nexthop_lookup */ + IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); + } else { + /* unable to resolve next hop */ + if (sta) + mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl, + hdr.addr3, 0, + WLAN_REASON_MESH_PATH_NOFORWARD, + sta->sta.addr); + IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); + kfree_skb(fwd_skb); + goto rx_accept; + } + + IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); + ieee80211_add_pending_skb(local, fwd_skb); + +rx_accept: + if (!skb) + return RX_QUEUED; + + ieee80211_strip_8023_mesh_hdr(skb); +#endif + + return RX_CONTINUE; +} + +static ieee80211_rx_result debug_noinline +__ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset) +{ + struct net_device *dev = rx->sdata->dev; + struct sk_buff *skb = rx->skb; + struct ieee80211_hdr *hdr = 
(struct ieee80211_hdr *)skb->data; + __le16 fc = hdr->frame_control; + struct sk_buff_head frame_list; + ieee80211_rx_result res; + struct ethhdr ethhdr; + const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source; + + if (unlikely(ieee80211_has_a4(hdr->frame_control))) { + check_da = NULL; + check_sa = NULL; + } else switch (rx->sdata->vif.type) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + check_da = NULL; + break; + case NL80211_IFTYPE_STATION: + if (!rx->sta || + !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER)) + check_sa = NULL; + break; + case NL80211_IFTYPE_MESH_POINT: + check_sa = NULL; + check_da = NULL; + break; + default: + break; + } + + skb->dev = dev; + __skb_queue_head_init(&frame_list); + + if (ieee80211_data_to_8023_exthdr(skb, &ethhdr, + rx->sdata->vif.addr, + rx->sdata->vif.type, + data_offset, true)) + return RX_DROP_UNUSABLE; + + if (rx->sta->amsdu_mesh_control < 0) { + s8 valid = -1; + int i; + + for (i = 0; i <= 2; i++) { + if (!ieee80211_is_valid_amsdu(skb, i)) + continue; + + if (valid >= 0) { + /* ambiguous */ + valid = -1; + break; + } + + valid = i; + } + + rx->sta->amsdu_mesh_control = valid; + } + + ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, + rx->sdata->vif.type, + rx->local->hw.extra_tx_headroom, + check_da, check_sa, + rx->sta->amsdu_mesh_control); + + while (!skb_queue_empty(&frame_list)) { + rx->skb = __skb_dequeue(&frame_list); + + res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb); + switch (res) { + case RX_QUEUED: + continue; + case RX_CONTINUE: + break; + default: + goto free; + } + + if (!ieee80211_frame_allowed(rx, fc)) + goto free; + + ieee80211_deliver_skb(rx); + continue; + +free: + dev_kfree_skb(rx->skb); + } + + return RX_QUEUED; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + __le16 fc = hdr->frame_control; + + if (!(status->rx_flags & IEEE80211_RX_AMSDU)) + return RX_CONTINUE; + + if (unlikely(!ieee80211_is_data(fc))) + return RX_CONTINUE; + + if (unlikely(!ieee80211_is_data_present(fc))) + return RX_DROP_MONITOR; + + if (unlikely(ieee80211_has_a4(hdr->frame_control))) { + switch (rx->sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + if (!rx->sdata->u.vlan.sta) + return RX_DROP_UNUSABLE; + break; + case NL80211_IFTYPE_STATION: + if (!rx->sdata->u.mgd.use_4addr) + return RX_DROP_UNUSABLE; + break; + case NL80211_IFTYPE_MESH_POINT: + break; + default: + return RX_DROP_UNUSABLE; + } + } + + if (is_multicast_ether_addr(hdr->addr1) || !rx->sta) + return RX_DROP_UNUSABLE; + + if (rx->key) { + /* + * We should not receive A-MSDUs on pre-HT connections, + * and HT connections cannot use old ciphers. Thus drop + * them, as in those cases we couldn't even have SPP + * A-MSDUs or such.
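For reference, an A-MSDU payload is just a sequence of subframes, each starting with DA, SA and a big-endian MSDU length, padded to a 4-byte boundary except for the last one; ieee80211_amsdu_to_8023s() above splits them back into individual 802.3 frames. A standalone sketch of walking that layout (illustrative only, no skb handling; all names are made up):

#include <stdint.h>
#include <stdio.h>

/* One A-MSDU subframe header: DA, SA and a big-endian MSDU length. */
struct amsdu_subframe_hdr {
	uint8_t da[6];
	uint8_t sa[6];
	uint8_t len[2];
};

static void walk_amsdu(const uint8_t *buf, size_t buflen)
{
	size_t offset = 0;

	while (offset + sizeof(struct amsdu_subframe_hdr) <= buflen) {
		const struct amsdu_subframe_hdr *sf =
			(const void *)(buf + offset);
		size_t msdu_len = ((size_t)sf->len[0] << 8) | sf->len[1];
		size_t subframe = sizeof(*sf) + msdu_len;

		if (offset + subframe > buflen)
			break;	/* truncated or bogus length field */

		printf("MSDU of %zu bytes\n", msdu_len);

		/* all but the last subframe are padded to 4 bytes */
		offset += (subframe + 3) & ~(size_t)3;
	}
}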
+ */ + switch (rx->key->conf.cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + return RX_DROP_UNUSABLE; + default: + break; + } + } + + return __ieee80211_rx_h_amsdu(rx, 0); +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_data(struct ieee80211_rx_data *rx) +{ + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct ieee80211_local *local = rx->local; + struct net_device *dev = sdata->dev; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + __le16 fc = hdr->frame_control; + ieee80211_rx_result res; + bool port_control; + int err; + + if (unlikely(!ieee80211_is_data(hdr->frame_control))) + return RX_CONTINUE; + + if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) + return RX_DROP_MONITOR; + + /* + * Send unexpected-4addr-frame event to hostapd. For older versions, + * also drop the frame to cooked monitor interfaces. + */ + if (ieee80211_has_a4(hdr->frame_control) && + sdata->vif.type == NL80211_IFTYPE_AP) { + if (rx->sta && + !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) + cfg80211_rx_unexpected_4addr_frame( + rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); + return RX_DROP_MONITOR; + } + + err = __ieee80211_data_to_8023(rx, &port_control); + if (unlikely(err)) + return RX_DROP_UNUSABLE; + + res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb); + if (res != RX_CONTINUE) + return res; + + if (!ieee80211_frame_allowed(rx, fc)) + return RX_DROP_MONITOR; + + /* directly handle TDLS channel switch requests/responses */ + if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == + cpu_to_be16(ETH_P_TDLS))) { + struct ieee80211_tdls_data *tf = (void *)rx->skb->data; + + if (pskb_may_pull(rx->skb, + offsetof(struct ieee80211_tdls_data, u)) && + tf->payload_type == WLAN_TDLS_SNAP_RFTYPE && + tf->category == WLAN_CATEGORY_TDLS && + (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || + tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { + rx->skb->protocol = cpu_to_be16(ETH_P_TDLS); + __ieee80211_queue_skb_to_iface(sdata, rx->link_id, + rx->sta, rx->skb); + return RX_QUEUED; + } + } + + if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + unlikely(port_control) && sdata->bss) { + sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, + u.ap); + dev = sdata->dev; + rx->sdata = sdata; + } + + rx->skb->dev = dev; + + if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) && + local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && + !is_multicast_ether_addr( + ((struct ethhdr *)rx->skb->data)->h_dest) && + (!local->scanning && + !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) + mod_timer(&local->dynamic_ps_timer, jiffies + + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); + + ieee80211_deliver_skb(rx); + + return RX_QUEUED; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; + struct tid_ampdu_rx *tid_agg_rx; + u16 start_seq_num; + u16 tid; + + if (likely(!ieee80211_is_ctl(bar->frame_control))) + return RX_CONTINUE; + + if (ieee80211_is_back_req(bar->frame_control)) { + struct { + __le16 control, start_seq_num; + } __packed bar_data; + struct ieee80211_event event = { + .type = BAR_RX_EVENT, + }; + + if (!rx->sta) + return RX_DROP_MONITOR; + + if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), + &bar_data, sizeof(bar_data))) + return RX_DROP_MONITOR; + + tid = 
le16_to_cpu(bar_data.control) >> 12; + + if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && + !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) + ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, + WLAN_BACK_RECIPIENT, + WLAN_REASON_QSTA_REQUIRE_SETUP); + + tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); + if (!tid_agg_rx) + return RX_DROP_MONITOR; + + start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; + event.u.ba.tid = tid; + event.u.ba.ssn = start_seq_num; + event.u.ba.sta = &rx->sta->sta; + + /* reset session timer */ + if (tid_agg_rx->timeout) + mod_timer(&tid_agg_rx->session_timer, + TU_TO_EXP_TIME(tid_agg_rx->timeout)); + + spin_lock(&tid_agg_rx->reorder_lock); + /* release stored frames up to start of BAR */ + ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, + start_seq_num, frames); + spin_unlock(&tid_agg_rx->reorder_lock); + + drv_event_callback(rx->local, rx->sdata, &event); + + kfree_skb(skb); + return RX_QUEUED; + } + + /* + * After this point, we only want management frames, + * so we can drop all remaining control frames to + * cooked monitor interfaces. + */ + return RX_DROP_MONITOR; +} + +static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *resp; + + if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { + /* Not to own unicast address */ + return; + } + + if (!ether_addr_equal(mgmt->sa, sdata->deflink.u.mgd.bssid) || + !ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid)) { + /* Not from the current AP or not associated yet. */ + return; + } + + if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { + /* Too short SA Query request frame */ + return; + } + + skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); + if (skb == NULL) + return; + + skb_reserve(skb, local->hw.extra_tx_headroom); + resp = skb_put_zero(skb, 24); + memcpy(resp->da, mgmt->sa, ETH_ALEN); + memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); + memcpy(resp->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN); + resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); + resp->u.action.category = WLAN_CATEGORY_SA_QUERY; + resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; + memcpy(resp->u.action.u.sa_query.trans_id, + mgmt->u.action.u.sa_query.trans_id, + WLAN_SA_QUERY_TR_ID_LEN); + + ieee80211_tx_skb(sdata, skb); +} + +static void +ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx) +{ + struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; + const struct element *ie; + size_t baselen; + + if (!wiphy_ext_feature_isset(rx->local->hw.wiphy, + NL80211_EXT_FEATURE_BSS_COLOR)) + return; + + if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION)) + return; + + if (rx->sdata->vif.bss_conf.csa_active) + return; + + baselen = mgmt->u.beacon.variable - rx->skb->data; + if (baselen > rx->skb->len) + return; + + ie = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, + mgmt->u.beacon.variable, + rx->skb->len - baselen); + if (ie && ie->datalen >= sizeof(struct ieee80211_he_operation) && + ie->datalen >= ieee80211_he_oper_size(ie->data + 1)) { + struct ieee80211_bss_conf *bss_conf = &rx->sdata->vif.bss_conf; + const struct ieee80211_he_operation *he_oper; + u8 color; + + he_oper = (void *)(ie->data + 1); + if (le32_get_bits(he_oper->he_oper_params, + 
IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED)) + return; + + color = le32_get_bits(he_oper->he_oper_params, + IEEE80211_HE_OPERATION_BSS_COLOR_MASK); + if (color == bss_conf->he_bss_color.color) + ieee80211_obss_color_collision_notify(&rx->sdata->vif, + BIT_ULL(color), + GFP_ATOMIC); + } +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + + if (ieee80211_is_s1g_beacon(mgmt->frame_control)) + return RX_CONTINUE; + + /* + * From here on, look only at management frames. + * Data and control frames are already handled, + * and unknown (reserved) frames are useless. + */ + if (rx->skb->len < 24) + return RX_DROP_MONITOR; + + if (!ieee80211_is_mgmt(mgmt->frame_control)) + return RX_DROP_MONITOR; + + /* drop too small action frames */ + if (ieee80211_is_action(mgmt->frame_control) && + rx->skb->len < IEEE80211_MIN_ACTION_SIZE) + return RX_DROP_UNUSABLE; + + if (rx->sdata->vif.type == NL80211_IFTYPE_AP && + ieee80211_is_beacon(mgmt->frame_control) && + !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { + int sig = 0; + + /* sw bss color collision detection */ + ieee80211_rx_check_bss_color_collision(rx); + + if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && + !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) + sig = status->signal; + + cfg80211_report_obss_beacon_khz(rx->local->hw.wiphy, + rx->skb->data, rx->skb->len, + ieee80211_rx_status_to_khz(status), + sig); + rx->flags |= IEEE80211_RX_BEACON_REPORTED; + } + + if (ieee80211_drop_unencrypted_mgmt(rx)) + return RX_DROP_UNUSABLE; + + return RX_CONTINUE; +} + +static bool +ieee80211_process_rx_twt_action(struct ieee80211_rx_data *rx) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)rx->skb->data; + struct ieee80211_sub_if_data *sdata = rx->sdata; + + /* TWT actions are only supported in AP for the moment */ + if (sdata->vif.type != NL80211_IFTYPE_AP) + return false; + + if (!rx->local->ops->add_twt_setup) + return false; + + if (!sdata->vif.bss_conf.twt_responder) + return false; + + if (!rx->sta) + return false; + + switch (mgmt->u.action.u.s1g.action_code) { + case WLAN_S1G_TWT_SETUP: { + struct ieee80211_twt_setup *twt; + + if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + + 1 + /* action code */ + sizeof(struct ieee80211_twt_setup) + + 2 /* TWT req_type agrt */) + break; + + twt = (void *)mgmt->u.action.u.s1g.variable; + if (twt->element_id != WLAN_EID_S1G_TWT) + break; + + if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + + 4 + /* action code + token + tlv */ + twt->length) + break; + + return true; /* queue the frame */ + } + case WLAN_S1G_TWT_TEARDOWN: + if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + 2) + break; + + return true; /* queue the frame */ + default: + break; + } + + return false; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_action(struct ieee80211_rx_data *rx) +{ + struct ieee80211_local *local = rx->local; + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + int len = rx->skb->len; + + if (!ieee80211_is_action(mgmt->frame_control)) + return RX_CONTINUE; + + if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC && + mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED && + mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) + return RX_DROP_UNUSABLE; + + switch 
(mgmt->u.action.category) { + case WLAN_CATEGORY_HT: + /* reject HT action frames from stations not supporting HT */ + if (!rx->link_sta->pub->ht_cap.ht_supported) + goto invalid; + + if (sdata->vif.type != NL80211_IFTYPE_STATION && + sdata->vif.type != NL80211_IFTYPE_MESH_POINT && + sdata->vif.type != NL80211_IFTYPE_AP_VLAN && + sdata->vif.type != NL80211_IFTYPE_AP && + sdata->vif.type != NL80211_IFTYPE_ADHOC) + break; + + /* verify action & smps_control/chanwidth are present */ + if (len < IEEE80211_MIN_ACTION_SIZE + 2) + goto invalid; + + switch (mgmt->u.action.u.ht_smps.action) { + case WLAN_HT_ACTION_SMPS: { + struct ieee80211_supported_band *sband; + enum ieee80211_smps_mode smps_mode; + struct sta_opmode_info sta_opmode = {}; + + if (sdata->vif.type != NL80211_IFTYPE_AP && + sdata->vif.type != NL80211_IFTYPE_AP_VLAN) + goto handled; + + /* convert to HT capability */ + switch (mgmt->u.action.u.ht_smps.smps_control) { + case WLAN_HT_SMPS_CONTROL_DISABLED: + smps_mode = IEEE80211_SMPS_OFF; + break; + case WLAN_HT_SMPS_CONTROL_STATIC: + smps_mode = IEEE80211_SMPS_STATIC; + break; + case WLAN_HT_SMPS_CONTROL_DYNAMIC: + smps_mode = IEEE80211_SMPS_DYNAMIC; + break; + default: + goto invalid; + } + + /* if no change do nothing */ + if (rx->link_sta->pub->smps_mode == smps_mode) + goto handled; + rx->link_sta->pub->smps_mode = smps_mode; + sta_opmode.smps_mode = + ieee80211_smps_mode_to_smps_mode(smps_mode); + sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED; + + sband = rx->local->hw.wiphy->bands[status->band]; + + rate_control_rate_update(local, sband, rx->sta, 0, + IEEE80211_RC_SMPS_CHANGED); + cfg80211_sta_opmode_change_notify(sdata->dev, + rx->sta->addr, + &sta_opmode, + GFP_ATOMIC); + goto handled; + } + case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { + struct ieee80211_supported_band *sband; + u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; + enum ieee80211_sta_rx_bandwidth max_bw, new_bw; + struct sta_opmode_info sta_opmode = {}; + + /* If it doesn't support 40 MHz it can't change ... 
*/ + if (!(rx->link_sta->pub->ht_cap.cap & + IEEE80211_HT_CAP_SUP_WIDTH_20_40)) + goto handled; + + if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ) + max_bw = IEEE80211_STA_RX_BW_20; + else + max_bw = ieee80211_sta_cap_rx_bw(rx->link_sta); + + /* set cur_max_bandwidth and recalc sta bw */ + rx->link_sta->cur_max_bandwidth = max_bw; + new_bw = ieee80211_sta_cur_vht_bw(rx->link_sta); + + if (rx->link_sta->pub->bandwidth == new_bw) + goto handled; + + rx->link_sta->pub->bandwidth = new_bw; + sband = rx->local->hw.wiphy->bands[status->band]; + sta_opmode.bw = + ieee80211_sta_rx_bw_to_chan_width(rx->link_sta); + sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED; + + rate_control_rate_update(local, sband, rx->sta, 0, + IEEE80211_RC_BW_CHANGED); + cfg80211_sta_opmode_change_notify(sdata->dev, + rx->sta->addr, + &sta_opmode, + GFP_ATOMIC); + goto handled; + } + default: + goto invalid; + } + + break; + case WLAN_CATEGORY_PUBLIC: + if (len < IEEE80211_MIN_ACTION_SIZE + 1) + goto invalid; + if (sdata->vif.type != NL80211_IFTYPE_STATION) + break; + if (!rx->sta) + break; + if (!ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid)) + break; + if (mgmt->u.action.u.ext_chan_switch.action_code != + WLAN_PUB_ACTION_EXT_CHANSW_ANN) + break; + if (len < offsetof(struct ieee80211_mgmt, + u.action.u.ext_chan_switch.variable)) + goto invalid; + goto queue; + case WLAN_CATEGORY_VHT: + if (sdata->vif.type != NL80211_IFTYPE_STATION && + sdata->vif.type != NL80211_IFTYPE_MESH_POINT && + sdata->vif.type != NL80211_IFTYPE_AP_VLAN && + sdata->vif.type != NL80211_IFTYPE_AP && + sdata->vif.type != NL80211_IFTYPE_ADHOC) + break; + + /* verify action code is present */ + if (len < IEEE80211_MIN_ACTION_SIZE + 1) + goto invalid; + + switch (mgmt->u.action.u.vht_opmode_notif.action_code) { + case WLAN_VHT_ACTION_OPMODE_NOTIF: { + /* verify opmode is present */ + if (len < IEEE80211_MIN_ACTION_SIZE + 2) + goto invalid; + goto queue; + } + case WLAN_VHT_ACTION_GROUPID_MGMT: { + if (len < IEEE80211_MIN_ACTION_SIZE + 25) + goto invalid; + goto queue; + } + default: + break; + } + break; + case WLAN_CATEGORY_BACK: + if (sdata->vif.type != NL80211_IFTYPE_STATION && + sdata->vif.type != NL80211_IFTYPE_MESH_POINT && + sdata->vif.type != NL80211_IFTYPE_AP_VLAN && + sdata->vif.type != NL80211_IFTYPE_AP && + sdata->vif.type != NL80211_IFTYPE_ADHOC) + break; + + /* verify action_code is present */ + if (len < IEEE80211_MIN_ACTION_SIZE + 1) + break; + + switch (mgmt->u.action.u.addba_req.action_code) { + case WLAN_ACTION_ADDBA_REQ: + if (len < (IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.addba_req))) + goto invalid; + break; + case WLAN_ACTION_ADDBA_RESP: + if (len < (IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.addba_resp))) + goto invalid; + break; + case WLAN_ACTION_DELBA: + if (len < (IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.delba))) + goto invalid; + break; + default: + goto invalid; + } + + goto queue; + case WLAN_CATEGORY_SPECTRUM_MGMT: + /* verify action_code is present */ + if (len < IEEE80211_MIN_ACTION_SIZE + 1) + break; + + switch (mgmt->u.action.u.measurement.action_code) { + case WLAN_ACTION_SPCT_MSR_REQ: + if (status->band != NL80211_BAND_5GHZ) + break; + + if (len < (IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.measurement))) + break; + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + break; + + ieee80211_process_measurement_req(sdata, mgmt, len); + goto handled; + case WLAN_ACTION_SPCT_CHL_SWITCH: { + u8 *bssid; + if (len < (IEEE80211_MIN_ACTION_SIZE + + 
sizeof(mgmt->u.action.u.chan_switch))) + break; + + if (sdata->vif.type != NL80211_IFTYPE_STATION && + sdata->vif.type != NL80211_IFTYPE_ADHOC && + sdata->vif.type != NL80211_IFTYPE_MESH_POINT) + break; + + if (sdata->vif.type == NL80211_IFTYPE_STATION) + bssid = sdata->deflink.u.mgd.bssid; + else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) + bssid = sdata->u.ibss.bssid; + else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) + bssid = mgmt->sa; + else + break; + + if (!ether_addr_equal(mgmt->bssid, bssid)) + break; + + goto queue; + } + } + break; + case WLAN_CATEGORY_SELF_PROTECTED: + if (len < (IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.self_prot.action_code))) + break; + + switch (mgmt->u.action.u.self_prot.action_code) { + case WLAN_SP_MESH_PEERING_OPEN: + case WLAN_SP_MESH_PEERING_CLOSE: + case WLAN_SP_MESH_PEERING_CONFIRM: + if (!ieee80211_vif_is_mesh(&sdata->vif)) + goto invalid; + if (sdata->u.mesh.user_mpm) + /* userspace handles this frame */ + break; + goto queue; + case WLAN_SP_MGK_INFORM: + case WLAN_SP_MGK_ACK: + if (!ieee80211_vif_is_mesh(&sdata->vif)) + goto invalid; + break; + } + break; + case WLAN_CATEGORY_MESH_ACTION: + if (len < (IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.mesh_action.action_code))) + break; + + if (!ieee80211_vif_is_mesh(&sdata->vif)) + break; + if (mesh_action_is_path_sel(mgmt) && + !mesh_path_sel_is_hwmp(sdata)) + break; + goto queue; + case WLAN_CATEGORY_S1G: + if (len < offsetofend(typeof(*mgmt), + u.action.u.s1g.action_code)) + break; + + switch (mgmt->u.action.u.s1g.action_code) { + case WLAN_S1G_TWT_SETUP: + case WLAN_S1G_TWT_TEARDOWN: + if (ieee80211_process_rx_twt_action(rx)) + goto queue; + break; + default: + break; + } + break; + } + + return RX_CONTINUE; + + invalid: + status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; + /* will return in the next handlers */ + return RX_CONTINUE; + + handled: + if (rx->sta) + rx->link_sta->rx_stats.packets++; + dev_kfree_skb(rx->skb); + return RX_QUEUED; + + queue: + ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); + return RX_QUEUED; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + struct cfg80211_rx_info info = { + .freq = ieee80211_rx_status_to_khz(status), + .buf = rx->skb->data, + .len = rx->skb->len, + .link_id = rx->link_id, + .have_link_id = rx->link_id >= 0, + }; + + /* skip known-bad action frames and return them in the next handler */ + if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) + return RX_CONTINUE; + + /* + * Getting here means the kernel doesn't know how to handle + * it, but maybe userspace does ... include returned frames + * so userspace can register for those to know whether ones + * it transmitted were processed or returned. 
+ */ + + if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && + !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) + info.sig_dbm = status->signal; + + if (ieee80211_is_timing_measurement(rx->skb) || + ieee80211_is_ftm(rx->skb)) { + info.rx_tstamp = ktime_to_ns(skb_hwtstamps(rx->skb)->hwtstamp); + info.ack_tstamp = ktime_to_ns(status->ack_tx_hwtstamp); + } + + if (cfg80211_rx_mgmt_ext(&rx->sdata->wdev, &info)) { + if (rx->sta) + rx->link_sta->rx_stats.packets++; + dev_kfree_skb(rx->skb); + return RX_QUEUED; + } + + return RX_CONTINUE; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx) +{ + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; + int len = rx->skb->len; + + if (!ieee80211_is_action(mgmt->frame_control)) + return RX_CONTINUE; + + switch (mgmt->u.action.category) { + case WLAN_CATEGORY_SA_QUERY: + if (len < (IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.sa_query))) + break; + + switch (mgmt->u.action.u.sa_query.action) { + case WLAN_ACTION_SA_QUERY_REQUEST: + if (sdata->vif.type != NL80211_IFTYPE_STATION) + break; + ieee80211_process_sa_query_req(sdata, mgmt, len); + goto handled; + } + break; + } + + return RX_CONTINUE; + + handled: + if (rx->sta) + rx->link_sta->rx_stats.packets++; + dev_kfree_skb(rx->skb); + return RX_QUEUED; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) +{ + struct ieee80211_local *local = rx->local; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; + struct sk_buff *nskb; + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + + if (!ieee80211_is_action(mgmt->frame_control)) + return RX_CONTINUE; + + /* + * For AP mode, hostapd is responsible for handling any action + * frames that we didn't handle, including returning unknown + * ones. For all other modes we will return them to the sender, + * setting the 0x80 bit in the action category, as required by + * 802.11-2012 9.24.4. + * Newer versions of hostapd shall also use the management frame + * registration mechanisms, but older ones still use cooked + * monitor interfaces so push all frames there. 
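Mechanically, "returning" an unhandled action frame means copying it, setting bit 7 (0x80) of the category octet and swapping the addresses so it travels back to its sender, which is what the code below does on the real skb. A simplified standalone sketch of that construction (the struct here is illustrative and ignores the packed, little-endian layout of real 802.11 frames):

#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

struct action_frame_sketch {
	uint8_t da[ETH_ALEN];
	uint8_t sa[ETH_ALEN];
	uint8_t bssid[ETH_ALEN];
	uint8_t category;
	/* action-specific body would follow */
};

static void build_rejected_action(struct action_frame_sketch *resp,
				  const struct action_frame_sketch *req,
				  const uint8_t own_addr[ETH_ALEN])
{
	*resp = *req;
	resp->category |= 0x80;	/* "rejected" marker per 802.11-2012 9.24.4 */
	memcpy(resp->da, req->sa, ETH_ALEN);
	memcpy(resp->sa, own_addr, ETH_ALEN);
}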
+ */ + if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && + (sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) + return RX_DROP_MONITOR; + + if (is_multicast_ether_addr(mgmt->da)) + return RX_DROP_MONITOR; + + /* do not return rejected action frames */ + if (mgmt->u.action.category & 0x80) + return RX_DROP_UNUSABLE; + + nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, + GFP_ATOMIC); + if (nskb) { + struct ieee80211_mgmt *nmgmt = (void *)nskb->data; + + nmgmt->u.action.category |= 0x80; + memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); + memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); + + memset(nskb->cb, 0, sizeof(nskb->cb)); + + if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); + + info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | + IEEE80211_TX_INTFL_OFFCHAN_TX_OK | + IEEE80211_TX_CTL_NO_CCK_RATE; + if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) + info->hw_queue = + local->hw.offchannel_tx_hw_queue; + } + + __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, -1, + status->band); + } + dev_kfree_skb(rx->skb); + return RX_QUEUED; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_ext(struct ieee80211_rx_data *rx) +{ + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct ieee80211_hdr *hdr = (void *)rx->skb->data; + + if (!ieee80211_is_ext(hdr->frame_control)) + return RX_CONTINUE; + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return RX_DROP_MONITOR; + + /* for now only beacons are ext, so queue them */ + ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); + + return RX_QUEUED; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) +{ + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; + __le16 stype; + + stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); + + if (!ieee80211_vif_is_mesh(&sdata->vif) && + sdata->vif.type != NL80211_IFTYPE_ADHOC && + sdata->vif.type != NL80211_IFTYPE_OCB && + sdata->vif.type != NL80211_IFTYPE_STATION) + return RX_DROP_MONITOR; + + switch (stype) { + case cpu_to_le16(IEEE80211_STYPE_AUTH): + case cpu_to_le16(IEEE80211_STYPE_BEACON): + case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): + /* process for all: mesh, mlme, ibss */ + break; + case cpu_to_le16(IEEE80211_STYPE_DEAUTH): + if (is_multicast_ether_addr(mgmt->da) && + !is_broadcast_ether_addr(mgmt->da)) + return RX_DROP_MONITOR; + + /* process only for station/IBSS */ + if (sdata->vif.type != NL80211_IFTYPE_STATION && + sdata->vif.type != NL80211_IFTYPE_ADHOC) + return RX_DROP_MONITOR; + break; + case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): + case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): + case cpu_to_le16(IEEE80211_STYPE_DISASSOC): + if (is_multicast_ether_addr(mgmt->da) && + !is_broadcast_ether_addr(mgmt->da)) + return RX_DROP_MONITOR; + + /* process only for station */ + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return RX_DROP_MONITOR; + break; + case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): + /* process only for ibss and mesh */ + if (sdata->vif.type != NL80211_IFTYPE_ADHOC && + sdata->vif.type != NL80211_IFTYPE_MESH_POINT) + return RX_DROP_MONITOR; + break; + default: + return RX_DROP_MONITOR; + } + + ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); + + return RX_QUEUED; +} + +static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, + struct ieee80211_rate *rate, + ieee80211_rx_result reason) +{ + 
struct ieee80211_sub_if_data *sdata; + struct ieee80211_local *local = rx->local; + struct sk_buff *skb = rx->skb, *skb2; + struct net_device *prev_dev = NULL; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + int needed_headroom; + + /* + * If cooked monitor has been processed already, then + * don't do it again. If not, set the flag. + */ + if (rx->flags & IEEE80211_RX_CMNTR) + goto out_free_skb; + rx->flags |= IEEE80211_RX_CMNTR; + + /* If there are no cooked monitor interfaces, just free the SKB */ + if (!local->cooked_mntrs) + goto out_free_skb; + + /* room for the radiotap header based on driver features */ + needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb); + + if (skb_headroom(skb) < needed_headroom && + pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) + goto out_free_skb; + + /* prepend radiotap information */ + ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, + false); + + skb_reset_mac_header(skb); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + + if (sdata->vif.type != NL80211_IFTYPE_MONITOR || + !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)) + continue; + + if (prev_dev) { + skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2) { + skb2->dev = prev_dev; + netif_receive_skb(skb2); + } + } + + prev_dev = sdata->dev; + dev_sw_netstats_rx_add(sdata->dev, skb->len); + } + + if (prev_dev) { + skb->dev = prev_dev; + netif_receive_skb(skb); + return; + } + + out_free_skb: + kfree_skb_reason(skb, (__force u32)reason); +} + +static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, + ieee80211_rx_result res) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + struct ieee80211_supported_band *sband; + struct ieee80211_rate *rate = NULL; + + if (res == RX_QUEUED) { + I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); + return; + } + + if (res != RX_CONTINUE) { + I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); + if (rx->sta) + rx->link_sta->rx_stats.dropped++; + } + + if (u32_get_bits((__force u32)res, SKB_DROP_REASON_SUBSYS_MASK) == + SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE) { + kfree_skb_reason(rx->skb, (__force u32)res); + return; + } + + sband = rx->local->hw.wiphy->bands[status->band]; + if (status->encoding == RX_ENC_LEGACY) + rate = &sband->bitrates[status->rate_idx]; + + ieee80211_rx_cooked_monitor(rx, rate, res); +} + +static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, + struct sk_buff_head *frames) +{ + ieee80211_rx_result res = RX_DROP_MONITOR; + struct sk_buff *skb; + +#define CALL_RXH(rxh) \ + do { \ + res = rxh(rx); \ + if (res != RX_CONTINUE) \ + goto rxh_next; \ + } while (0) + + /* Lock here to avoid hitting all of the data used in the RX + * path (e.g. key data, station data, ...) concurrently when + * a frame is released from the reorder buffer due to timeout + * from the timer, potentially concurrently with RX from the + * driver. 
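The CALL_RXH() macro above encodes a simple pipeline contract: a handler that returns RX_CONTINUE lets the next handler run, any other result ends processing of that frame. The same dispatch pattern in miniature, independent of mac80211 (handler names and the frame struct are made up):

#include <stddef.h>

enum rx_result_sketch { RX_CONTINUE_S, RX_QUEUED_S, RX_DROP_S };

struct frame_sketch { int id; };

typedef enum rx_result_sketch (*rx_handler_t)(struct frame_sketch *f);

static enum rx_result_sketch h_check(struct frame_sketch *f)   { (void)f; return RX_CONTINUE_S; }
static enum rx_result_sketch h_decrypt(struct frame_sketch *f) { (void)f; return RX_CONTINUE_S; }
static enum rx_result_sketch h_deliver(struct frame_sketch *f) { (void)f; return RX_QUEUED_S; }

static enum rx_result_sketch run_rx_pipeline(struct frame_sketch *f)
{
	static const rx_handler_t pipeline[] = { h_check, h_decrypt, h_deliver };
	enum rx_result_sketch res = RX_DROP_S;
	size_t i;

	for (i = 0; i < sizeof(pipeline) / sizeof(pipeline[0]); i++) {
		res = pipeline[i](f);
		if (res != RX_CONTINUE_S)	/* same early exit as CALL_RXH */
			break;
	}

	return res;
}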
+ */ + spin_lock_bh(&rx->local->rx_path_lock); + + while ((skb = __skb_dequeue(frames))) { + /* + * all the other fields are valid across frames + * that belong to an aMPDU since they are on the + * same TID from the same station + */ + rx->skb = skb; + + if (WARN_ON_ONCE(!rx->link)) + goto rxh_next; + + CALL_RXH(ieee80211_rx_h_check_more_data); + CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll); + CALL_RXH(ieee80211_rx_h_sta_process); + CALL_RXH(ieee80211_rx_h_decrypt); + CALL_RXH(ieee80211_rx_h_defragment); + CALL_RXH(ieee80211_rx_h_michael_mic_verify); + /* must be after MMIC verify so header is counted in MPDU mic */ + CALL_RXH(ieee80211_rx_h_amsdu); + CALL_RXH(ieee80211_rx_h_data); + + /* special treatment -- needs the queue */ + res = ieee80211_rx_h_ctrl(rx, frames); + if (res != RX_CONTINUE) + goto rxh_next; + + CALL_RXH(ieee80211_rx_h_mgmt_check); + CALL_RXH(ieee80211_rx_h_action); + CALL_RXH(ieee80211_rx_h_userspace_mgmt); + CALL_RXH(ieee80211_rx_h_action_post_userspace); + CALL_RXH(ieee80211_rx_h_action_return); + CALL_RXH(ieee80211_rx_h_ext); + CALL_RXH(ieee80211_rx_h_mgmt); + + rxh_next: + ieee80211_rx_handlers_result(rx, res); + +#undef CALL_RXH + } + + spin_unlock_bh(&rx->local->rx_path_lock); +} + +static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) +{ + struct sk_buff_head reorder_release; + ieee80211_rx_result res = RX_DROP_MONITOR; + + __skb_queue_head_init(&reorder_release); + +#define CALL_RXH(rxh) \ + do { \ + res = rxh(rx); \ + if (res != RX_CONTINUE) \ + goto rxh_next; \ + } while (0) + + CALL_RXH(ieee80211_rx_h_check_dup); + CALL_RXH(ieee80211_rx_h_check); + + ieee80211_rx_reorder_ampdu(rx, &reorder_release); + + ieee80211_rx_handlers(rx, &reorder_release); + return; + + rxh_next: + ieee80211_rx_handlers_result(rx, res); + +#undef CALL_RXH +} + +static bool +ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id) +{ + return !!(sta->valid_links & BIT(link_id)); +} + +static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx, + u8 link_id) +{ + rx->link_id = link_id; + rx->link = rcu_dereference(rx->sdata->link[link_id]); + + if (!rx->sta) + return rx->link; + + if (!ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, link_id)) + return false; + + rx->link_sta = rcu_dereference(rx->sta->link[link_id]); + + return rx->link && rx->link_sta; +} + +static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx, + struct sta_info *sta, int link_id) +{ + rx->link_id = link_id; + rx->sta = sta; + + if (sta) { + rx->local = sta->sdata->local; + if (!rx->sdata) + rx->sdata = sta->sdata; + rx->link_sta = &sta->deflink; + } else { + rx->link_sta = NULL; + } + + if (link_id < 0) + rx->link = &rx->sdata->deflink; + else if (!ieee80211_rx_data_set_link(rx, link_id)) + return false; + + return true; +} + +/* + * This function makes calls into the RX path, therefore + * it has to be invoked under RCU read lock. 
+ */ +void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) +{ + struct sk_buff_head frames; + struct ieee80211_rx_data rx = { + /* This is OK -- must be QoS data frame */ + .security_idx = tid, + .seqno_idx = tid, + }; + struct tid_ampdu_rx *tid_agg_rx; + int link_id = -1; + + /* FIXME: statistics won't be right with this */ + if (sta->sta.valid_links) + link_id = ffs(sta->sta.valid_links) - 1; + + if (!ieee80211_rx_data_set_sta(&rx, sta, link_id)) + return; + + tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); + if (!tid_agg_rx) + return; + + __skb_queue_head_init(&frames); + + spin_lock(&tid_agg_rx->reorder_lock); + ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); + spin_unlock(&tid_agg_rx->reorder_lock); + + if (!skb_queue_empty(&frames)) { + struct ieee80211_event event = { + .type = BA_FRAME_TIMEOUT, + .u.ba.tid = tid, + .u.ba.sta = &sta->sta, + }; + drv_event_callback(rx.local, rx.sdata, &event); + } + + ieee80211_rx_handlers(&rx, &frames); +} + +void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, + u16 ssn, u64 filtered, + u16 received_mpdus) +{ + struct ieee80211_local *local; + struct sta_info *sta; + struct tid_ampdu_rx *tid_agg_rx; + struct sk_buff_head frames; + struct ieee80211_rx_data rx = { + /* This is OK -- must be QoS data frame */ + .security_idx = tid, + .seqno_idx = tid, + }; + int i, diff; + + if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS)) + return; + + __skb_queue_head_init(&frames); + + sta = container_of(pubsta, struct sta_info, sta); + + local = sta->sdata->local; + WARN_ONCE(local->hw.max_rx_aggregation_subframes > 64, + "RX BA marker can't support max_rx_aggregation_subframes %u > 64\n", + local->hw.max_rx_aggregation_subframes); + + if (!ieee80211_rx_data_set_sta(&rx, sta, -1)) + return; + + rcu_read_lock(); + tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); + if (!tid_agg_rx) + goto out; + + spin_lock_bh(&tid_agg_rx->reorder_lock); + + if (received_mpdus >= IEEE80211_SN_MODULO >> 1) { + int release; + + /* release all frames in the reorder buffer */ + release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) % + IEEE80211_SN_MODULO; + ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, + release, &frames); + /* update ssn to match received ssn */ + tid_agg_rx->head_seq_num = ssn; + } else { + ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn, + &frames); + } + + /* handle the case that received ssn is behind the mac ssn. 
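The arithmetic used for this relies on the 12-bit sequence number space wrapping at 4096, so "behind" is a modular distance rather than a numeric comparison. A tiny sketch of that helper (SN_MASK mirrors IEEE80211_SN_MASK; the function name is illustrative):

#include <stdint.h>

#define SN_MASK 0xfff	/* 12-bit sequence numbers, 0..4095 */

/* Distance from 'behind' forward to 'ahead', with wrap-around. */
static uint16_t sn_sub(uint16_t ahead, uint16_t behind)
{
	return (ahead - behind) & SN_MASK;
}

For example, sn_sub(5, 4090) is 11: an SSN of 4090 is only eleven slots behind a head sequence number of 5 even though it is numerically larger, which is exactly the "behind but still within buf_size" case handled here.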
+ * it can be tid_agg_rx->buf_size behind and still be valid */ + diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK; + if (diff >= tid_agg_rx->buf_size) { + tid_agg_rx->reorder_buf_filtered = 0; + goto release; + } + filtered = filtered >> diff; + ssn += diff; + + /* update bitmap */ + for (i = 0; i < tid_agg_rx->buf_size; i++) { + int index = (ssn + i) % tid_agg_rx->buf_size; + + tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); + if (filtered & BIT_ULL(i)) + tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index); + } + + /* now process also frames that the filter marking released */ + ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); + +release: + spin_unlock_bh(&tid_agg_rx->reorder_lock); + + ieee80211_rx_handlers(&rx, &frames); + + out: + rcu_read_unlock(); +} +EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames); + +/* main receive path */ + +static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) +{ + return ether_addr_equal(raddr, addr) || + is_broadcast_ether_addr(raddr); +} + +static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) +{ + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct sk_buff *skb = rx->skb; + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); + bool multicast = is_multicast_ether_addr(hdr->addr1) || + ieee80211_is_s1g_beacon(hdr->frame_control); + + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + if (!bssid && !sdata->u.mgd.use_4addr) + return false; + if (ieee80211_is_first_frag(hdr->seq_ctrl) && + ieee80211_is_robust_mgmt_frame(skb) && !rx->sta) + return false; + if (multicast) + return true; + return ieee80211_is_our_addr(sdata, hdr->addr1, &rx->link_id); + case NL80211_IFTYPE_ADHOC: + if (!bssid) + return false; + if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || + ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) || + !is_valid_ether_addr(hdr->addr2)) + return false; + if (ieee80211_is_beacon(hdr->frame_control)) + return true; + if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) + return false; + if (!multicast && + !ether_addr_equal(sdata->vif.addr, hdr->addr1)) + return false; + if (!rx->sta) { + int rate_idx; + if (status->encoding != RX_ENC_LEGACY) + rate_idx = 0; /* TODO: HT/VHT rates */ + else + rate_idx = status->rate_idx; + ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, + BIT(rate_idx)); + } + return true; + case NL80211_IFTYPE_OCB: + if (!bssid) + return false; + if (!ieee80211_is_data_present(hdr->frame_control)) + return false; + if (!is_broadcast_ether_addr(bssid)) + return false; + if (!multicast && + !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) + return false; + if (!rx->sta) { + int rate_idx; + if (status->encoding != RX_ENC_LEGACY) + rate_idx = 0; /* TODO: HT rates */ + else + rate_idx = status->rate_idx; + ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, + BIT(rate_idx)); + } + return true; + case NL80211_IFTYPE_MESH_POINT: + if (ether_addr_equal(sdata->vif.addr, hdr->addr2)) + return false; + if (multicast) + return true; + return ether_addr_equal(sdata->vif.addr, hdr->addr1); + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_AP: + if (!bssid) + return ieee80211_is_our_addr(sdata, hdr->addr1, + &rx->link_id); + + if (!is_broadcast_ether_addr(bssid) && + !ieee80211_is_our_addr(sdata, bssid, NULL)) { + /* + * Accept public action frames even when the + * BSSID doesn't match, this is used for P2P + * and location updates. 
Note that mac80211 + * itself never looks at these frames. + */ + if (!multicast && + !ieee80211_is_our_addr(sdata, hdr->addr1, + &rx->link_id)) + return false; + if (ieee80211_is_public_action(hdr, skb->len)) + return true; + return ieee80211_is_beacon(hdr->frame_control); + } + + if (!ieee80211_has_tods(hdr->frame_control)) { + /* ignore data frames to TDLS-peers */ + if (ieee80211_is_data(hdr->frame_control)) + return false; + /* ignore action frames to TDLS-peers */ + if (ieee80211_is_action(hdr->frame_control) && + !is_broadcast_ether_addr(bssid) && + !ether_addr_equal(bssid, hdr->addr1)) + return false; + } + + /* + * 802.11-2016 Table 9-26 says that for data frames, A1 must be + * the BSSID - we've checked that already but may have accepted + * the wildcard (ff:ff:ff:ff:ff:ff). + * + * It also says: + * The BSSID of the Data frame is determined as follows: + * a) If the STA is contained within an AP or is associated + * with an AP, the BSSID is the address currently in use + * by the STA contained in the AP. + * + * So we should not accept data frames with an address that's + * multicast. + * + * Accepting it also opens a security problem because stations + * could encrypt it with the GTK and inject traffic that way. + */ + if (ieee80211_is_data(hdr->frame_control) && multicast) + return false; + + return true; + case NL80211_IFTYPE_P2P_DEVICE: + return ieee80211_is_public_action(hdr, skb->len) || + ieee80211_is_probe_req(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control) || + ieee80211_is_beacon(hdr->frame_control); + case NL80211_IFTYPE_NAN: + /* Currently no frames on NAN interface are allowed */ + return false; + default: + break; + } + + WARN_ON_ONCE(1); + return false; +} + +void ieee80211_check_fast_rx(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_key *key; + struct ieee80211_fast_rx fastrx = { + .dev = sdata->dev, + .vif_type = sdata->vif.type, + .control_port_protocol = sdata->control_port_protocol, + }, *old, *new = NULL; + u32 offload_flags; + bool set_offload = false; + bool assign = false; + bool offload; + + /* use sparse to check that we don't return without updating */ + __acquire(check_fast_rx); + + BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header)); + BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN); + ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header); + ether_addr_copy(fastrx.vif_addr, sdata->vif.addr); + + fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS); + + /* fast-rx doesn't do reordering */ + if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) && + !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) + goto clear; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + if (sta->sta.tdls) { + fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); + fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); + fastrx.expected_ds_bits = 0; + } else { + fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); + fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3); + fastrx.expected_ds_bits = + cpu_to_le16(IEEE80211_FCTL_FROMDS); + } + + if (sdata->u.mgd.use_4addr && !sta->sta.tdls) { + fastrx.expected_ds_bits |= + cpu_to_le16(IEEE80211_FCTL_TODS); + fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); + fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); + } + + if (!sdata->u.mgd.powersave) + break; + + /* software powersave is a huge mess, avoid all of it */ + if (ieee80211_hw_check(&local->hw, 
PS_NULLFUNC_STACK)) + goto clear; + if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && + !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) + goto clear; + break; + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_AP: + /* parallel-rx requires this, at least with calls to + * ieee80211_sta_ps_transition() + */ + if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) + goto clear; + fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); + fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); + fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS); + + fastrx.internal_forward = + !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && + (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || + !sdata->u.vlan.sta); + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + sdata->u.vlan.sta) { + fastrx.expected_ds_bits |= + cpu_to_le16(IEEE80211_FCTL_FROMDS); + fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); + fastrx.internal_forward = 0; + } + + break; + case NL80211_IFTYPE_MESH_POINT: + fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_FROMDS | + IEEE80211_FCTL_TODS); + fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); + fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); + break; + default: + goto clear; + } + + if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + goto clear; + + rcu_read_lock(); + key = rcu_dereference(sta->ptk[sta->ptk_idx]); + if (!key) + key = rcu_dereference(sdata->default_unicast_key); + if (key) { + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_TKIP: + /* we don't want to deal with MMIC in fast-rx */ + goto clear_rcu; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + break; + default: + /* We also don't want to deal with + * WEP or cipher scheme. 
+ */ + goto clear_rcu; + } + + fastrx.key = true; + fastrx.icv_len = key->conf.icv_len; + } + + assign = true; + clear_rcu: + rcu_read_unlock(); + clear: + __release(check_fast_rx); + + if (assign) + new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL); + + offload_flags = get_bss_sdata(sdata)->vif.offload_flags; + offload = offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED; + + if (assign && offload) + set_offload = !test_and_set_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD); + else + set_offload = test_and_clear_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD); + + if (set_offload) + drv_sta_set_decap_offload(local, sdata, &sta->sta, assign); + + spin_lock_bh(&sta->lock); + old = rcu_dereference_protected(sta->fast_rx, true); + rcu_assign_pointer(sta->fast_rx, new); + spin_unlock_bh(&sta->lock); + + if (old) + kfree_rcu(old, rcu_head); +} + +void ieee80211_clear_fast_rx(struct sta_info *sta) +{ + struct ieee80211_fast_rx *old; + + spin_lock_bh(&sta->lock); + old = rcu_dereference_protected(sta->fast_rx, true); + RCU_INIT_POINTER(sta->fast_rx, NULL); + spin_unlock_bh(&sta->lock); + + if (old) + kfree_rcu(old, rcu_head); +} + +void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + lockdep_assert_held(&local->sta_mtx); + + list_for_each_entry(sta, &local->sta_list, list) { + if (sdata != sta->sdata && + (!sta->sdata->bss || sta->sdata->bss != sdata->bss)) + continue; + ieee80211_check_fast_rx(sta); + } +} + +void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + + mutex_lock(&local->sta_mtx); + __ieee80211_check_fast_rx_iface(sdata); + mutex_unlock(&local->sta_mtx); +} + +static void ieee80211_rx_8023(struct ieee80211_rx_data *rx, + struct ieee80211_fast_rx *fast_rx, + int orig_len) +{ + struct ieee80211_sta_rx_stats *stats; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + struct sta_info *sta = rx->sta; + struct link_sta_info *link_sta; + struct sk_buff *skb = rx->skb; + void *sa = skb->data + ETH_ALEN; + void *da = skb->data; + + if (rx->link_id >= 0) { + link_sta = rcu_dereference(sta->link[rx->link_id]); + if (WARN_ON_ONCE(!link_sta)) { + dev_kfree_skb(rx->skb); + return; + } + } else { + link_sta = &sta->deflink; + } + + stats = &link_sta->rx_stats; + if (fast_rx->uses_rss) + stats = this_cpu_ptr(link_sta->pcpu_rx_stats); + + /* statistics part of ieee80211_rx_h_sta_process() */ + if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { + stats->last_signal = status->signal; + if (!fast_rx->uses_rss) + ewma_signal_add(&link_sta->rx_stats_avg.signal, + -status->signal); + } + + if (status->chains) { + int i; + + stats->chains = status->chains; + for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { + int signal = status->chain_signal[i]; + + if (!(status->chains & BIT(i))) + continue; + + stats->chain_signal_last[i] = signal; + if (!fast_rx->uses_rss) + ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i], + -signal); + } + } + /* end of statistics */ + + stats->last_rx = jiffies; + stats->last_rate = sta_stats_encode_rate(status); + + stats->fragments++; + stats->packets++; + + skb->dev = fast_rx->dev; + + dev_sw_netstats_rx_add(fast_rx->dev, skb->len); + + /* The seqno index has the same property as needed + * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS + * for non-QoS-data frames. Here we know it's a data + * frame, so count MSDUs. 
+ */ + u64_stats_update_begin(&stats->syncp); + stats->msdu[rx->seqno_idx]++; + stats->bytes += orig_len; + u64_stats_update_end(&stats->syncp); + + if (fast_rx->internal_forward) { + struct sk_buff *xmit_skb = NULL; + if (is_multicast_ether_addr(da)) { + xmit_skb = skb_copy(skb, GFP_ATOMIC); + } else if (!ether_addr_equal(da, sa) && + sta_info_get(rx->sdata, da)) { + xmit_skb = skb; + skb = NULL; + } + + if (xmit_skb) { + /* + * Send to wireless media and increase priority by 256 + * to keep the received priority instead of + * reclassifying the frame (see cfg80211_classify8021d). + */ + xmit_skb->priority += 256; + xmit_skb->protocol = htons(ETH_P_802_3); + skb_reset_network_header(xmit_skb); + skb_reset_mac_header(xmit_skb); + dev_queue_xmit(xmit_skb); + } + + if (!skb) + return; + } + + /* deliver to local stack */ + skb->protocol = eth_type_trans(skb, fast_rx->dev); + ieee80211_deliver_skb_to_local_stack(skb, rx); +} + +static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, + struct ieee80211_fast_rx *fast_rx) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + static ieee80211_rx_result res; + int orig_len = skb->len; + int hdrlen = ieee80211_hdrlen(hdr->frame_control); + int snap_offs = hdrlen; + struct { + u8 snap[sizeof(rfc1042_header)]; + __be16 proto; + } *payload __aligned(2); + struct { + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; + } addrs __aligned(2); + struct ieee80211_sta_rx_stats *stats; + + /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write + * to a common data structure; drivers can implement that per queue + * but we don't have that information in mac80211 + */ + if (!(status->flag & RX_FLAG_DUP_VALIDATED)) + return false; + +#define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED) + + /* If using encryption, we also need to have: + * - PN_VALIDATED: similar, but the implementation is tricky + * - DECRYPTED: necessary for PN_VALIDATED + */ + if (fast_rx->key && + (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS) + return false; + + if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) + return false; + + if (unlikely(ieee80211_is_frag(hdr))) + return false; + + /* Since our interface address cannot be multicast, this + * implicitly also rejects multicast frames without the + * explicit check. + * + * We shouldn't get any *data* frames not addressed to us + * (AP mode will accept multicast *management* frames), but + * punting here will make it go through the full checks in + * ieee80211_accept_frame(). + */ + if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1)) + return false; + + if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | + IEEE80211_FCTL_TODS)) != + fast_rx->expected_ds_bits) + return false; + + /* assign the key to drop unencrypted frames (later) + * and strip the IV/MIC if necessary + */ + if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) { + /* GCMP header length is the same */ + snap_offs += IEEE80211_CCMP_HDR_LEN; + } + + if (!ieee80211_vif_is_mesh(&rx->sdata->vif) && + !(status->rx_flags & IEEE80211_RX_AMSDU)) { + if (!pskb_may_pull(skb, snap_offs + sizeof(*payload))) + return false; + + payload = (void *)(skb->data + snap_offs); + + if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr)) + return false; + + /* Don't handle these here since they require special code. 
+ * Accept AARP and IPX even though they should come with a + * bridge-tunnel header - but if we get them this way then + * there's little point in discarding them. + */ + if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) || + payload->proto == fast_rx->control_port_protocol)) + return false; + } + + /* after this point, don't punt to the slowpath! */ + + if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) && + pskb_trim(skb, skb->len - fast_rx->icv_len)) + goto drop; + + if (rx->key && !ieee80211_has_protected(hdr->frame_control)) + goto drop; + + if (status->rx_flags & IEEE80211_RX_AMSDU) { + if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) != + RX_QUEUED) + goto drop; + + return true; + } + + /* do the header conversion - first grab the addresses */ + ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs); + ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs); + if (ieee80211_vif_is_mesh(&rx->sdata->vif)) { + skb_pull(skb, snap_offs - 2); + put_unaligned_be16(skb->len - 2, skb->data); + } else { + skb_postpull_rcsum(skb, skb->data + snap_offs, + sizeof(rfc1042_header) + 2); + + /* remove the SNAP but leave the ethertype */ + skb_pull(skb, snap_offs + sizeof(rfc1042_header)); + } + /* push the addresses in front */ + memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs)); + + res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb); + switch (res) { + case RX_QUEUED: + return true; + case RX_CONTINUE: + break; + default: + goto drop; + } + + ieee80211_rx_8023(rx, fast_rx, orig_len); + + return true; + drop: + dev_kfree_skb(skb); + + if (fast_rx->uses_rss) + stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats); + else + stats = &rx->link_sta->rx_stats; + + stats->dropped++; + return true; +} + +/* + * This function returns whether or not the SKB + * was destined for RX processing or not, which, + * if consume is true, is equivalent to whether + * or not the skb was consumed. + */ +static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, + struct sk_buff *skb, bool consume) +{ + struct ieee80211_local *local = rx->local; + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct ieee80211_hdr *hdr = (void *)skb->data; + struct link_sta_info *link_sta = rx->link_sta; + struct ieee80211_link_data *link = rx->link; + + rx->skb = skb; + + /* See if we can do fast-rx; if we have to copy we already lost, + * so punt in that case. We should never have to deliver a data + * frame to multiple interfaces anyway. + * + * We skip the ieee80211_accept_frame() call and do the necessary + * checking inside ieee80211_invoke_fast_rx(). 
+ */ + if (consume && rx->sta) { + struct ieee80211_fast_rx *fast_rx; + + fast_rx = rcu_dereference(rx->sta->fast_rx); + if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx)) + return true; + } + + if (!ieee80211_accept_frame(rx)) + return false; + + if (!consume) { + struct skb_shared_hwtstamps *shwt; + + rx->skb = skb_copy(skb, GFP_ATOMIC); + if (!rx->skb) { + if (net_ratelimit()) + wiphy_debug(local->hw.wiphy, + "failed to copy skb for %s\n", + sdata->name); + return true; + } + + /* skb_copy() does not copy the hw timestamps, so copy it + * explicitly + */ + shwt = skb_hwtstamps(rx->skb); + shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp; + + /* Update the hdr pointer to the new skb for translation below */ + hdr = (struct ieee80211_hdr *)rx->skb->data; + } + + if (unlikely(rx->sta && rx->sta->sta.mlo) && + is_unicast_ether_addr(hdr->addr1) && + !ieee80211_is_probe_resp(hdr->frame_control) && + !ieee80211_is_beacon(hdr->frame_control)) { + /* translate to MLD addresses */ + if (ether_addr_equal(link->conf->addr, hdr->addr1)) + ether_addr_copy(hdr->addr1, rx->sdata->vif.addr); + if (ether_addr_equal(link_sta->addr, hdr->addr2)) + ether_addr_copy(hdr->addr2, rx->sta->addr); + /* translate A3 only if it's the BSSID */ + if (!ieee80211_has_tods(hdr->frame_control) && + !ieee80211_has_fromds(hdr->frame_control)) { + if (ether_addr_equal(link_sta->addr, hdr->addr3)) + ether_addr_copy(hdr->addr3, rx->sta->addr); + else if (ether_addr_equal(link->conf->addr, hdr->addr3)) + ether_addr_copy(hdr->addr3, rx->sdata->vif.addr); + } + /* not needed for A4 since it can only carry the SA */ + } + + ieee80211_invoke_rx_handlers(rx); + return true; +} + +static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw, + struct ieee80211_sta *pubsta, + struct sk_buff *skb, + struct list_head *list) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_fast_rx *fast_rx; + struct ieee80211_rx_data rx; + struct sta_info *sta; + int link_id = -1; + + memset(&rx, 0, sizeof(rx)); + rx.skb = skb; + rx.local = local; + rx.list = list; + rx.link_id = -1; + + I802_DEBUG_INC(local->dot11ReceivedFragmentCount); + + /* drop frame if too short for header */ + if (skb->len < sizeof(struct ethhdr)) + goto drop; + + if (!pubsta) + goto drop; + + if (status->link_valid) + link_id = status->link_id; + + /* + * TODO: Should the frame be dropped if the right link_id is not + * available? Or may be it is fine in the current form to proceed with + * the frame processing because with frame being in 802.3 format, + * link_id is used only for stats purpose and updating the stats on + * the deflink is fine? + */ + sta = container_of(pubsta, struct sta_info, sta); + if (!ieee80211_rx_data_set_sta(&rx, sta, link_id)) + goto drop; + + fast_rx = rcu_dereference(rx.sta->fast_rx); + if (!fast_rx) + goto drop; + + ieee80211_rx_8023(&rx, fast_rx, skb->len); + return; + +drop: + dev_kfree_skb(skb); +} + +static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx, + struct sk_buff *skb, bool consume) +{ + struct link_sta_info *link_sta; + struct ieee80211_hdr *hdr = (void *)skb->data; + struct sta_info *sta; + int link_id = -1; + + /* + * Look up link station first, in case there's a + * chance that they might have a link address that + * is identical to the MLD address, that way we'll + * have the link information if needed. 
+ */ + link_sta = link_sta_info_get_bss(rx->sdata, hdr->addr2); + if (link_sta) { + sta = link_sta->sta; + link_id = link_sta->link_id; + } else { + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + + sta = sta_info_get_bss(rx->sdata, hdr->addr2); + if (status->link_valid) + link_id = status->link_id; + } + + if (!ieee80211_rx_data_set_sta(rx, sta, link_id)) + return false; + + return ieee80211_prepare_and_rx_handle(rx, skb, consume); +} + +/* + * This is the actual Rx frames handler. as it belongs to Rx path it must + * be called with rcu_read_lock protection. + */ +static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, + struct ieee80211_sta *pubsta, + struct sk_buff *skb, + struct list_head *list) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_sub_if_data *sdata; + struct ieee80211_hdr *hdr; + __le16 fc; + struct ieee80211_rx_data rx; + struct ieee80211_sub_if_data *prev; + struct rhlist_head *tmp; + int err = 0; + + fc = ((struct ieee80211_hdr *)skb->data)->frame_control; + memset(&rx, 0, sizeof(rx)); + rx.skb = skb; + rx.local = local; + rx.list = list; + rx.link_id = -1; + + if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) + I802_DEBUG_INC(local->dot11ReceivedFragmentCount); + + if (ieee80211_is_mgmt(fc)) { + /* drop frame if too short for header */ + if (skb->len < ieee80211_hdrlen(fc)) + err = -ENOBUFS; + else + err = skb_linearize(skb); + } else { + err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); + } + + if (err) { + dev_kfree_skb(skb); + return; + } + + hdr = (struct ieee80211_hdr *)skb->data; + ieee80211_parse_qos(&rx); + ieee80211_verify_alignment(&rx); + + if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) || + ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_s1g_beacon(hdr->frame_control))) + ieee80211_scan_rx(local, skb); + + if (ieee80211_is_data(fc)) { + struct sta_info *sta, *prev_sta; + int link_id = -1; + + if (status->link_valid) + link_id = status->link_id; + + if (pubsta) { + sta = container_of(pubsta, struct sta_info, sta); + if (!ieee80211_rx_data_set_sta(&rx, sta, link_id)) + goto out; + + /* + * In MLO connection, fetch the link_id using addr2 + * when the driver does not pass link_id in status. + * When the address translation is already performed by + * driver/hw, the valid link_id must be passed in + * status. 
+ */ + + if (!status->link_valid && pubsta->mlo) { + struct ieee80211_hdr *hdr = (void *)skb->data; + struct link_sta_info *link_sta; + + link_sta = link_sta_info_get_bss(rx.sdata, + hdr->addr2); + if (!link_sta) + goto out; + + ieee80211_rx_data_set_link(&rx, link_sta->link_id); + } + + if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) + return; + goto out; + } + + prev_sta = NULL; + + for_each_sta_info(local, hdr->addr2, sta, tmp) { + if (!prev_sta) { + prev_sta = sta; + continue; + } + + rx.sdata = prev_sta->sdata; + if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id)) + goto out; + + if (!status->link_valid && prev_sta->sta.mlo) + continue; + + ieee80211_prepare_and_rx_handle(&rx, skb, false); + + prev_sta = sta; + } + + if (prev_sta) { + rx.sdata = prev_sta->sdata; + if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id)) + goto out; + + if (!status->link_valid && prev_sta->sta.mlo) + goto out; + + if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) + return; + goto out; + } + } + + prev = NULL; + + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + + if (sdata->vif.type == NL80211_IFTYPE_MONITOR || + sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + continue; + + /* + * frame is destined for this interface, but if it's + * not also for the previous one we handle that after + * the loop to avoid copying the SKB once too much + */ + + if (!prev) { + prev = sdata; + continue; + } + + rx.sdata = prev; + ieee80211_rx_for_interface(&rx, skb, false); + + prev = sdata; + } + + if (prev) { + rx.sdata = prev; + + if (ieee80211_rx_for_interface(&rx, skb, true)) + return; + } + + out: + dev_kfree_skb(skb); +} + +/* + * This is the receive path handler. It is called by a low level driver when an + * 802.11 MPDU is received from the hardware. + */ +void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, + struct sk_buff *skb, struct list_head *list) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_rate *rate = NULL; + struct ieee80211_supported_band *sband; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + WARN_ON_ONCE(softirq_count() == 0); + + if (WARN_ON(status->band >= NUM_NL80211_BANDS)) + goto drop; + + sband = local->hw.wiphy->bands[status->band]; + if (WARN_ON(!sband)) + goto drop; + + /* + * If we're suspending, it is possible although not too likely + * that we'd be receiving frames after having already partially + * quiesced the stack. We can't process such frames then since + * that might, for example, cause stations to be added or other + * driver callbacks be invoked. + */ + if (unlikely(local->quiescing || local->suspended)) + goto drop; + + /* We might be during a HW reconfig, prevent Rx for the same reason */ + if (unlikely(local->in_reconfig)) + goto drop; + + /* + * The same happens when we're not even started, + * but that's worth a warning. + */ + if (WARN_ON(!local->started)) + goto drop; + + if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { + /* + * Validate the rate, unless a PLCP error means that + * we probably can't have a valid rate here anyway. + */ + + switch (status->encoding) { + case RX_ENC_HT: + /* + * rate_idx is MCS index, which can be [0-76] + * as documented on: + * + * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n + * + * Anything else would be some sort of driver or + * hardware error. The driver should catch hardware + * errors. 
+ */ + if (WARN(status->rate_idx > 76, + "Rate marked as an HT rate but passed " + "status->rate_idx is not " + "an MCS index [0-76]: %d (0x%02x)\n", + status->rate_idx, + status->rate_idx)) + goto drop; + break; + case RX_ENC_VHT: + if (WARN_ONCE(status->rate_idx > 11 || + !status->nss || + status->nss > 8, + "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", + status->rate_idx, status->nss)) + goto drop; + break; + case RX_ENC_HE: + if (WARN_ONCE(status->rate_idx > 11 || + !status->nss || + status->nss > 8, + "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n", + status->rate_idx, status->nss)) + goto drop; + break; + case RX_ENC_EHT: + if (WARN_ONCE(status->rate_idx > 15 || + !status->nss || + status->nss > 8 || + status->eht.gi > NL80211_RATE_INFO_EHT_GI_3_2, + "Rate marked as an EHT rate but data is invalid: MCS:%d, NSS:%d, GI:%d\n", + status->rate_idx, status->nss, status->eht.gi)) + goto drop; + break; + default: + WARN_ON_ONCE(1); + fallthrough; + case RX_ENC_LEGACY: + if (WARN_ON(status->rate_idx >= sband->n_bitrates)) + goto drop; + rate = &sband->bitrates[status->rate_idx]; + } + } + + if (WARN_ON_ONCE(status->link_id >= IEEE80211_LINK_UNSPECIFIED)) + goto drop; + + status->rx_flags = 0; + + kcov_remote_start_common(skb_get_kcov_handle(skb)); + + /* + * Frames with failed FCS/PLCP checksum are not returned, + * all other frames are returned without radiotap header + * if it was previously present. + * Also, frames with less than 16 bytes are dropped. + */ + if (!(status->flag & RX_FLAG_8023)) + skb = ieee80211_rx_monitor(local, skb, rate); + if (skb) { + if ((status->flag & RX_FLAG_8023) || + ieee80211_is_data_present(hdr->frame_control)) + ieee80211_tpt_led_trig_rx(local, skb->len); + + if (status->flag & RX_FLAG_8023) + __ieee80211_rx_handle_8023(hw, pubsta, skb, list); + else + __ieee80211_rx_handle_packet(hw, pubsta, skb, list); + } + + kcov_remote_stop(); + return; + drop: + kfree_skb(skb); +} +EXPORT_SYMBOL(ieee80211_rx_list); + +void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, + struct sk_buff *skb, struct napi_struct *napi) +{ + struct sk_buff *tmp; + LIST_HEAD(list); + + + /* + * key references and virtual interfaces are protected using RCU + * and this requires that we are in a read-side RCU section during + * receive processing + */ + rcu_read_lock(); + ieee80211_rx_list(hw, pubsta, skb, &list); + rcu_read_unlock(); + + if (!napi) { + netif_receive_skb_list(&list); + return; + } + + list_for_each_entry_safe(skb, tmp, &list, list) { + skb_list_del_init(skb); + napi_gro_receive(napi, skb); + } +} +EXPORT_SYMBOL(ieee80211_rx_napi); + +/* This is a version of the rx handler that can be called from hard irq + * context. 
Post the skb on the queue and schedule the tasklet */ +void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ieee80211_local *local = hw_to_local(hw); + + BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); + + skb->pkt_type = IEEE80211_RX_MSG; + skb_queue_tail(&local->skb_queue, skb); + tasklet_schedule(&local->tasklet); +} +EXPORT_SYMBOL(ieee80211_rx_irqsafe); diff --git a/net/mac80211/s1g.c b/net/mac80211/s1g.c new file mode 100644 index 0000000000..c1f964e999 --- /dev/null +++ b/net/mac80211/s1g.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * S1G handling + * Copyright(c) 2020 Adapt-IP + */ +#include <linux/ieee80211.h> +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "driver-ops.h" + +void ieee80211_s1g_sta_rate_init(struct sta_info *sta) +{ + /* avoid indicating legacy bitrates for S1G STAs */ + sta->deflink.tx_stats.last_rate.flags |= IEEE80211_TX_RC_S1G_MCS; + sta->deflink.rx_stats.last_rate = + STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_S1G); +} + +bool ieee80211_s1g_is_twt_setup(struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + + if (likely(!ieee80211_is_action(mgmt->frame_control))) + return false; + + if (likely(mgmt->u.action.category != WLAN_CATEGORY_S1G)) + return false; + + return mgmt->u.action.u.s1g.action_code == WLAN_S1G_TWT_SETUP; +} + +static void +ieee80211_s1g_send_twt_setup(struct ieee80211_sub_if_data *sdata, const u8 *da, + const u8 *bssid, struct ieee80211_twt_setup *twt) +{ + int len = IEEE80211_MIN_ACTION_SIZE + 4 + twt->length; + struct ieee80211_local *local = sdata->local; + struct ieee80211_mgmt *mgmt; + struct sk_buff *skb; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + len); + if (!skb) + return; + + skb_reserve(skb, local->hw.extra_tx_headroom); + mgmt = skb_put_zero(skb, len); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, bssid, ETH_ALEN); + + mgmt->u.action.category = WLAN_CATEGORY_S1G; + mgmt->u.action.u.s1g.action_code = WLAN_S1G_TWT_SETUP; + memcpy(mgmt->u.action.u.s1g.variable, twt, 3 + twt->length); + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | + IEEE80211_TX_INTFL_MLME_CONN_TX | + IEEE80211_TX_CTL_REQ_TX_STATUS; + ieee80211_tx_skb(sdata, skb); +} + +static void +ieee80211_s1g_send_twt_teardown(struct ieee80211_sub_if_data *sdata, + const u8 *da, const u8 *bssid, u8 flowid) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_mgmt *mgmt; + struct sk_buff *skb; + u8 *id; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + + IEEE80211_MIN_ACTION_SIZE + 2); + if (!skb) + return; + + skb_reserve(skb, local->hw.extra_tx_headroom); + mgmt = skb_put_zero(skb, IEEE80211_MIN_ACTION_SIZE + 2); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, bssid, ETH_ALEN); + + mgmt->u.action.category = WLAN_CATEGORY_S1G; + mgmt->u.action.u.s1g.action_code = WLAN_S1G_TWT_TEARDOWN; + id = (u8 *)mgmt->u.action.u.s1g.variable; + *id = flowid; + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | + IEEE80211_TX_CTL_REQ_TX_STATUS; + ieee80211_tx_skb(sdata, skb); +} + +static void +ieee80211_s1g_rx_twt_setup(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, struct sk_buff *skb) +{ + struct ieee80211_mgmt 
*mgmt = (void *)skb->data; + struct ieee80211_twt_setup *twt = (void *)mgmt->u.action.u.s1g.variable; + struct ieee80211_twt_params *twt_agrt = (void *)twt->params; + + twt_agrt->req_type &= cpu_to_le16(~IEEE80211_TWT_REQTYPE_REQUEST); + + /* broadcast TWT not supported yet */ + if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST) { + twt_agrt->req_type &= + ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD); + twt_agrt->req_type |= + le16_encode_bits(TWT_SETUP_CMD_REJECT, + IEEE80211_TWT_REQTYPE_SETUP_CMD); + goto out; + } + + /* TWT Information not supported yet */ + twt->control |= IEEE80211_TWT_CONTROL_RX_DISABLED; + + drv_add_twt_setup(sdata->local, sdata, &sta->sta, twt); +out: + ieee80211_s1g_send_twt_setup(sdata, mgmt->sa, sdata->vif.addr, twt); +} + +static void +ieee80211_s1g_rx_twt_teardown(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + + drv_twt_teardown_request(sdata->local, sdata, &sta->sta, + mgmt->u.action.u.s1g.variable[0]); +} + +static void +ieee80211_s1g_tx_twt_setup_fail(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + struct ieee80211_twt_setup *twt = (void *)mgmt->u.action.u.s1g.variable; + struct ieee80211_twt_params *twt_agrt = (void *)twt->params; + u8 flowid = le16_get_bits(twt_agrt->req_type, + IEEE80211_TWT_REQTYPE_FLOWID); + + drv_twt_teardown_request(sdata->local, sdata, &sta->sta, flowid); + + ieee80211_s1g_send_twt_teardown(sdata, mgmt->sa, sdata->vif.addr, + flowid); +} + +void ieee80211_s1g_rx_twt_action(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + mutex_lock(&local->sta_mtx); + + sta = sta_info_get_bss(sdata, mgmt->sa); + if (!sta) + goto out; + + switch (mgmt->u.action.u.s1g.action_code) { + case WLAN_S1G_TWT_SETUP: + ieee80211_s1g_rx_twt_setup(sdata, sta, skb); + break; + case WLAN_S1G_TWT_TEARDOWN: + ieee80211_s1g_rx_twt_teardown(sdata, sta, skb); + break; + default: + break; + } + +out: + mutex_unlock(&local->sta_mtx); +} + +void ieee80211_s1g_status_twt_action(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + mutex_lock(&local->sta_mtx); + + sta = sta_info_get_bss(sdata, mgmt->da); + if (!sta) + goto out; + + switch (mgmt->u.action.u.s1g.action_code) { + case WLAN_S1G_TWT_SETUP: + /* process failed twt setup frames */ + ieee80211_s1g_tx_twt_setup_fail(sdata, sta, skb); + break; + default: + break; + } + +out: + mutex_unlock(&local->sta_mtx); +} diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c new file mode 100644 index 0000000000..68ec2124c3 --- /dev/null +++ b/net/mac80211/scan.c @@ -0,0 +1,1454 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Scanning implementation + * + * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> + * Copyright 2004, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. 
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2007, Michael Wu <flamingice@sourmilk.net> + * Copyright 2013-2015 Intel Mobile Communications GmbH + * Copyright 2016-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2023 Intel Corporation + */ + +#include <linux/if_arp.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <net/sch_generic.h> +#include <linux/slab.h> +#include <linux/export.h> +#include <linux/random.h> +#include <net/mac80211.h> + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "mesh.h" + +#define IEEE80211_PROBE_DELAY (HZ / 33) +#define IEEE80211_CHANNEL_TIME (HZ / 33) +#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 9) + +void ieee80211_rx_bss_put(struct ieee80211_local *local, + struct ieee80211_bss *bss) +{ + if (!bss) + return; + cfg80211_put_bss(local->hw.wiphy, + container_of((void *)bss, struct cfg80211_bss, priv)); +} + +static bool is_uapsd_supported(struct ieee802_11_elems *elems) +{ + u8 qos_info; + + if (elems->wmm_info && elems->wmm_info_len == 7 + && elems->wmm_info[5] == 1) + qos_info = elems->wmm_info[6]; + else if (elems->wmm_param && elems->wmm_param_len == 24 + && elems->wmm_param[5] == 1) + qos_info = elems->wmm_param[6]; + else + /* no valid wmm information or parameter element found */ + return false; + + return qos_info & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD; +} + +struct inform_bss_update_data { + struct ieee80211_rx_status *rx_status; + bool beacon; +}; + +void ieee80211_inform_bss(struct wiphy *wiphy, + struct cfg80211_bss *cbss, + const struct cfg80211_bss_ies *ies, + void *data) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct inform_bss_update_data *update_data = data; + struct ieee80211_bss *bss = (void *)cbss->priv; + struct ieee80211_rx_status *rx_status; + struct ieee802_11_elems *elems; + int clen, srlen; + + /* This happens while joining an IBSS */ + if (!update_data) + return; + + elems = ieee802_11_parse_elems(ies->data, ies->len, false, NULL); + if (!elems) + return; + + rx_status = update_data->rx_status; + + if (update_data->beacon) + bss->device_ts_beacon = rx_status->device_timestamp; + else + bss->device_ts_presp = rx_status->device_timestamp; + + if (elems->parse_error) { + if (update_data->beacon) + bss->corrupt_data |= IEEE80211_BSS_CORRUPT_BEACON; + else + bss->corrupt_data |= IEEE80211_BSS_CORRUPT_PROBE_RESP; + } else { + if (update_data->beacon) + bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_BEACON; + else + bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_PROBE_RESP; + } + + /* save the ERP value so that it is available at association time */ + if (elems->erp_info && (!elems->parse_error || + !(bss->valid_data & IEEE80211_BSS_VALID_ERP))) { + bss->erp_value = elems->erp_info[0]; + bss->has_erp_value = true; + if (!elems->parse_error) + bss->valid_data |= IEEE80211_BSS_VALID_ERP; + } + + /* replace old supported rates if we get new values */ + if (!elems->parse_error || + !(bss->valid_data & IEEE80211_BSS_VALID_RATES)) { + srlen = 0; + if (elems->supp_rates) { + clen = IEEE80211_MAX_SUPP_RATES; + if (clen > elems->supp_rates_len) + clen = elems->supp_rates_len; + memcpy(bss->supp_rates, elems->supp_rates, clen); + srlen += clen; + } + if (elems->ext_supp_rates) { + clen = IEEE80211_MAX_SUPP_RATES - srlen; + if (clen > elems->ext_supp_rates_len) + clen = elems->ext_supp_rates_len; + memcpy(bss->supp_rates + srlen, elems->ext_supp_rates, + clen); + srlen += clen; + } + if (srlen) { + bss->supp_rates_len = srlen; + if (!elems->parse_error) + bss->valid_data |= 
IEEE80211_BSS_VALID_RATES; + } + } + + if (!elems->parse_error || + !(bss->valid_data & IEEE80211_BSS_VALID_WMM)) { + bss->wmm_used = elems->wmm_param || elems->wmm_info; + bss->uapsd_supported = is_uapsd_supported(elems); + if (!elems->parse_error) + bss->valid_data |= IEEE80211_BSS_VALID_WMM; + } + + if (update_data->beacon) { + struct ieee80211_supported_band *sband = + local->hw.wiphy->bands[rx_status->band]; + if (!(rx_status->encoding == RX_ENC_HT) && + !(rx_status->encoding == RX_ENC_VHT)) + bss->beacon_rate = + &sband->bitrates[rx_status->rate_idx]; + } + + if (elems->vht_cap_elem) + bss->vht_cap_info = + le32_to_cpu(elems->vht_cap_elem->vht_cap_info); + else + bss->vht_cap_info = 0; + + kfree(elems); +} + +struct ieee80211_bss * +ieee80211_bss_info_update(struct ieee80211_local *local, + struct ieee80211_rx_status *rx_status, + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee80211_channel *channel) +{ + bool beacon = ieee80211_is_beacon(mgmt->frame_control) || + ieee80211_is_s1g_beacon(mgmt->frame_control); + struct cfg80211_bss *cbss; + struct inform_bss_update_data update_data = { + .rx_status = rx_status, + .beacon = beacon, + }; + struct cfg80211_inform_bss bss_meta = { + .boottime_ns = rx_status->boottime_ns, + .drv_data = (void *)&update_data, + }; + bool signal_valid; + struct ieee80211_sub_if_data *scan_sdata; + + if (rx_status->flag & RX_FLAG_NO_SIGNAL_VAL) + bss_meta.signal = 0; /* invalid signal indication */ + else if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) + bss_meta.signal = rx_status->signal * 100; + else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC)) + bss_meta.signal = (rx_status->signal * 100) / local->hw.max_signal; + + bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_20; + if (rx_status->bw == RATE_INFO_BW_5) + bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_5; + else if (rx_status->bw == RATE_INFO_BW_10) + bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_10; + + bss_meta.chan = channel; + + rcu_read_lock(); + scan_sdata = rcu_dereference(local->scan_sdata); + if (scan_sdata && scan_sdata->vif.type == NL80211_IFTYPE_STATION && + scan_sdata->vif.cfg.assoc && + ieee80211_have_rx_timestamp(rx_status)) { + bss_meta.parent_tsf = + ieee80211_calculate_rx_timestamp(local, rx_status, + len + FCS_LEN, 24); + ether_addr_copy(bss_meta.parent_bssid, + scan_sdata->vif.bss_conf.bssid); + } + rcu_read_unlock(); + + cbss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta, + mgmt, len, GFP_ATOMIC); + if (!cbss) + return NULL; + + /* In case the signal is invalid update the status */ + signal_valid = channel == cbss->channel; + if (!signal_valid) + rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; + + return (void *)cbss->priv; +} + +static bool ieee80211_scan_accept_presp(struct ieee80211_sub_if_data *sdata, + u32 scan_flags, const u8 *da) +{ + if (!sdata) + return false; + /* accept broadcast for OCE */ + if (scan_flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP && + is_broadcast_ether_addr(da)) + return true; + if (scan_flags & NL80211_SCAN_FLAG_RANDOM_ADDR) + return true; + return ether_addr_equal(da, sdata->vif.addr); +} + +void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_sub_if_data *sdata1, *sdata2; + struct ieee80211_mgmt *mgmt = (void *)skb->data; + struct ieee80211_bss *bss; + struct ieee80211_channel *channel; + size_t min_hdr_len = offsetof(struct ieee80211_mgmt, + u.probe_resp.variable); + + if (!ieee80211_is_probe_resp(mgmt->frame_control) && + 
!ieee80211_is_beacon(mgmt->frame_control) && + !ieee80211_is_s1g_beacon(mgmt->frame_control)) + return; + + if (ieee80211_is_s1g_beacon(mgmt->frame_control)) { + if (ieee80211_is_s1g_short_beacon(mgmt->frame_control)) + min_hdr_len = offsetof(struct ieee80211_ext, + u.s1g_short_beacon.variable); + else + min_hdr_len = offsetof(struct ieee80211_ext, + u.s1g_beacon); + } + + if (skb->len < min_hdr_len) + return; + + sdata1 = rcu_dereference(local->scan_sdata); + sdata2 = rcu_dereference(local->sched_scan_sdata); + + if (likely(!sdata1 && !sdata2)) + return; + + if (test_and_clear_bit(SCAN_BEACON_WAIT, &local->scanning)) { + /* + * we were passive scanning because of radar/no-IR, but + * the beacon/proberesp rx gives us an opportunity to upgrade + * to active scan + */ + set_bit(SCAN_BEACON_DONE, &local->scanning); + wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0); + } + + if (ieee80211_is_probe_resp(mgmt->frame_control)) { + struct cfg80211_scan_request *scan_req; + struct cfg80211_sched_scan_request *sched_scan_req; + u32 scan_req_flags = 0, sched_scan_req_flags = 0; + + scan_req = rcu_dereference(local->scan_req); + sched_scan_req = rcu_dereference(local->sched_scan_req); + + if (scan_req) + scan_req_flags = scan_req->flags; + + if (sched_scan_req) + sched_scan_req_flags = sched_scan_req->flags; + + /* ignore ProbeResp to foreign address or non-bcast (OCE) + * unless scanning with randomised address + */ + if (!ieee80211_scan_accept_presp(sdata1, scan_req_flags, + mgmt->da) && + !ieee80211_scan_accept_presp(sdata2, sched_scan_req_flags, + mgmt->da)) + return; + } + + channel = ieee80211_get_channel_khz(local->hw.wiphy, + ieee80211_rx_status_to_khz(rx_status)); + + if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) + return; + + bss = ieee80211_bss_info_update(local, rx_status, + mgmt, skb->len, + channel); + if (bss) + ieee80211_rx_bss_put(local, bss); +} + +static void +ieee80211_prepare_scan_chandef(struct cfg80211_chan_def *chandef, + enum nl80211_bss_scan_width scan_width) +{ + memset(chandef, 0, sizeof(*chandef)); + switch (scan_width) { + case NL80211_BSS_CHAN_WIDTH_5: + chandef->width = NL80211_CHAN_WIDTH_5; + break; + case NL80211_BSS_CHAN_WIDTH_10: + chandef->width = NL80211_CHAN_WIDTH_10; + break; + default: + chandef->width = NL80211_CHAN_WIDTH_20_NOHT; + break; + } +} + +/* return false if no more work */ +static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct cfg80211_scan_request *req; + struct cfg80211_chan_def chandef; + u8 bands_used = 0; + int i, ielen, n_chans; + u32 flags = 0; + + req = rcu_dereference_protected(local->scan_req, + lockdep_is_held(&local->mtx)); + + if (test_bit(SCAN_HW_CANCELLED, &local->scanning)) + return false; + + if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) { + for (i = 0; i < req->n_channels; i++) { + local->hw_scan_req->req.channels[i] = req->channels[i]; + bands_used |= BIT(req->channels[i]->band); + } + + n_chans = req->n_channels; + } else { + do { + if (local->hw_scan_band == NUM_NL80211_BANDS) + return false; + + n_chans = 0; + + for (i = 0; i < req->n_channels; i++) { + if (req->channels[i]->band != + local->hw_scan_band) + continue; + local->hw_scan_req->req.channels[n_chans] = + req->channels[i]; + n_chans++; + bands_used |= BIT(req->channels[i]->band); + } + + local->hw_scan_band++; + } while (!n_chans); + } + + local->hw_scan_req->req.n_channels = n_chans; + ieee80211_prepare_scan_chandef(&chandef, req->scan_width); + + 
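/* Descriptive note (added comment, not in the original source): build the probe request IEs for the band(s) collected above and copy the remaining scan request parameters into the hardware scan request. */ +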
if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT) + flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT; + + ielen = ieee80211_build_preq_ies(sdata, + (u8 *)local->hw_scan_req->req.ie, + local->hw_scan_ies_bufsize, + &local->hw_scan_req->ies, + req->ie, req->ie_len, + bands_used, req->rates, &chandef, + flags); + local->hw_scan_req->req.ie_len = ielen; + local->hw_scan_req->req.no_cck = req->no_cck; + ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr); + ether_addr_copy(local->hw_scan_req->req.mac_addr_mask, + req->mac_addr_mask); + ether_addr_copy(local->hw_scan_req->req.bssid, req->bssid); + + return true; +} + +static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) +{ + struct ieee80211_local *local = hw_to_local(hw); + bool hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning); + bool was_scanning = local->scanning; + struct cfg80211_scan_request *scan_req; + struct ieee80211_sub_if_data *scan_sdata; + struct ieee80211_sub_if_data *sdata; + + lockdep_assert_held(&local->mtx); + + /* + * It's ok to abort a not-yet-running scan (that + * we have one at all will be verified by checking + * local->scan_req next), but not to complete it + * successfully. + */ + if (WARN_ON(!local->scanning && !aborted)) + aborted = true; + + if (WARN_ON(!local->scan_req)) + return; + + scan_sdata = rcu_dereference_protected(local->scan_sdata, + lockdep_is_held(&local->mtx)); + + if (hw_scan && !aborted && + !ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS) && + ieee80211_prep_hw_scan(scan_sdata)) { + int rc; + + rc = drv_hw_scan(local, + rcu_dereference_protected(local->scan_sdata, + lockdep_is_held(&local->mtx)), + local->hw_scan_req); + + if (rc == 0) + return; + + /* HW scan failed and is going to be reported as aborted, + * so clear old scan info. + */ + memset(&local->scan_info, 0, sizeof(local->scan_info)); + aborted = true; + } + + kfree(local->hw_scan_req); + local->hw_scan_req = NULL; + + scan_req = rcu_dereference_protected(local->scan_req, + lockdep_is_held(&local->mtx)); + + RCU_INIT_POINTER(local->scan_req, NULL); + RCU_INIT_POINTER(local->scan_sdata, NULL); + + local->scanning = 0; + local->scan_chandef.chan = NULL; + + synchronize_rcu(); + + if (scan_req != local->int_scan_req) { + local->scan_info.aborted = aborted; + cfg80211_scan_done(scan_req, &local->scan_info); + } + + /* Set power back to normal operating levels. */ + ieee80211_hw_config(local, 0); + + if (!hw_scan && was_scanning) { + ieee80211_configure_filter(local); + drv_sw_scan_complete(local, scan_sdata); + ieee80211_offchannel_return(local); + } + + ieee80211_recalc_idle(local); + + ieee80211_mlme_notify_scan_completed(local); + ieee80211_ibss_notify_scan_completed(local); + + /* Requeue all the work that might have been ignored while + * the scan was in progress; if there was none this will + * just be a no-op for the particular interface. 
+ */ + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (ieee80211_sdata_running(sdata)) + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); + } + + if (was_scanning) + ieee80211_start_next_roc(local); +} + +void ieee80211_scan_completed(struct ieee80211_hw *hw, + struct cfg80211_scan_info *info) +{ + struct ieee80211_local *local = hw_to_local(hw); + + trace_api_scan_completed(local, info->aborted); + + set_bit(SCAN_COMPLETED, &local->scanning); + if (info->aborted) + set_bit(SCAN_ABORTED, &local->scanning); + + memcpy(&local->scan_info, info, sizeof(*info)); + + wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0); +} +EXPORT_SYMBOL(ieee80211_scan_completed); + +static int ieee80211_start_sw_scan(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + /* Software scan is not supported in multi-channel cases */ + if (local->use_chanctx) + return -EOPNOTSUPP; + + /* + * Hardware/driver doesn't support hw_scan, so use software + * scanning instead. First send a nullfunc frame with power save + * bit on so that AP will buffer the frames for us while we are not + * listening, then send probe requests to each channel and wait for + * the responses. After all channels are scanned, tune back to the + * original channel and send a nullfunc frame with power save bit + * off to trigger the AP to send us all the buffered frames. + * + * Note that while local->sw_scanning is true everything else but + * nullfunc frames and probe requests will be dropped in + * ieee80211_tx_h_check_assoc(). + */ + drv_sw_scan_start(local, sdata, local->scan_addr); + + local->leave_oper_channel_time = jiffies; + local->next_scan_state = SCAN_DECISION; + local->scan_channel_idx = 0; + + ieee80211_offchannel_stop_vifs(local); + + /* ensure nullfunc is transmitted before leaving operating channel */ + ieee80211_flush_queues(local, NULL, false); + + ieee80211_configure_filter(local); + + /* We need to set power level at maximum rate for scanning. 
*/ + ieee80211_hw_config(local, 0); + + wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0); + + return 0; +} + +static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_sub_if_data *sdata_iter; + + if (!ieee80211_is_radar_required(local)) + return true; + + if (!regulatory_pre_cac_allowed(local->hw.wiphy)) + return false; + + mutex_lock(&local->iflist_mtx); + list_for_each_entry(sdata_iter, &local->interfaces, list) { + if (sdata_iter->wdev.cac_started) { + mutex_unlock(&local->iflist_mtx); + return false; + } + } + mutex_unlock(&local->iflist_mtx); + + return true; +} + +static bool ieee80211_can_scan(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + if (!__ieee80211_can_leave_ch(sdata)) + return false; + + if (!list_empty(&local->roc_list)) + return false; + + if (sdata->vif.type == NL80211_IFTYPE_STATION && + sdata->u.mgd.flags & IEEE80211_STA_CONNECTION_POLL) + return false; + + return true; +} + +void ieee80211_run_deferred_scan(struct ieee80211_local *local) +{ + lockdep_assert_held(&local->mtx); + + if (!local->scan_req || local->scanning) + return; + + if (!ieee80211_can_scan(local, + rcu_dereference_protected( + local->scan_sdata, + lockdep_is_held(&local->mtx)))) + return; + + wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, + round_jiffies_relative(0)); +} + +static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata, + const u8 *src, const u8 *dst, + const u8 *ssid, size_t ssid_len, + const u8 *ie, size_t ie_len, + u32 ratemask, u32 flags, u32 tx_flags, + struct ieee80211_channel *channel) +{ + struct sk_buff *skb; + + skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel, + ssid, ssid_len, + ie, ie_len, flags); + + if (skb) { + if (flags & IEEE80211_PROBE_FLAG_RANDOM_SN) { + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u16 sn = get_random_u16(); + + info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO; + hdr->seq_ctrl = + cpu_to_le16(IEEE80211_SN_TO_SEQ(sn)); + } + IEEE80211_SKB_CB(skb)->flags |= tx_flags; + ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band); + } +} + +static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, + unsigned long *next_delay) +{ + int i; + struct ieee80211_sub_if_data *sdata; + struct cfg80211_scan_request *scan_req; + enum nl80211_band band = local->hw.conf.chandef.chan->band; + u32 flags = 0, tx_flags; + + scan_req = rcu_dereference_protected(local->scan_req, + lockdep_is_held(&local->mtx)); + + tx_flags = IEEE80211_TX_INTFL_OFFCHAN_TX_OK; + if (scan_req->no_cck) + tx_flags |= IEEE80211_TX_CTL_NO_CCK_RATE; + if (scan_req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT) + flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT; + if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_SN) + flags |= IEEE80211_PROBE_FLAG_RANDOM_SN; + + sdata = rcu_dereference_protected(local->scan_sdata, + lockdep_is_held(&local->mtx)); + + for (i = 0; i < scan_req->n_ssids; i++) + ieee80211_send_scan_probe_req( + sdata, local->scan_addr, scan_req->bssid, + scan_req->ssids[i].ssid, scan_req->ssids[i].ssid_len, + scan_req->ie, scan_req->ie_len, + scan_req->rates[band], flags, + tx_flags, local->hw.conf.chandef.chan); + + /* + * After sending probe requests, wait for probe responses + * on the channel. 
+ */ + *next_delay = IEEE80211_CHANNEL_TIME; + local->next_scan_state = SCAN_DECISION; +} + +static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, + struct cfg80211_scan_request *req) +{ + struct ieee80211_local *local = sdata->local; + bool hw_scan = local->ops->hw_scan; + int rc; + + lockdep_assert_held(&local->mtx); + + if (local->scan_req) + return -EBUSY; + + if (!__ieee80211_can_leave_ch(sdata)) + return -EBUSY; + + if (!ieee80211_can_scan(local, sdata)) { + /* wait for the work to finish/time out */ + rcu_assign_pointer(local->scan_req, req); + rcu_assign_pointer(local->scan_sdata, sdata); + return 0; + } + + again: + if (hw_scan) { + u8 *ies; + + local->hw_scan_ies_bufsize = local->scan_ies_len + req->ie_len; + + if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) { + int i, n_bands = 0; + u8 bands_counted = 0; + + for (i = 0; i < req->n_channels; i++) { + if (bands_counted & BIT(req->channels[i]->band)) + continue; + bands_counted |= BIT(req->channels[i]->band); + n_bands++; + } + + local->hw_scan_ies_bufsize *= n_bands; + } + + local->hw_scan_req = kmalloc( + sizeof(*local->hw_scan_req) + + req->n_channels * sizeof(req->channels[0]) + + local->hw_scan_ies_bufsize, GFP_KERNEL); + if (!local->hw_scan_req) + return -ENOMEM; + + local->hw_scan_req->req.ssids = req->ssids; + local->hw_scan_req->req.n_ssids = req->n_ssids; + ies = (u8 *)local->hw_scan_req + + sizeof(*local->hw_scan_req) + + req->n_channels * sizeof(req->channels[0]); + local->hw_scan_req->req.ie = ies; + local->hw_scan_req->req.flags = req->flags; + eth_broadcast_addr(local->hw_scan_req->req.bssid); + local->hw_scan_req->req.duration = req->duration; + local->hw_scan_req->req.duration_mandatory = + req->duration_mandatory; + + local->hw_scan_band = 0; + local->hw_scan_req->req.n_6ghz_params = req->n_6ghz_params; + local->hw_scan_req->req.scan_6ghz_params = + req->scan_6ghz_params; + local->hw_scan_req->req.scan_6ghz = req->scan_6ghz; + + /* + * After allocating local->hw_scan_req, we must + * go through until ieee80211_prep_hw_scan(), so + * anything that might be changed here and leave + * this function early must not go after this + * allocation. + */ + } + + rcu_assign_pointer(local->scan_req, req); + rcu_assign_pointer(local->scan_sdata, sdata); + + if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) + get_random_mask_addr(local->scan_addr, + req->mac_addr, + req->mac_addr_mask); + else + memcpy(local->scan_addr, sdata->vif.addr, ETH_ALEN); + + if (hw_scan) { + __set_bit(SCAN_HW_SCANNING, &local->scanning); + } else if ((req->n_channels == 1) && + (req->channels[0] == local->_oper_chandef.chan)) { + /* + * If we are scanning only on the operating channel + * then we do not need to stop normal activities + */ + unsigned long next_delay; + + __set_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning); + + ieee80211_recalc_idle(local); + + /* Notify driver scan is starting, keep order of operations + * same as normal software scan, in case that matters. */ + drv_sw_scan_start(local, sdata, local->scan_addr); + + ieee80211_configure_filter(local); /* accept probe-responses */ + + /* We need to ensure power level is at max for scanning. 
*/ + ieee80211_hw_config(local, 0); + + if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR | + IEEE80211_CHAN_RADAR)) || + !req->n_ssids) { + next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; + if (req->n_ssids) + set_bit(SCAN_BEACON_WAIT, &local->scanning); + } else { + ieee80211_scan_state_send_probe(local, &next_delay); + next_delay = IEEE80211_CHANNEL_TIME; + } + + /* Now, just wait a bit and we are all done! */ + wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, + next_delay); + return 0; + } else { + /* Do normal software scan */ + __set_bit(SCAN_SW_SCANNING, &local->scanning); + } + + ieee80211_recalc_idle(local); + + if (hw_scan) { + WARN_ON(!ieee80211_prep_hw_scan(sdata)); + rc = drv_hw_scan(local, sdata, local->hw_scan_req); + } else { + rc = ieee80211_start_sw_scan(local, sdata); + } + + if (rc) { + kfree(local->hw_scan_req); + local->hw_scan_req = NULL; + local->scanning = 0; + + ieee80211_recalc_idle(local); + + local->scan_req = NULL; + RCU_INIT_POINTER(local->scan_sdata, NULL); + } + + if (hw_scan && rc == 1) { + /* + * we can't fall back to software for P2P-GO + * as it must update NoA etc. + */ + if (ieee80211_vif_type_p2p(&sdata->vif) == + NL80211_IFTYPE_P2P_GO) + return -EOPNOTSUPP; + hw_scan = false; + goto again; + } + + return rc; +} + +static unsigned long +ieee80211_scan_get_channel_time(struct ieee80211_channel *chan) +{ + /* + * TODO: channel switching also consumes quite some time, + * add that delay as well to get a better estimation + */ + if (chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) + return IEEE80211_PASSIVE_CHANNEL_TIME; + return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME; +} + +static void ieee80211_scan_state_decision(struct ieee80211_local *local, + unsigned long *next_delay) +{ + bool associated = false; + bool tx_empty = true; + bool bad_latency; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_channel *next_chan; + enum mac80211_scan_state next_scan_state; + struct cfg80211_scan_request *scan_req; + + /* + * check if at least one STA interface is associated, + * check if at least one STA interface has pending tx frames + * and grab the lowest used beacon interval + */ + mutex_lock(&local->iflist_mtx); + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + if (sdata->u.mgd.associated) { + associated = true; + + if (!qdisc_all_tx_empty(sdata->dev)) { + tx_empty = false; + break; + } + } + } + } + mutex_unlock(&local->iflist_mtx); + + scan_req = rcu_dereference_protected(local->scan_req, + lockdep_is_held(&local->mtx)); + + next_chan = scan_req->channels[local->scan_channel_idx]; + + /* + * we're currently scanning a different channel, let's + * see if we can scan another channel without interfering + * with the current traffic situation. + * + * Keep good latency, do not stay off-channel more than 125 ms. 
+ */ + + bad_latency = time_after(jiffies + + ieee80211_scan_get_channel_time(next_chan), + local->leave_oper_channel_time + HZ / 8); + + if (associated && !tx_empty) { + if (scan_req->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) + next_scan_state = SCAN_ABORT; + else + next_scan_state = SCAN_SUSPEND; + } else if (associated && bad_latency) { + next_scan_state = SCAN_SUSPEND; + } else { + next_scan_state = SCAN_SET_CHANNEL; + } + + local->next_scan_state = next_scan_state; + + *next_delay = 0; +} + +static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, + unsigned long *next_delay) +{ + int skip; + struct ieee80211_channel *chan; + enum nl80211_bss_scan_width oper_scan_width; + struct cfg80211_scan_request *scan_req; + + scan_req = rcu_dereference_protected(local->scan_req, + lockdep_is_held(&local->mtx)); + + skip = 0; + chan = scan_req->channels[local->scan_channel_idx]; + + local->scan_chandef.chan = chan; + local->scan_chandef.center_freq1 = chan->center_freq; + local->scan_chandef.freq1_offset = chan->freq_offset; + local->scan_chandef.center_freq2 = 0; + + /* For scanning on the S1G band, ignore scan_width (which is constant + * across all channels) for now since channel width is specific to each + * channel. Detect the required channel width here and likely revisit + * later. Maybe scan_width could be used to build the channel scan list? + */ + if (chan->band == NL80211_BAND_S1GHZ) { + local->scan_chandef.width = ieee80211_s1g_channel_width(chan); + goto set_channel; + } + + switch (scan_req->scan_width) { + case NL80211_BSS_CHAN_WIDTH_5: + local->scan_chandef.width = NL80211_CHAN_WIDTH_5; + break; + case NL80211_BSS_CHAN_WIDTH_10: + local->scan_chandef.width = NL80211_CHAN_WIDTH_10; + break; + default: + case NL80211_BSS_CHAN_WIDTH_20: + /* If scanning on oper channel, use whatever channel-type + * is currently in use. + */ + oper_scan_width = cfg80211_chandef_to_scan_width( + &local->_oper_chandef); + if (chan == local->_oper_chandef.chan && + oper_scan_width == scan_req->scan_width) + local->scan_chandef = local->_oper_chandef; + else + local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT; + break; + case NL80211_BSS_CHAN_WIDTH_1: + case NL80211_BSS_CHAN_WIDTH_2: + /* shouldn't get here, S1G handled above */ + WARN_ON(1); + break; + } + +set_channel: + if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL)) + skip = 1; + + /* advance state machine to next channel/band */ + local->scan_channel_idx++; + + if (skip) { + /* if we skip this channel return to the decision state */ + local->next_scan_state = SCAN_DECISION; + return; + } + + /* + * Probe delay is used to update the NAV, cf. 11.1.3.2.2 + * (which unfortunately doesn't say _why_ step a) is done, + * but it waits for the probe delay or until a frame is + * received - and the received frame would update the NAV). + * For now, we do not support waiting until a frame is + * received. + * + * In any case, it is not necessary for a passive scan. 
+ */ + if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) || + !scan_req->n_ssids) { + *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; + local->next_scan_state = SCAN_DECISION; + if (scan_req->n_ssids) + set_bit(SCAN_BEACON_WAIT, &local->scanning); + return; + } + + /* active scan, send probes */ + *next_delay = IEEE80211_PROBE_DELAY; + local->next_scan_state = SCAN_SEND_PROBE; +} + +static void ieee80211_scan_state_suspend(struct ieee80211_local *local, + unsigned long *next_delay) +{ + /* switch back to the operating channel */ + local->scan_chandef.chan = NULL; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + + /* disable PS */ + ieee80211_offchannel_return(local); + + *next_delay = HZ / 5; + /* afterwards, resume scan & go to next channel */ + local->next_scan_state = SCAN_RESUME; +} + +static void ieee80211_scan_state_resume(struct ieee80211_local *local, + unsigned long *next_delay) +{ + ieee80211_offchannel_stop_vifs(local); + + if (local->ops->flush) { + ieee80211_flush_queues(local, NULL, false); + *next_delay = 0; + } else + *next_delay = HZ / 10; + + /* remember when we left the operating channel */ + local->leave_oper_channel_time = jiffies; + + /* advance to the next channel to be scanned */ + local->next_scan_state = SCAN_SET_CHANNEL; +} + +void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, scan_work.work); + struct ieee80211_sub_if_data *sdata; + struct cfg80211_scan_request *scan_req; + unsigned long next_delay = 0; + bool aborted; + + mutex_lock(&local->mtx); + + if (!ieee80211_can_run_worker(local)) { + aborted = true; + goto out_complete; + } + + sdata = rcu_dereference_protected(local->scan_sdata, + lockdep_is_held(&local->mtx)); + scan_req = rcu_dereference_protected(local->scan_req, + lockdep_is_held(&local->mtx)); + + /* When scanning on-channel, the first-callback means completed. 
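ieee80211_scan_work(), which begins just above, drives the scan as a small state machine: the do/while loop that follows runs handler after handler until one of them asks for a delay, at which point the work is rescheduled. A compact standalone sketch of that loop shape; the state names echo the kernel's, but the handlers and timings are invented for the demo:

#include <stdio.h>

enum scan_state { SCAN_DECISION, SCAN_SET_CHANNEL, SCAN_SEND_PROBE };

int main(void)
{
	enum scan_state state = SCAN_DECISION;
	int channel = 0, n_channels = 3, done = 0;

	while (!done) {
		unsigned long delay_ms = 0;

		/* run states back-to-back until one asks for a delay,
		 * like the do/while loop in ieee80211_scan_work() */
		do {
			switch (state) {
			case SCAN_DECISION:
				if (channel >= n_channels) {
					done = 1;
					break;
				}
				state = SCAN_SET_CHANNEL;
				break;
			case SCAN_SET_CHANNEL:
				channel++;
				state = SCAN_SEND_PROBE;
				break;
			case SCAN_SEND_PROBE:
				delay_ms = 30;	/* dwell for probe responses */
				state = SCAN_DECISION;
				break;
			}
		} while (!delay_ms && !done);

		if (!done)
			printf("channel %d scanned, reschedule in %lu ms\n",
			       channel, delay_ms);
	}
	return 0;
}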
*/ + if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) { + aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning); + goto out_complete; + } + + if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) { + aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning); + goto out_complete; + } + + if (!sdata || !scan_req) + goto out; + + if (!local->scanning) { + int rc; + + RCU_INIT_POINTER(local->scan_req, NULL); + RCU_INIT_POINTER(local->scan_sdata, NULL); + + rc = __ieee80211_start_scan(sdata, scan_req); + if (rc) { + /* need to complete scan in cfg80211 */ + rcu_assign_pointer(local->scan_req, scan_req); + aborted = true; + goto out_complete; + } else + goto out; + } + + clear_bit(SCAN_BEACON_WAIT, &local->scanning); + + /* + * as long as no delay is required advance immediately + * without scheduling a new work + */ + do { + if (!ieee80211_sdata_running(sdata)) { + aborted = true; + goto out_complete; + } + + if (test_and_clear_bit(SCAN_BEACON_DONE, &local->scanning) && + local->next_scan_state == SCAN_DECISION) + local->next_scan_state = SCAN_SEND_PROBE; + + switch (local->next_scan_state) { + case SCAN_DECISION: + /* if no more bands/channels left, complete scan */ + if (local->scan_channel_idx >= scan_req->n_channels) { + aborted = false; + goto out_complete; + } + ieee80211_scan_state_decision(local, &next_delay); + break; + case SCAN_SET_CHANNEL: + ieee80211_scan_state_set_channel(local, &next_delay); + break; + case SCAN_SEND_PROBE: + ieee80211_scan_state_send_probe(local, &next_delay); + break; + case SCAN_SUSPEND: + ieee80211_scan_state_suspend(local, &next_delay); + break; + case SCAN_RESUME: + ieee80211_scan_state_resume(local, &next_delay); + break; + case SCAN_ABORT: + aborted = true; + goto out_complete; + } + } while (next_delay == 0); + + wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, + next_delay); + goto out; + +out_complete: + __ieee80211_scan_completed(&local->hw, aborted); +out: + mutex_unlock(&local->mtx); +} + +int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, + struct cfg80211_scan_request *req) +{ + int res; + + mutex_lock(&sdata->local->mtx); + res = __ieee80211_start_scan(sdata, req); + mutex_unlock(&sdata->local->mtx); + + return res; +} + +int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata, + const u8 *ssid, u8 ssid_len, + struct ieee80211_channel **channels, + unsigned int n_channels, + enum nl80211_bss_scan_width scan_width) +{ + struct ieee80211_local *local = sdata->local; + int ret = -EBUSY, i, n_ch = 0; + enum nl80211_band band; + + mutex_lock(&local->mtx); + + /* busy scanning */ + if (local->scan_req) + goto unlock; + + /* fill internal scan request */ + if (!channels) { + int max_n; + + for (band = 0; band < NUM_NL80211_BANDS; band++) { + if (!local->hw.wiphy->bands[band] || + band == NL80211_BAND_6GHZ) + continue; + + max_n = local->hw.wiphy->bands[band]->n_channels; + for (i = 0; i < max_n; i++) { + struct ieee80211_channel *tmp_ch = + &local->hw.wiphy->bands[band]->channels[i]; + + if (tmp_ch->flags & (IEEE80211_CHAN_NO_IR | + IEEE80211_CHAN_DISABLED)) + continue; + + local->int_scan_req->channels[n_ch] = tmp_ch; + n_ch++; + } + } + + if (WARN_ON_ONCE(n_ch == 0)) + goto unlock; + + local->int_scan_req->n_channels = n_ch; + } else { + for (i = 0; i < n_channels; i++) { + if (channels[i]->flags & (IEEE80211_CHAN_NO_IR | + IEEE80211_CHAN_DISABLED)) + continue; + + local->int_scan_req->channels[n_ch] = channels[i]; + n_ch++; + } + + if (WARN_ON_ONCE(n_ch == 0)) + goto unlock; + + 
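Both branches above populate the internal scan request the same way: walk the candidate channels and keep only those that may actually be scanned, skipping no-IR and disabled ones. A minimal standalone sketch of that filtering step, with made-up flag values:

#include <stdio.h>

#define CHAN_NO_IR	0x1	/* placeholder flag values */
#define CHAN_DISABLED	0x2

struct channel {
	int freq;
	unsigned int flags;
};

/* Keep only channels that may be scanned; returns how many were kept. */
static int filter_scan_channels(const struct channel *in, int n,
				const struct channel **out)
{
	int i, kept = 0;

	for (i = 0; i < n; i++) {
		if (in[i].flags & (CHAN_NO_IR | CHAN_DISABLED))
			continue;
		out[kept++] = &in[i];
	}
	return kept;
}

int main(void)
{
	const struct channel chans[] = {
		{ 2412, 0 }, { 2467, CHAN_NO_IR }, { 5200, CHAN_DISABLED },
	};
	const struct channel *scan_list[3];
	int n = filter_scan_channels(chans, 3, scan_list);

	printf("%d scannable channel(s), first at %d MHz\n", n, scan_list[0]->freq);
	return 0;
}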
local->int_scan_req->n_channels = n_ch; + } + + local->int_scan_req->ssids = &local->scan_ssid; + local->int_scan_req->n_ssids = 1; + local->int_scan_req->scan_width = scan_width; + memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN); + local->int_scan_req->ssids[0].ssid_len = ssid_len; + + ret = __ieee80211_start_scan(sdata, sdata->local->int_scan_req); + unlock: + mutex_unlock(&local->mtx); + return ret; +} + +void ieee80211_scan_cancel(struct ieee80211_local *local) +{ + /* ensure a new scan cannot be queued */ + lockdep_assert_wiphy(local->hw.wiphy); + + /* + * We are canceling software scan, or deferred scan that was not + * yet really started (see __ieee80211_start_scan ). + * + * Regarding hardware scan: + * - we can not call __ieee80211_scan_completed() as when + * SCAN_HW_SCANNING bit is set this function change + * local->hw_scan_req to operate on 5G band, what race with + * driver which can use local->hw_scan_req + * + * - we can not cancel scan_work since driver can schedule it + * by ieee80211_scan_completed(..., true) to finish scan + * + * Hence we only call the cancel_hw_scan() callback, but the low-level + * driver is still responsible for calling ieee80211_scan_completed() + * after the scan was completed/aborted. + */ + + mutex_lock(&local->mtx); + if (!local->scan_req) + goto out; + + /* + * We have a scan running and the driver already reported completion, + * but the worker hasn't run yet or is stuck on the mutex - mark it as + * cancelled. + */ + if (test_bit(SCAN_HW_SCANNING, &local->scanning) && + test_bit(SCAN_COMPLETED, &local->scanning)) { + set_bit(SCAN_HW_CANCELLED, &local->scanning); + goto out; + } + + if (test_bit(SCAN_HW_SCANNING, &local->scanning)) { + /* + * Make sure that __ieee80211_scan_completed doesn't trigger a + * scan on another band. 
+ */ + set_bit(SCAN_HW_CANCELLED, &local->scanning); + if (local->ops->cancel_hw_scan) + drv_cancel_hw_scan(local, + rcu_dereference_protected(local->scan_sdata, + lockdep_is_held(&local->mtx))); + goto out; + } + + wiphy_delayed_work_cancel(local->hw.wiphy, &local->scan_work); + /* and clean up */ + memset(&local->scan_info, 0, sizeof(local->scan_info)); + __ieee80211_scan_completed(&local->hw, true); +out: + mutex_unlock(&local->mtx); +} + +int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, + struct cfg80211_sched_scan_request *req) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_scan_ies sched_scan_ies = {}; + struct cfg80211_chan_def chandef; + int ret, i, iebufsz, num_bands = 0; + u32 rate_masks[NUM_NL80211_BANDS] = {}; + u8 bands_used = 0; + u8 *ie; + u32 flags = 0; + + iebufsz = local->scan_ies_len + req->ie_len; + + lockdep_assert_held(&local->mtx); + + if (!local->ops->sched_scan_start) + return -ENOTSUPP; + + for (i = 0; i < NUM_NL80211_BANDS; i++) { + if (local->hw.wiphy->bands[i]) { + bands_used |= BIT(i); + rate_masks[i] = (u32) -1; + num_bands++; + } + } + + if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT) + flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT; + + ie = kcalloc(iebufsz, num_bands, GFP_KERNEL); + if (!ie) { + ret = -ENOMEM; + goto out; + } + + ieee80211_prepare_scan_chandef(&chandef, req->scan_width); + + ieee80211_build_preq_ies(sdata, ie, num_bands * iebufsz, + &sched_scan_ies, req->ie, + req->ie_len, bands_used, rate_masks, &chandef, + flags); + + ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies); + if (ret == 0) { + rcu_assign_pointer(local->sched_scan_sdata, sdata); + rcu_assign_pointer(local->sched_scan_req, req); + } + + kfree(ie); + +out: + if (ret) { + /* Clean in case of failure after HW restart or upon resume. */ + RCU_INIT_POINTER(local->sched_scan_sdata, NULL); + RCU_INIT_POINTER(local->sched_scan_req, NULL); + } + + return ret; +} + +int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, + struct cfg80211_sched_scan_request *req) +{ + struct ieee80211_local *local = sdata->local; + int ret; + + mutex_lock(&local->mtx); + + if (rcu_access_pointer(local->sched_scan_sdata)) { + mutex_unlock(&local->mtx); + return -EBUSY; + } + + ret = __ieee80211_request_sched_scan_start(sdata, req); + + mutex_unlock(&local->mtx); + return ret; +} + +int ieee80211_request_sched_scan_stop(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sched_scan_sdata; + int ret = -ENOENT; + + mutex_lock(&local->mtx); + + if (!local->ops->sched_scan_stop) { + ret = -ENOTSUPP; + goto out; + } + + /* We don't want to restart sched scan anymore. 
*/ + RCU_INIT_POINTER(local->sched_scan_req, NULL); + + sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata, + lockdep_is_held(&local->mtx)); + if (sched_scan_sdata) { + ret = drv_sched_scan_stop(local, sched_scan_sdata); + if (!ret) + RCU_INIT_POINTER(local->sched_scan_sdata, NULL); + } +out: + mutex_unlock(&local->mtx); + + return ret; +} + +void ieee80211_sched_scan_results(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + trace_api_sched_scan_results(local); + + cfg80211_sched_scan_results(hw->wiphy, 0); +} +EXPORT_SYMBOL(ieee80211_sched_scan_results); + +void ieee80211_sched_scan_end(struct ieee80211_local *local) +{ + mutex_lock(&local->mtx); + + if (!rcu_access_pointer(local->sched_scan_sdata)) { + mutex_unlock(&local->mtx); + return; + } + + RCU_INIT_POINTER(local->sched_scan_sdata, NULL); + + /* If sched scan was aborted by the driver. */ + RCU_INIT_POINTER(local->sched_scan_req, NULL); + + mutex_unlock(&local->mtx); + + cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0); +} + +void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy, + struct wiphy_work *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, + sched_scan_stopped_work); + + ieee80211_sched_scan_end(local); +} + +void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + trace_api_sched_scan_stopped(local); + + /* + * this shouldn't really happen, so for simplicity + * simply ignore it, and let mac80211 reconfigure + * the sched scan later on. + */ + if (local->in_reconfig) + return; + + wiphy_work_queue(hw->wiphy, &local->sched_scan_stopped_work); +} +EXPORT_SYMBOL(ieee80211_sched_scan_stopped); diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c new file mode 100644 index 0000000000..871cdac2d0 --- /dev/null +++ b/net/mac80211/spectmgmt.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * spectrum management + * + * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. 
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2007, Michael Wu <flamingice@sourmilk.net> + * Copyright 2007-2008, Intel Corporation + * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> + * Copyright (C) 2018, 2020, 2022 Intel Corporation + */ + +#include <linux/ieee80211.h> +#include <net/cfg80211.h> +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "sta_info.h" +#include "wme.h" + +int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, + struct ieee802_11_elems *elems, + enum nl80211_band current_band, + u32 vht_cap_info, + ieee80211_conn_flags_t conn_flags, u8 *bssid, + struct ieee80211_csa_ie *csa_ie) +{ + enum nl80211_band new_band = current_band; + int new_freq; + u8 new_chan_no; + struct ieee80211_channel *new_chan; + struct cfg80211_chan_def new_vht_chandef = {}; + const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; + const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie; + int secondary_channel_offset = -1; + + memset(csa_ie, 0, sizeof(*csa_ie)); + + sec_chan_offs = elems->sec_chan_offs; + wide_bw_chansw_ie = elems->wide_bw_chansw_ie; + + if (conn_flags & (IEEE80211_CONN_DISABLE_HT | + IEEE80211_CONN_DISABLE_40MHZ)) { + sec_chan_offs = NULL; + wide_bw_chansw_ie = NULL; + } + + if (conn_flags & IEEE80211_CONN_DISABLE_VHT) + wide_bw_chansw_ie = NULL; + + if (elems->ext_chansw_ie) { + if (!ieee80211_operating_class_to_band( + elems->ext_chansw_ie->new_operating_class, + &new_band)) { + sdata_info(sdata, + "cannot understand ECSA IE operating class, %d, ignoring\n", + elems->ext_chansw_ie->new_operating_class); + } + new_chan_no = elems->ext_chansw_ie->new_ch_num; + csa_ie->count = elems->ext_chansw_ie->count; + csa_ie->mode = elems->ext_chansw_ie->mode; + } else if (elems->ch_switch_ie) { + new_chan_no = elems->ch_switch_ie->new_ch_num; + csa_ie->count = elems->ch_switch_ie->count; + csa_ie->mode = elems->ch_switch_ie->mode; + } else { + /* nothing here we understand */ + return 1; + } + + /* Mesh Channel Switch Parameters Element */ + if (elems->mesh_chansw_params_ie) { + csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl; + csa_ie->mode = elems->mesh_chansw_params_ie->mesh_flags; + csa_ie->pre_value = le16_to_cpu( + elems->mesh_chansw_params_ie->mesh_pre_value); + + if (elems->mesh_chansw_params_ie->mesh_flags & + WLAN_EID_CHAN_SWITCH_PARAM_REASON) + csa_ie->reason_code = le16_to_cpu( + elems->mesh_chansw_params_ie->mesh_reason); + } + + new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band); + new_chan = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq); + if (!new_chan || new_chan->flags & IEEE80211_CHAN_DISABLED) { + sdata_info(sdata, + "BSS %pM switches to unsupported channel (%d MHz), disconnecting\n", + bssid, new_freq); + return -EINVAL; + } + + if (sec_chan_offs) { + secondary_channel_offset = sec_chan_offs->sec_chan_offs; + } else if (!(conn_flags & IEEE80211_CONN_DISABLE_HT)) { + /* If the secondary channel offset IE is not present, + * we can't know what's the post-CSA offset, so the + * best we can do is use 20MHz. 
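The switch target above is resolved with ieee80211_channel_to_frequency() before the channel is looked up in the wiphy. As a rough standalone illustration of that mapping for the two common bands only (the in-kernel helper also covers 6 GHz, 60 GHz, S1G, the 4.9 GHz channels and frequency offsets):

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

/* Simplified channel-number to centre-frequency mapping in MHz. */
static int chan_to_freq(int chan, enum band band)
{
	if (band == BAND_2GHZ)
		return chan == 14 ? 2484 : 2407 + chan * 5;
	return 5000 + chan * 5;
}

int main(void)
{
	printf("2.4 GHz ch 6  -> %d MHz\n", chan_to_freq(6, BAND_2GHZ));	/* 2437 */
	printf("5 GHz   ch 36 -> %d MHz\n", chan_to_freq(36, BAND_5GHZ));	/* 5180 */
	return 0;
}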
+ */ + secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; + } + + switch (secondary_channel_offset) { + default: + /* secondary_channel_offset was present but is invalid */ + case IEEE80211_HT_PARAM_CHA_SEC_NONE: + cfg80211_chandef_create(&csa_ie->chandef, new_chan, + NL80211_CHAN_HT20); + break; + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + cfg80211_chandef_create(&csa_ie->chandef, new_chan, + NL80211_CHAN_HT40PLUS); + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + cfg80211_chandef_create(&csa_ie->chandef, new_chan, + NL80211_CHAN_HT40MINUS); + break; + case -1: + cfg80211_chandef_create(&csa_ie->chandef, new_chan, + NL80211_CHAN_NO_HT); + /* keep width for 5/10 MHz channels */ + switch (sdata->vif.bss_conf.chandef.width) { + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: + csa_ie->chandef.width = + sdata->vif.bss_conf.chandef.width; + break; + default: + break; + } + break; + } + + if (wide_bw_chansw_ie) { + u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1; + struct ieee80211_vht_operation vht_oper = { + .chan_width = + wide_bw_chansw_ie->new_channel_width, + .center_freq_seg0_idx = + wide_bw_chansw_ie->new_center_freq_seg0, + .center_freq_seg1_idx = new_seg1, + /* .basic_mcs_set doesn't matter */ + }; + struct ieee80211_ht_operation ht_oper = { + .operation_mode = + cpu_to_le16(new_seg1 << + IEEE80211_HT_OP_MODE_CCFS2_SHIFT), + }; + + /* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT, + * to the previously parsed chandef + */ + new_vht_chandef = csa_ie->chandef; + + /* ignore if parsing fails */ + if (!ieee80211_chandef_vht_oper(&sdata->local->hw, + vht_cap_info, + &vht_oper, &ht_oper, + &new_vht_chandef)) + new_vht_chandef.chan = NULL; + + if (conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ && + new_vht_chandef.width == NL80211_CHAN_WIDTH_80P80) + ieee80211_chandef_downgrade(&new_vht_chandef); + if (conn_flags & IEEE80211_CONN_DISABLE_160MHZ && + new_vht_chandef.width == NL80211_CHAN_WIDTH_160) + ieee80211_chandef_downgrade(&new_vht_chandef); + } + + /* if VHT data is there validate & use it */ + if (new_vht_chandef.chan) { + if (!cfg80211_chandef_compatible(&new_vht_chandef, + &csa_ie->chandef)) { + sdata_info(sdata, + "BSS %pM: CSA has inconsistent channel data, disconnecting\n", + bssid); + return -EINVAL; + } + csa_ie->chandef = new_vht_chandef; + } + + if (elems->max_channel_switch_time) + csa_ie->max_switch_time = + (elems->max_channel_switch_time[0] << 0) | + (elems->max_channel_switch_time[1] << 8) | + (elems->max_channel_switch_time[2] << 16); + + return 0; +} + +static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_data *sdata, + struct ieee80211_msrment_ie *request_ie, + const u8 *da, const u8 *bssid, + u8 dialog_token) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *msr_report; + + skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom + + sizeof(struct ieee80211_msrment_ie)); + if (!skb) + return; + + skb_reserve(skb, local->hw.extra_tx_headroom); + msr_report = skb_put_zero(skb, 24); + memcpy(msr_report->da, da, ETH_ALEN); + memcpy(msr_report->sa, sdata->vif.addr, ETH_ALEN); + memcpy(msr_report->bssid, bssid, ETH_ALEN); + msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + + skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement)); + msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT; + msr_report->u.action.u.measurement.action_code = + WLAN_ACTION_SPCT_MSR_RPRT; + 
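The Max Channel Switch Time value parsed a few lines up is a 3-byte little-endian field assembled byte by byte. A tiny standalone version of that assembly:

#include <stdint.h>
#include <stdio.h>

/* Assemble a 3-byte little-endian field into a host integer, as done for
 * the max_channel_switch_time element above. */
static uint32_t le24_to_host(const uint8_t b[3])
{
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) | ((uint32_t)b[2] << 16);
}

int main(void)
{
	const uint8_t raw[3] = { 0x10, 0x27, 0x00 };	/* 0x002710 */

	printf("%u\n", le24_to_host(raw));		/* prints 10000 */
	return 0;
}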
msr_report->u.action.u.measurement.dialog_token = dialog_token; + + msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT; + msr_report->u.action.u.measurement.length = + sizeof(struct ieee80211_msrment_ie); + + memset(&msr_report->u.action.u.measurement.msr_elem, 0, + sizeof(struct ieee80211_msrment_ie)); + msr_report->u.action.u.measurement.msr_elem.token = request_ie->token; + msr_report->u.action.u.measurement.msr_elem.mode |= + IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED; + msr_report->u.action.u.measurement.msr_elem.type = request_ie->type; + + ieee80211_tx_skb(sdata, skb); +} + +void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len) +{ + /* + * Ignoring measurement request is spec violation. + * Mandatory measurements must be reported optional + * measurements might be refused or reported incapable + * For now just refuse + * TODO: Answer basic measurement as unmeasured + */ + ieee80211_send_refuse_measurement_request(sdata, + &mgmt->u.action.u.measurement.msr_elem, + mgmt->sa, mgmt->bssid, + mgmt->u.action.u.measurement.dialog_token); +} diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c new file mode 100644 index 0000000000..e112300caa --- /dev/null +++ b/net/mac80211/sta_info.c @@ -0,0 +1,3007 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018-2023 Intel Corporation + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/skbuff.h> +#include <linux/if_arp.h> +#include <linux/timer.h> +#include <linux/rtnetlink.h> + +#include <net/codel.h> +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" +#include "sta_info.h" +#include "debugfs_sta.h" +#include "mesh.h" +#include "wme.h" + +/** + * DOC: STA information lifetime rules + * + * STA info structures (&struct sta_info) are managed in a hash table + * for faster lookup and a list for iteration. They are managed using + * RCU, i.e. access to the list and hash table is protected by RCU. + * + * Upon allocating a STA info structure with sta_info_alloc(), the caller + * owns that structure. It must then insert it into the hash table using + * either sta_info_insert() or sta_info_insert_rcu(); only in the latter + * case (which acquires an rcu read section but must not be called from + * within one) will the pointer still be valid after the call. Note that + * the caller may not do much with the STA info before inserting it, in + * particular, it may not start any mesh peer link management or add + * encryption keys. + * + * When the insertion fails (sta_info_insert()) returns non-zero), the + * structure will have been freed by sta_info_insert()! + * + * Station entries are added by mac80211 when you establish a link with a + * peer. This means different things for the different type of interfaces + * we support. For a regular station this mean we add the AP sta when we + * receive an association response from the AP. For IBSS this occurs when + * get to know about a peer on the same IBSS. For WDS we add the sta for + * the peer immediately upon device open. 
When using AP mode we add stations + * for each respective station upon request from userspace through nl80211. + * + * In order to remove a STA info structure, various sta_info_destroy_*() + * calls are available. + * + * There is no concept of ownership on a STA entry, each structure is + * owned by the global hash table/list until it is removed. All users of + * the structure need to be RCU protected so that the structure won't be + * freed before they are done using it. + */ + +struct sta_link_alloc { + struct link_sta_info info; + struct ieee80211_link_sta sta; + struct rcu_head rcu_head; +}; + +static const struct rhashtable_params sta_rht_params = { + .nelem_hint = 3, /* start small */ + .automatic_shrinking = true, + .head_offset = offsetof(struct sta_info, hash_node), + .key_offset = offsetof(struct sta_info, addr), + .key_len = ETH_ALEN, + .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE, +}; + +static const struct rhashtable_params link_sta_rht_params = { + .nelem_hint = 3, /* start small */ + .automatic_shrinking = true, + .head_offset = offsetof(struct link_sta_info, link_hash_node), + .key_offset = offsetof(struct link_sta_info, addr), + .key_len = ETH_ALEN, + .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE, +}; + +/* Caller must hold local->sta_mtx */ +static int sta_info_hash_del(struct ieee80211_local *local, + struct sta_info *sta) +{ + return rhltable_remove(&local->sta_hash, &sta->hash_node, + sta_rht_params); +} + +static int link_sta_info_hash_add(struct ieee80211_local *local, + struct link_sta_info *link_sta) +{ + lockdep_assert_held(&local->sta_mtx); + return rhltable_insert(&local->link_sta_hash, + &link_sta->link_hash_node, + link_sta_rht_params); +} + +static int link_sta_info_hash_del(struct ieee80211_local *local, + struct link_sta_info *link_sta) +{ + lockdep_assert_held(&local->sta_mtx); + return rhltable_remove(&local->link_sta_hash, + &link_sta->link_hash_node, + link_sta_rht_params); +} + +static void __cleanup_single_sta(struct sta_info *sta) +{ + int ac, i; + struct tid_ampdu_tx *tid_tx; + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + struct ps_data *ps; + + if (test_sta_flag(sta, WLAN_STA_PS_STA) || + test_sta_flag(sta, WLAN_STA_PS_DRIVER) || + test_sta_flag(sta, WLAN_STA_PS_DELIVER)) { + if (sta->sdata->vif.type == NL80211_IFTYPE_AP || + sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + ps = &sdata->bss->ps; + else if (ieee80211_vif_is_mesh(&sdata->vif)) + ps = &sdata->u.mesh.ps; + else + return; + + clear_sta_flag(sta, WLAN_STA_PS_STA); + clear_sta_flag(sta, WLAN_STA_PS_DRIVER); + clear_sta_flag(sta, WLAN_STA_PS_DELIVER); + + atomic_dec(&ps->num_sta_ps); + } + + for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { + struct txq_info *txqi; + + if (!sta->sta.txq[i]) + continue; + + txqi = to_txq_info(sta->sta.txq[i]); + + ieee80211_txq_purge(local, txqi); + } + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]); + ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]); + ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]); + } + + if (ieee80211_vif_is_mesh(&sdata->vif)) + mesh_sta_cleanup(sta); + + cancel_work_sync(&sta->drv_deliver_wk); + + /* + * Destroy aggregation state here. It would be nice to wait for the + * driver to finish aggregation stop and then clean up, but for now + * drivers have to handle aggregation stop being requested, followed + * directly by station destruction. 
+ */ + for (i = 0; i < IEEE80211_NUM_TIDS; i++) { + kfree(sta->ampdu_mlme.tid_start_tx[i]); + tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]); + if (!tid_tx) + continue; + ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending); + kfree(tid_tx); + } +} + +static void cleanup_single_sta(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + + __cleanup_single_sta(sta); + sta_info_free(local, sta); +} + +struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local, + const u8 *addr) +{ + return rhltable_lookup(&local->sta_hash, addr, sta_rht_params); +} + +/* protected by RCU */ +struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, + const u8 *addr) +{ + struct ieee80211_local *local = sdata->local; + struct rhlist_head *tmp; + struct sta_info *sta; + + rcu_read_lock(); + for_each_sta_info(local, addr, sta, tmp) { + if (sta->sdata == sdata) { + rcu_read_unlock(); + /* this is safe as the caller must already hold + * another rcu read section or the mutex + */ + return sta; + } + } + rcu_read_unlock(); + return NULL; +} + +/* + * Get sta info either from the specified interface + * or from one of its vlans + */ +struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, + const u8 *addr) +{ + struct ieee80211_local *local = sdata->local; + struct rhlist_head *tmp; + struct sta_info *sta; + + rcu_read_lock(); + for_each_sta_info(local, addr, sta, tmp) { + if (sta->sdata == sdata || + (sta->sdata->bss && sta->sdata->bss == sdata->bss)) { + rcu_read_unlock(); + /* this is safe as the caller must already hold + * another rcu read section or the mutex + */ + return sta; + } + } + rcu_read_unlock(); + return NULL; +} + +struct rhlist_head *link_sta_info_hash_lookup(struct ieee80211_local *local, + const u8 *addr) +{ + return rhltable_lookup(&local->link_sta_hash, addr, + link_sta_rht_params); +} + +struct link_sta_info * +link_sta_info_get_bss(struct ieee80211_sub_if_data *sdata, const u8 *addr) +{ + struct ieee80211_local *local = sdata->local; + struct rhlist_head *tmp; + struct link_sta_info *link_sta; + + rcu_read_lock(); + for_each_link_sta_info(local, addr, link_sta, tmp) { + struct sta_info *sta = link_sta->sta; + + if (sta->sdata == sdata || + (sta->sdata->bss && sta->sdata->bss == sdata->bss)) { + rcu_read_unlock(); + /* this is safe as the caller must already hold + * another rcu read section or the mutex + */ + return link_sta; + } + } + rcu_read_unlock(); + return NULL; +} + +struct ieee80211_sta * +ieee80211_find_sta_by_link_addrs(struct ieee80211_hw *hw, + const u8 *addr, + const u8 *localaddr, + unsigned int *link_id) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct link_sta_info *link_sta; + struct rhlist_head *tmp; + + for_each_link_sta_info(local, addr, link_sta, tmp) { + struct sta_info *sta = link_sta->sta; + struct ieee80211_link_data *link; + u8 _link_id = link_sta->link_id; + + if (!localaddr) { + if (link_id) + *link_id = _link_id; + return &sta->sta; + } + + link = rcu_dereference(sta->sdata->link[_link_id]); + if (!link) + continue; + + if (memcmp(link->conf->addr, localaddr, ETH_ALEN)) + continue; + + if (link_id) + *link_id = _link_id; + return &sta->sta; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_link_addrs); + +struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local, + const u8 *sta_addr, const u8 *vif_addr) +{ + struct rhlist_head *tmp; + struct sta_info *sta; + + for_each_sta_info(local, sta_addr, sta, tmp) 
{ + if (ether_addr_equal(vif_addr, sta->sdata->vif.addr)) + return sta; + } + + return NULL; +} + +struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, + int idx) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + int i = 0; + + list_for_each_entry_rcu(sta, &local->sta_list, list, + lockdep_is_held(&local->sta_mtx)) { + if (sdata != sta->sdata) + continue; + if (i < idx) { + ++i; + continue; + } + return sta; + } + + return NULL; +} + +static void sta_info_free_link(struct link_sta_info *link_sta) +{ + free_percpu(link_sta->pcpu_rx_stats); +} + +static void sta_remove_link(struct sta_info *sta, unsigned int link_id, + bool unhash) +{ + struct sta_link_alloc *alloc = NULL; + struct link_sta_info *link_sta; + + link_sta = rcu_access_pointer(sta->link[link_id]); + if (link_sta != &sta->deflink) + lockdep_assert_held(&sta->local->sta_mtx); + + if (WARN_ON(!link_sta)) + return; + + if (unhash) + link_sta_info_hash_del(sta->local, link_sta); + + if (test_sta_flag(sta, WLAN_STA_INSERTED)) + ieee80211_link_sta_debugfs_remove(link_sta); + + if (link_sta != &sta->deflink) + alloc = container_of(link_sta, typeof(*alloc), info); + + sta->sta.valid_links &= ~BIT(link_id); + RCU_INIT_POINTER(sta->link[link_id], NULL); + RCU_INIT_POINTER(sta->sta.link[link_id], NULL); + if (alloc) { + sta_info_free_link(&alloc->info); + kfree_rcu(alloc, rcu_head); + } + + ieee80211_sta_recalc_aggregates(&sta->sta); +} + +/** + * sta_info_free - free STA + * + * @local: pointer to the global information + * @sta: STA info to free + * + * This function must undo everything done by sta_info_alloc() + * that may happen before sta_info_insert(). It may only be + * called when sta_info_insert() has not been attempted (and + * if that fails, the station is freed anyway.) + */ +void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sta->link); i++) { + struct link_sta_info *link_sta; + + link_sta = rcu_access_pointer(sta->link[i]); + if (!link_sta) + continue; + + sta_remove_link(sta, i, false); + } + + /* + * If we had used sta_info_pre_move_state() then we might not + * have gone through the state transitions down again, so do + * it here now (and warn if it's inserted). + * + * This will clear state such as fast TX/RX that may have been + * allocated during state transitions. 
+ */ + while (sta->sta_state > IEEE80211_STA_NONE) { + int ret; + + WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED)); + + ret = sta_info_move_state(sta, sta->sta_state - 1); + if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret)) + break; + } + + if (sta->rate_ctrl) + rate_control_free_sta(sta); + + sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr); + + kfree(to_txq_info(sta->sta.txq[0])); + kfree(rcu_dereference_raw(sta->sta.rates)); +#ifdef CONFIG_MAC80211_MESH + kfree(sta->mesh); +#endif + + sta_info_free_link(&sta->deflink); + kfree(sta); +} + +/* Caller must hold local->sta_mtx */ +static int sta_info_hash_add(struct ieee80211_local *local, + struct sta_info *sta) +{ + return rhltable_insert(&local->sta_hash, &sta->hash_node, + sta_rht_params); +} + +static void sta_deliver_ps_frames(struct work_struct *wk) +{ + struct sta_info *sta; + + sta = container_of(wk, struct sta_info, drv_deliver_wk); + + if (sta->dead) + return; + + local_bh_disable(); + if (!test_sta_flag(sta, WLAN_STA_PS_STA)) + ieee80211_sta_ps_deliver_wakeup(sta); + else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) + ieee80211_sta_ps_deliver_poll_response(sta); + else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD)) + ieee80211_sta_ps_deliver_uapsd(sta); + local_bh_enable(); +} + +static int sta_prepare_rate_control(struct ieee80211_local *local, + struct sta_info *sta, gfp_t gfp) +{ + if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) + return 0; + + sta->rate_ctrl = local->rate_ctrl; + sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, + sta, gfp); + if (!sta->rate_ctrl_priv) + return -ENOMEM; + + return 0; +} + +static int sta_info_alloc_link(struct ieee80211_local *local, + struct link_sta_info *link_info, + gfp_t gfp) +{ + struct ieee80211_hw *hw = &local->hw; + int i; + + if (ieee80211_hw_check(hw, USES_RSS)) { + link_info->pcpu_rx_stats = + alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp); + if (!link_info->pcpu_rx_stats) + return -ENOMEM; + } + + link_info->rx_stats.last_rx = jiffies; + u64_stats_init(&link_info->rx_stats.syncp); + + ewma_signal_init(&link_info->rx_stats_avg.signal); + ewma_avg_signal_init(&link_info->status_stats.avg_ack_signal); + for (i = 0; i < ARRAY_SIZE(link_info->rx_stats_avg.chain_signal); i++) + ewma_signal_init(&link_info->rx_stats_avg.chain_signal[i]); + + return 0; +} + +static void sta_info_add_link(struct sta_info *sta, + unsigned int link_id, + struct link_sta_info *link_info, + struct ieee80211_link_sta *link_sta) +{ + link_info->sta = sta; + link_info->link_id = link_id; + link_info->pub = link_sta; + link_info->pub->sta = &sta->sta; + link_sta->link_id = link_id; + rcu_assign_pointer(sta->link[link_id], link_info); + rcu_assign_pointer(sta->sta.link[link_id], link_sta); + + link_sta->smps_mode = IEEE80211_SMPS_OFF; + link_sta->agg.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA; +} + +static struct sta_info * +__sta_info_alloc(struct ieee80211_sub_if_data *sdata, + const u8 *addr, int link_id, const u8 *link_addr, + gfp_t gfp) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_hw *hw = &local->hw; + struct sta_info *sta; + void *txq_data; + int size; + int i; + + sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp); + if (!sta) + return NULL; + + sta->local = local; + sta->sdata = sdata; + + if (sta_info_alloc_link(local, &sta->deflink, gfp)) + goto free; + + if (link_id >= 0) { + sta_info_add_link(sta, link_id, &sta->deflink, + &sta->sta.deflink); + sta->sta.valid_links = BIT(link_id); + } else { + 
sta_info_add_link(sta, 0, &sta->deflink, &sta->sta.deflink); + } + + sta->sta.cur = &sta->sta.deflink.agg; + + spin_lock_init(&sta->lock); + spin_lock_init(&sta->ps_lock); + INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames); + INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); + mutex_init(&sta->ampdu_mlme.mtx); +#ifdef CONFIG_MAC80211_MESH + if (ieee80211_vif_is_mesh(&sdata->vif)) { + sta->mesh = kzalloc(sizeof(*sta->mesh), gfp); + if (!sta->mesh) + goto free; + sta->mesh->plink_sta = sta; + spin_lock_init(&sta->mesh->plink_lock); + if (!sdata->u.mesh.user_mpm) + timer_setup(&sta->mesh->plink_timer, mesh_plink_timer, + 0); + sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE; + } +#endif + + memcpy(sta->addr, addr, ETH_ALEN); + memcpy(sta->sta.addr, addr, ETH_ALEN); + memcpy(sta->deflink.addr, link_addr, ETH_ALEN); + memcpy(sta->sta.deflink.addr, link_addr, ETH_ALEN); + sta->sta.max_rx_aggregation_subframes = + local->hw.max_rx_aggregation_subframes; + + /* TODO link specific alloc and assignments for MLO Link STA */ + + /* Extended Key ID needs to install keys for keyid 0 and 1 Rx-only. + * The Tx path starts to use a key as soon as the key slot ptk_idx + * references to is not NULL. To not use the initial Rx-only key + * prematurely for Tx initialize ptk_idx to an impossible PTK keyid + * which always will refer to a NULL key. + */ + BUILD_BUG_ON(ARRAY_SIZE(sta->ptk) <= INVALID_PTK_KEYIDX); + sta->ptk_idx = INVALID_PTK_KEYIDX; + + + ieee80211_init_frag_cache(&sta->frags); + + sta->sta_state = IEEE80211_STA_NONE; + + if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) + sta->amsdu_mesh_control = -1; + + /* Mark TID as unreserved */ + sta->reserved_tid = IEEE80211_TID_UNRESERVED; + + sta->last_connected = ktime_get_seconds(); + + size = sizeof(struct txq_info) + + ALIGN(hw->txq_data_size, sizeof(void *)); + + txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp); + if (!txq_data) + goto free; + + for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { + struct txq_info *txq = txq_data + i * size; + + /* might not do anything for the (bufferable) MMPDU TXQ */ + ieee80211_txq_init(sdata, sta, txq, i); + } + + if (sta_prepare_rate_control(local, sta, gfp)) + goto free_txq; + + sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + skb_queue_head_init(&sta->ps_tx_buf[i]); + skb_queue_head_init(&sta->tx_filtered[i]); + sta->airtime[i].deficit = sta->airtime_weight; + atomic_set(&sta->airtime[i].aql_tx_pending, 0); + sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i]; + sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i]; + } + + for (i = 0; i < IEEE80211_NUM_TIDS; i++) + sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); + + for (i = 0; i < NUM_NL80211_BANDS; i++) { + u32 mandatory = 0; + int r; + + if (!hw->wiphy->bands[i]) + continue; + + switch (i) { + case NL80211_BAND_2GHZ: + case NL80211_BAND_LC: + /* + * We use both here, even if we cannot really know for + * sure the station will support both, but the only use + * for this is when we don't know anything yet and send + * management frames, and then we'll pick the lowest + * possible rate anyway. 
+ * If we don't include _G here, we cannot find a rate + * in P2P, and thus trigger the WARN_ONCE() in rate.c + */ + mandatory = IEEE80211_RATE_MANDATORY_B | + IEEE80211_RATE_MANDATORY_G; + break; + case NL80211_BAND_5GHZ: + mandatory = IEEE80211_RATE_MANDATORY_A; + break; + case NL80211_BAND_60GHZ: + WARN_ON(1); + mandatory = 0; + break; + } + + for (r = 0; r < hw->wiphy->bands[i]->n_bitrates; r++) { + struct ieee80211_rate *rate; + + rate = &hw->wiphy->bands[i]->bitrates[r]; + + if (!(rate->flags & mandatory)) + continue; + sta->sta.deflink.supp_rates[i] |= BIT(r); + } + } + + sta->cparams.ce_threshold = CODEL_DISABLED_THRESHOLD; + sta->cparams.target = MS2TIME(20); + sta->cparams.interval = MS2TIME(100); + sta->cparams.ecn = true; + sta->cparams.ce_threshold_selector = 0; + sta->cparams.ce_threshold_mask = 0; + + sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr); + + return sta; + +free_txq: + kfree(to_txq_info(sta->sta.txq[0])); +free: + sta_info_free_link(&sta->deflink); +#ifdef CONFIG_MAC80211_MESH + kfree(sta->mesh); +#endif + kfree(sta); + return NULL; +} + +struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, + const u8 *addr, gfp_t gfp) +{ + return __sta_info_alloc(sdata, addr, -1, addr, gfp); +} + +struct sta_info *sta_info_alloc_with_link(struct ieee80211_sub_if_data *sdata, + const u8 *mld_addr, + unsigned int link_id, + const u8 *link_addr, + gfp_t gfp) +{ + return __sta_info_alloc(sdata, mld_addr, link_id, link_addr, gfp); +} + +static int sta_info_insert_check(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + + /* + * Can't be a WARN_ON because it can be triggered through a race: + * something inserts a STA (on one CPU) without holding the RTNL + * and another CPU turns off the net device. + */ + if (unlikely(!ieee80211_sdata_running(sdata))) + return -ENETDOWN; + + if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) || + !is_valid_ether_addr(sta->sta.addr))) + return -EINVAL; + + /* The RCU read lock is required by rhashtable due to + * asynchronous resize/rehash. We also require the mutex + * for correctness. + */ + rcu_read_lock(); + lockdep_assert_held(&sdata->local->sta_mtx); + if (ieee80211_hw_check(&sdata->local->hw, NEEDS_UNIQUE_STA_ADDR) && + ieee80211_find_sta_by_ifaddr(&sdata->local->hw, sta->addr, NULL)) { + rcu_read_unlock(); + return -ENOTUNIQ; + } + rcu_read_unlock(); + + return 0; +} + +static int sta_info_insert_drv_state(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct sta_info *sta) +{ + enum ieee80211_sta_state state; + int err = 0; + + for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) { + err = drv_sta_state(local, sdata, sta, state, state + 1); + if (err) + break; + } + + if (!err) { + /* + * Drivers using legacy sta_add/sta_remove callbacks only + * get uploaded set to true after sta_add is called. 
+ */ + if (!local->ops->sta_add) + sta->uploaded = true; + return 0; + } + + if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { + sdata_info(sdata, + "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n", + sta->sta.addr, state + 1, err); + err = 0; + } + + /* unwind on error */ + for (; state > IEEE80211_STA_NOTEXIST; state--) + WARN_ON(drv_sta_state(local, sdata, sta, state, state - 1)); + + return err; +} + +static void +ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + bool allow_p2p_go_ps = sdata->vif.p2p; + struct sta_info *sta; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (sdata != sta->sdata || + !test_sta_flag(sta, WLAN_STA_ASSOC)) + continue; + if (!sta->sta.support_p2p_ps) { + allow_p2p_go_ps = false; + break; + } + } + rcu_read_unlock(); + + if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) { + sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps; + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_P2P_PS); + } +} + +/* + * should be called with sta_mtx locked + * this function replaces the mutex lock + * with a RCU lock + */ +static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) +{ + struct ieee80211_local *local = sta->local; + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct station_info *sinfo = NULL; + int err = 0; + + lockdep_assert_held(&local->sta_mtx); + + /* check if STA exists already */ + if (sta_info_get_bss(sdata, sta->sta.addr)) { + err = -EEXIST; + goto out_cleanup; + } + + sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL); + if (!sinfo) { + err = -ENOMEM; + goto out_cleanup; + } + + local->num_sta++; + local->sta_generation++; + smp_mb(); + + /* simplify things and don't accept BA sessions yet */ + set_sta_flag(sta, WLAN_STA_BLOCK_BA); + + /* make the station visible */ + err = sta_info_hash_add(local, sta); + if (err) + goto out_drop_sta; + + if (sta->sta.valid_links) { + err = link_sta_info_hash_add(local, &sta->deflink); + if (err) { + sta_info_hash_del(local, sta); + goto out_drop_sta; + } + } + + list_add_tail_rcu(&sta->list, &local->sta_list); + + /* update channel context before notifying the driver about state + * change, this enables driver using the updated channel context right away. 
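sta_info_insert_drv_state() above walks the driver up to the station's current state one step at a time and, on failure, walks it back down again so driver and stack agree on the final state. A standalone sketch of that bring-up/unwind shape; fake_drv_sta_state() is a stand-in that fails on purpose, not the real driver op, and the IBSS "keep it anyway" special case is omitted:

#include <stdio.h>

enum sta_state { NOTEXIST, NONE, AUTH, ASSOC, AUTHORIZED };

/* Stand-in for the driver callback; made to fail when entering ASSOC. */
static int fake_drv_sta_state(enum sta_state from, enum sta_state to)
{
	(void)from;
	return to == ASSOC ? -1 : 0;
}

/* Walk the driver up to 'target' one state at a time; on error walk it
 * back down so both sides stay in sync. */
static int bring_up(enum sta_state target)
{
	enum sta_state s;
	int err = 0;

	for (s = NOTEXIST; s < target; s++) {
		err = fake_drv_sta_state(s, s + 1);
		if (err)
			break;
	}
	if (!err)
		return 0;

	for (; s > NOTEXIST; s--)
		fake_drv_sta_state(s, s - 1);
	return err;
}

int main(void)
{
	printf("bring_up(AUTHORIZED) = %d\n", bring_up(AUTHORIZED));	/* -1 */
	return 0;
}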
+ */ + if (sta->sta_state >= IEEE80211_STA_ASSOC) { + ieee80211_recalc_min_chandef(sta->sdata, -1); + if (!sta->sta.support_p2p_ps) + ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); + } + + /* notify driver */ + err = sta_info_insert_drv_state(local, sdata, sta); + if (err) + goto out_remove; + + set_sta_flag(sta, WLAN_STA_INSERTED); + + /* accept BA sessions now */ + clear_sta_flag(sta, WLAN_STA_BLOCK_BA); + + ieee80211_sta_debugfs_add(sta); + rate_control_add_sta_debugfs(sta); + if (sta->sta.valid_links) { + int i; + + for (i = 0; i < ARRAY_SIZE(sta->link); i++) { + struct link_sta_info *link_sta; + + link_sta = rcu_dereference_protected(sta->link[i], + lockdep_is_held(&local->sta_mtx)); + + if (!link_sta) + continue; + + ieee80211_link_sta_debugfs_add(link_sta); + if (sdata->vif.active_links & BIT(i)) + ieee80211_link_sta_debugfs_drv_add(link_sta); + } + } else { + ieee80211_link_sta_debugfs_add(&sta->deflink); + ieee80211_link_sta_debugfs_drv_add(&sta->deflink); + } + + sinfo->generation = local->sta_generation; + cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL); + kfree(sinfo); + + sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr); + + /* move reference to rcu-protected */ + rcu_read_lock(); + mutex_unlock(&local->sta_mtx); + + if (ieee80211_vif_is_mesh(&sdata->vif)) + mesh_accept_plinks_update(sdata); + + return 0; + out_remove: + if (sta->sta.valid_links) + link_sta_info_hash_del(local, &sta->deflink); + sta_info_hash_del(local, sta); + list_del_rcu(&sta->list); + out_drop_sta: + local->num_sta--; + synchronize_net(); + out_cleanup: + cleanup_single_sta(sta); + mutex_unlock(&local->sta_mtx); + kfree(sinfo); + rcu_read_lock(); + return err; +} + +int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU) +{ + struct ieee80211_local *local = sta->local; + int err; + + might_sleep(); + + mutex_lock(&local->sta_mtx); + + err = sta_info_insert_check(sta); + if (err) { + sta_info_free(local, sta); + mutex_unlock(&local->sta_mtx); + rcu_read_lock(); + return err; + } + + return sta_info_insert_finish(sta); +} + +int sta_info_insert(struct sta_info *sta) +{ + int err = sta_info_insert_rcu(sta); + + rcu_read_unlock(); + + return err; +} + +static inline void __bss_tim_set(u8 *tim, u16 id) +{ + /* + * This format has been mandated by the IEEE specifications, + * so this line may not be changed to use the __set_bit() format. + */ + tim[id / 8] |= (1 << (id % 8)); +} + +static inline void __bss_tim_clear(u8 *tim, u16 id) +{ + /* + * This format has been mandated by the IEEE specifications, + * so this line may not be changed to use the __clear_bit() format. + */ + tim[id / 8] &= ~(1 << (id % 8)); +} + +static inline bool __bss_tim_get(u8 *tim, u16 id) +{ + /* + * This format has been mandated by the IEEE specifications, + * so this line may not be changed to use the test_bit() format. 
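The three helpers around this comment implement the AID-indexed TIM bitmap exactly as the spec lays it out: bit (aid % 8) of octet (aid / 8). A small userspace demo of the same layout:

#include <stdint.h>
#include <stdio.h>

static void tim_set(uint8_t *tim, unsigned int aid)
{
	tim[aid / 8] |= 1 << (aid % 8);
}

static void tim_clear(uint8_t *tim, unsigned int aid)
{
	tim[aid / 8] &= ~(1 << (aid % 8));
}

static int tim_get(const uint8_t *tim, unsigned int aid)
{
	return !!(tim[aid / 8] & (1 << (aid % 8)));
}

int main(void)
{
	uint8_t tim[32] = { 0 };	/* room for AIDs up to 255 */

	tim_set(tim, 13);
	printf("aid 13 set: %d, octet 1 = 0x%02x\n", tim_get(tim, 13), tim[1]);	/* 1, 0x20 */
	tim_clear(tim, 13);
	printf("aid 13 set: %d\n", tim_get(tim, 13));				/* 0 */
	return 0;
}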
+ */ + return tim[id / 8] & (1 << (id % 8)); +} + +static unsigned long ieee80211_tids_for_ac(int ac) +{ + /* If we ever support TIDs > 7, this obviously needs to be adjusted */ + switch (ac) { + case IEEE80211_AC_VO: + return BIT(6) | BIT(7); + case IEEE80211_AC_VI: + return BIT(4) | BIT(5); + case IEEE80211_AC_BE: + return BIT(0) | BIT(3); + case IEEE80211_AC_BK: + return BIT(1) | BIT(2); + default: + WARN_ON(1); + return 0; + } +} + +static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending) +{ + struct ieee80211_local *local = sta->local; + struct ps_data *ps; + bool indicate_tim = false; + u8 ignore_for_tim = sta->sta.uapsd_queues; + int ac; + u16 id = sta->sta.aid; + + if (sta->sdata->vif.type == NL80211_IFTYPE_AP || + sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + if (WARN_ON_ONCE(!sta->sdata->bss)) + return; + + ps = &sta->sdata->bss->ps; +#ifdef CONFIG_MAC80211_MESH + } else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) { + ps = &sta->sdata->u.mesh.ps; +#endif + } else { + return; + } + + /* No need to do anything if the driver does all */ + if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim) + return; + + if (sta->dead) + goto done; + + /* + * If all ACs are delivery-enabled then we should build + * the TIM bit for all ACs anyway; if only some are then + * we ignore those and build the TIM bit using only the + * non-enabled ones. + */ + if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1) + ignore_for_tim = 0; + + if (ignore_pending) + ignore_for_tim = BIT(IEEE80211_NUM_ACS) - 1; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + unsigned long tids; + + if (ignore_for_tim & ieee80211_ac_to_qos_mask[ac]) + continue; + + indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) || + !skb_queue_empty(&sta->ps_tx_buf[ac]); + if (indicate_tim) + break; + + tids = ieee80211_tids_for_ac(ac); + + indicate_tim |= + sta->driver_buffered_tids & tids; + indicate_tim |= + sta->txq_buffered_tids & tids; + } + + done: + spin_lock_bh(&local->tim_lock); + + if (indicate_tim == __bss_tim_get(ps->tim, id)) + goto out_unlock; + + if (indicate_tim) + __bss_tim_set(ps->tim, id); + else + __bss_tim_clear(ps->tim, id); + + if (local->ops->set_tim && !WARN_ON(sta->dead)) { + local->tim_in_locked_section = true; + drv_set_tim(local, &sta->sta, indicate_tim); + local->tim_in_locked_section = false; + } + +out_unlock: + spin_unlock_bh(&local->tim_lock); +} + +void sta_info_recalc_tim(struct sta_info *sta) +{ + __sta_info_recalc_tim(sta, false); +} + +static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info; + int timeout; + + if (!skb) + return false; + + info = IEEE80211_SKB_CB(skb); + + /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */ + timeout = (sta->listen_interval * + sta->sdata->vif.bss_conf.beacon_int * + 32 / 15625) * HZ; + if (timeout < STA_TX_BUFFER_EXPIRE) + timeout = STA_TX_BUFFER_EXPIRE; + return time_after(jiffies, info->control.jiffies + timeout); +} + + +static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local, + struct sta_info *sta, int ac) +{ + unsigned long flags; + struct sk_buff *skb; + + /* + * First check for frames that should expire on the filtered + * queue. Frames here were rejected by the driver and are on + * a separate queue to avoid reordering with normal PS-buffered + * frames. They also aren't accounted for right now in the + * total_ps_buffered counter. 
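The timeout in sta_info_buffer_expired() above is 2 * listen_interval * beacon_int time units; since one TU is 1024 us, 2 * 1024 / 1000000 reduces to the 32 / 15625 integer factor used there. A worked standalone version; the tick rate and the expiry floor are assumptions for the demo, not values taken from this patch:

#include <stdio.h>

#define HZ			250		/* assumed tick rate */
#define TX_BUFFER_EXPIRE_FLOOR	(10 * HZ)	/* assumed lower bound */

/* 2 * listen_interval beacon intervals, each beacon_int TUs of 1024 us. */
static long buffer_timeout_jiffies(long listen_interval, long beacon_int_tu)
{
	long timeout = listen_interval * beacon_int_tu * 32 / 15625 * HZ;

	return timeout < TX_BUFFER_EXPIRE_FLOOR ? TX_BUFFER_EXPIRE_FLOOR : timeout;
}

int main(void)
{
	/* listen interval 10, beacon interval 100 TU: ~2 s, so the floor wins */
	printf("%ld jiffies\n", buffer_timeout_jiffies(10, 100));	/* 2500 */
	return 0;
}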
+ */ + for (;;) { + spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); + skb = skb_peek(&sta->tx_filtered[ac]); + if (sta_info_buffer_expired(sta, skb)) + skb = __skb_dequeue(&sta->tx_filtered[ac]); + else + skb = NULL; + spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); + + /* + * Frames are queued in order, so if this one + * hasn't expired yet we can stop testing. If + * we actually reached the end of the queue we + * also need to stop, of course. + */ + if (!skb) + break; + ieee80211_free_txskb(&local->hw, skb); + } + + /* + * Now also check the normal PS-buffered queue, this will + * only find something if the filtered queue was emptied + * since the filtered frames are all before the normal PS + * buffered frames. + */ + for (;;) { + spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); + skb = skb_peek(&sta->ps_tx_buf[ac]); + if (sta_info_buffer_expired(sta, skb)) + skb = __skb_dequeue(&sta->ps_tx_buf[ac]); + else + skb = NULL; + spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); + + /* + * frames are queued in order, so if this one + * hasn't expired yet (or we reached the end of + * the queue) we can stop testing + */ + if (!skb) + break; + + local->total_ps_buffered--; + ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n", + sta->sta.addr); + ieee80211_free_txskb(&local->hw, skb); + } + + /* + * Finally, recalculate the TIM bit for this station -- it might + * now be clear because the station was too slow to retrieve its + * frames. + */ + sta_info_recalc_tim(sta); + + /* + * Return whether there are any frames still buffered, this is + * used to check whether the cleanup timer still needs to run, + * if there are no frames we don't need to rearm the timer. + */ + return !(skb_queue_empty(&sta->ps_tx_buf[ac]) && + skb_queue_empty(&sta->tx_filtered[ac])); +} + +static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local, + struct sta_info *sta) +{ + bool have_buffered = false; + int ac; + + /* This is only necessary for stations on BSS/MBSS interfaces */ + if (!sta->sdata->bss && + !ieee80211_vif_is_mesh(&sta->sdata->vif)) + return false; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + have_buffered |= + sta_info_cleanup_expire_buffered_ac(local, sta, ac); + + return have_buffered; +} + +static int __must_check __sta_info_destroy_part1(struct sta_info *sta) +{ + struct ieee80211_local *local; + struct ieee80211_sub_if_data *sdata; + int ret, i; + + might_sleep(); + + if (!sta) + return -ENOENT; + + local = sta->local; + sdata = sta->sdata; + + lockdep_assert_held(&local->sta_mtx); + + /* + * Before removing the station from the driver and + * rate control, it might still start new aggregation + * sessions -- block that to make sure the tear-down + * will be sufficient. + */ + set_sta_flag(sta, WLAN_STA_BLOCK_BA); + ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA); + + /* + * Before removing the station from the driver there might be pending + * rx frames on RSS queues sent prior to the disassociation - wait for + * all such frames to be processed. + */ + drv_sync_rx_queues(local, sta); + + for (i = 0; i < ARRAY_SIZE(sta->link); i++) { + struct link_sta_info *link_sta; + + if (!(sta->sta.valid_links & BIT(i))) + continue; + + link_sta = rcu_dereference_protected(sta->link[i], + lockdep_is_held(&local->sta_mtx)); + + link_sta_info_hash_del(local, link_sta); + } + + ret = sta_info_hash_del(local, sta); + if (WARN_ON(ret)) + return ret; + + /* + * for TDLS peers, make sure to return to the base channel before + * removal. 
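Both expiry loops above rely on the queues being time-ordered: peek at the head, free it if it has expired, and stop at the first frame that has not. A minimal array-based sketch of that early-out pattern:

#include <stdio.h>

struct frame {
	long enqueue_time;
};

/* Frames are queued in order, so we can stop at the first one that has
 * not expired yet - the same early-out the loops above depend on. */
static int expire_old_frames(const struct frame *q, int n, long now, long max_age)
{
	int freed = 0;

	while (freed < n && now - q[freed].enqueue_time > max_age)
		freed++;	/* a real queue would dequeue and free the skb here */

	return freed;
}

int main(void)
{
	const struct frame q[] = { { 100 }, { 150 }, { 990 } };

	printf("expired %d frame(s)\n", expire_old_frames(q, 3, 1000, 200));	/* 2 */
	return 0;
}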
+ */ + if (test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) { + drv_tdls_cancel_channel_switch(local, sdata, &sta->sta); + clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL); + } + + list_del_rcu(&sta->list); + sta->removed = true; + + if (sta->uploaded) + drv_sta_pre_rcu_remove(local, sta->sdata, sta); + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + rcu_access_pointer(sdata->u.vlan.sta) == sta) + RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); + + return 0; +} + +static int _sta_info_move_state(struct sta_info *sta, + enum ieee80211_sta_state new_state, + bool recalc) +{ + might_sleep(); + + if (sta->sta_state == new_state) + return 0; + + /* check allowed transitions first */ + + switch (new_state) { + case IEEE80211_STA_NONE: + if (sta->sta_state != IEEE80211_STA_AUTH) + return -EINVAL; + break; + case IEEE80211_STA_AUTH: + if (sta->sta_state != IEEE80211_STA_NONE && + sta->sta_state != IEEE80211_STA_ASSOC) + return -EINVAL; + break; + case IEEE80211_STA_ASSOC: + if (sta->sta_state != IEEE80211_STA_AUTH && + sta->sta_state != IEEE80211_STA_AUTHORIZED) + return -EINVAL; + break; + case IEEE80211_STA_AUTHORIZED: + if (sta->sta_state != IEEE80211_STA_ASSOC) + return -EINVAL; + break; + default: + WARN(1, "invalid state %d", new_state); + return -EINVAL; + } + + sta_dbg(sta->sdata, "moving STA %pM to state %d\n", + sta->sta.addr, new_state); + + /* notify the driver before the actual changes so it can + * fail the transition + */ + if (test_sta_flag(sta, WLAN_STA_INSERTED)) { + int err = drv_sta_state(sta->local, sta->sdata, sta, + sta->sta_state, new_state); + if (err) + return err; + } + + /* reflect the change in all state variables */ + + switch (new_state) { + case IEEE80211_STA_NONE: + if (sta->sta_state == IEEE80211_STA_AUTH) + clear_bit(WLAN_STA_AUTH, &sta->_flags); + break; + case IEEE80211_STA_AUTH: + if (sta->sta_state == IEEE80211_STA_NONE) { + set_bit(WLAN_STA_AUTH, &sta->_flags); + } else if (sta->sta_state == IEEE80211_STA_ASSOC) { + clear_bit(WLAN_STA_ASSOC, &sta->_flags); + if (recalc) { + ieee80211_recalc_min_chandef(sta->sdata, -1); + if (!sta->sta.support_p2p_ps) + ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); + } + } + break; + case IEEE80211_STA_ASSOC: + if (sta->sta_state == IEEE80211_STA_AUTH) { + set_bit(WLAN_STA_ASSOC, &sta->_flags); + sta->assoc_at = ktime_get_boottime_ns(); + if (recalc) { + ieee80211_recalc_min_chandef(sta->sdata, -1); + if (!sta->sta.support_p2p_ps) + ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); + } + } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { + ieee80211_vif_dec_num_mcast(sta->sdata); + clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); + ieee80211_clear_fast_xmit(sta); + ieee80211_clear_fast_rx(sta); + } + break; + case IEEE80211_STA_AUTHORIZED: + if (sta->sta_state == IEEE80211_STA_ASSOC) { + ieee80211_vif_inc_num_mcast(sta->sdata); + set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); + ieee80211_check_fast_xmit(sta); + ieee80211_check_fast_rx(sta); + } + if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + sta->sdata->vif.type == NL80211_IFTYPE_AP) + cfg80211_send_layer2_update(sta->sdata->dev, + sta->sta.addr); + break; + default: + break; + } + + sta->sta_state = new_state; + + return 0; +} + +int sta_info_move_state(struct sta_info *sta, + enum ieee80211_sta_state new_state) +{ + return _sta_info_move_state(sta, new_state, true); +} + +static void __sta_info_destroy_part2(struct sta_info *sta, bool recalc) +{ + struct ieee80211_local *local = sta->local; + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct station_info 
*sinfo; + int ret; + + /* + * NOTE: This assumes at least synchronize_net() was done + * after _part1 and before _part2! + */ + + might_sleep(); + lockdep_assert_held(&local->sta_mtx); + + if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { + ret = _sta_info_move_state(sta, IEEE80211_STA_ASSOC, recalc); + WARN_ON_ONCE(ret); + } + + /* Flush queues before removing keys, as that might remove them + * from hardware, and then depending on the offload method, any + * frames sitting on hardware queues might be sent out without + * any encryption at all. + */ + if (local->ops->set_key) { + if (local->ops->flush_sta) + drv_flush_sta(local, sta->sdata, sta); + else + ieee80211_flush_queues(local, sta->sdata, false); + } + + /* now keys can no longer be reached */ + ieee80211_free_sta_keys(local, sta); + + /* disable TIM bit - last chance to tell driver */ + __sta_info_recalc_tim(sta, true); + + sta->dead = true; + + local->num_sta--; + local->sta_generation++; + + while (sta->sta_state > IEEE80211_STA_NONE) { + ret = _sta_info_move_state(sta, sta->sta_state - 1, recalc); + if (ret) { + WARN_ON_ONCE(1); + break; + } + } + + if (sta->uploaded) { + ret = drv_sta_state(local, sdata, sta, IEEE80211_STA_NONE, + IEEE80211_STA_NOTEXIST); + WARN_ON_ONCE(ret != 0); + } + + sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr); + + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (sinfo) + sta_set_sinfo(sta, sinfo, true); + cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL); + kfree(sinfo); + + ieee80211_sta_debugfs_remove(sta); + + ieee80211_destroy_frag_cache(&sta->frags); + + cleanup_single_sta(sta); +} + +int __must_check __sta_info_destroy(struct sta_info *sta) +{ + int err = __sta_info_destroy_part1(sta); + + if (err) + return err; + + synchronize_net(); + + __sta_info_destroy_part2(sta, true); + + return 0; +} + +int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr) +{ + struct sta_info *sta; + int ret; + + mutex_lock(&sdata->local->sta_mtx); + sta = sta_info_get(sdata, addr); + ret = __sta_info_destroy(sta); + mutex_unlock(&sdata->local->sta_mtx); + + return ret; +} + +int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata, + const u8 *addr) +{ + struct sta_info *sta; + int ret; + + mutex_lock(&sdata->local->sta_mtx); + sta = sta_info_get_bss(sdata, addr); + ret = __sta_info_destroy(sta); + mutex_unlock(&sdata->local->sta_mtx); + + return ret; +} + +static void sta_info_cleanup(struct timer_list *t) +{ + struct ieee80211_local *local = from_timer(local, t, sta_cleanup); + struct sta_info *sta; + bool timer_needed = false; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &local->sta_list, list) + if (sta_info_cleanup_expire_buffered(local, sta)) + timer_needed = true; + rcu_read_unlock(); + + if (local->quiescing) + return; + + if (!timer_needed) + return; + + mod_timer(&local->sta_cleanup, + round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL)); +} + +int sta_info_init(struct ieee80211_local *local) +{ + int err; + + err = rhltable_init(&local->sta_hash, &sta_rht_params); + if (err) + return err; + + err = rhltable_init(&local->link_sta_hash, &link_sta_rht_params); + if (err) { + rhltable_destroy(&local->sta_hash); + return err; + } + + spin_lock_init(&local->tim_lock); + mutex_init(&local->sta_mtx); + INIT_LIST_HEAD(&local->sta_list); + + timer_setup(&local->sta_cleanup, sta_info_cleanup, 0); + return 0; +} + +void sta_info_stop(struct ieee80211_local *local) +{ + del_timer_sync(&local->sta_cleanup); + rhltable_destroy(&local->sta_hash); + 
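As an aside on the station state machine used by the teardown loop in __sta_info_destroy_part2() above: _sta_info_move_state() only accepts single-step transitions (NONE <-> AUTH <-> ASSOC <-> AUTHORIZED), so callers walk through the intermediate states one at a time. A minimal sketch of that pattern, assuming it runs with local->sta_mtx held; the helper name is made up for illustration and is not part of this patch:

static int example_authorize_sta(struct sta_info *sta)
{
	int err;

	/* each call moves exactly one step; skipping a step returns -EINVAL */
	err = sta_info_move_state(sta, IEEE80211_STA_AUTH);
	if (err)
		return err;

	err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
	if (err)
		return err;

	/* AUTHORIZED enables the TX/RX fast paths and, on AP/AP_VLAN
	 * interfaces, triggers the layer-2 update frame
	 */
	return sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
}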
rhltable_destroy(&local->link_sta_hash); +} + + +int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta, *tmp; + LIST_HEAD(free_list); + int ret = 0; + + might_sleep(); + + WARN_ON(vlans && sdata->vif.type != NL80211_IFTYPE_AP); + WARN_ON(vlans && !sdata->bss); + + mutex_lock(&local->sta_mtx); + list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { + if (sdata == sta->sdata || + (vlans && sdata->bss == sta->sdata->bss)) { + if (!WARN_ON(__sta_info_destroy_part1(sta))) + list_add(&sta->free_list, &free_list); + ret++; + } + } + + if (!list_empty(&free_list)) { + bool support_p2p_ps = true; + + synchronize_net(); + list_for_each_entry_safe(sta, tmp, &free_list, free_list) { + if (!sta->sta.support_p2p_ps) + support_p2p_ps = false; + __sta_info_destroy_part2(sta, false); + } + + ieee80211_recalc_min_chandef(sdata, -1); + if (!support_p2p_ps) + ieee80211_recalc_p2p_go_ps_allowed(sdata); + } + mutex_unlock(&local->sta_mtx); + + return ret; +} + +void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, + unsigned long exp_time) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta, *tmp; + + mutex_lock(&local->sta_mtx); + + list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { + unsigned long last_active = ieee80211_sta_last_active(sta); + + if (sdata != sta->sdata) + continue; + + if (time_is_before_jiffies(last_active + exp_time)) { + sta_dbg(sta->sdata, "expiring inactive STA %pM\n", + sta->sta.addr); + + if (ieee80211_vif_is_mesh(&sdata->vif) && + test_sta_flag(sta, WLAN_STA_PS_STA)) + atomic_dec(&sdata->u.mesh.ps.num_sta_ps); + + WARN_ON(__sta_info_destroy(sta)); + } + } + + mutex_unlock(&local->sta_mtx); +} + +struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, + const u8 *addr, + const u8 *localaddr) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct rhlist_head *tmp; + struct sta_info *sta; + + /* + * Just return a random station if localaddr is NULL + * ... first in list. 
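+ * Must be called under the RCU read lock, and the returned pointer is
+ * only valid while that lock is held.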
+ */ + for_each_sta_info(local, addr, sta, tmp) { + if (localaddr && + !ether_addr_equal(sta->sdata->vif.addr, localaddr)) + continue; + if (!sta->uploaded) + return NULL; + return &sta->sta; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_ifaddr); + +struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, + const u8 *addr) +{ + struct sta_info *sta; + + if (!vif) + return NULL; + + sta = sta_info_get_bss(vif_to_sdata(vif), addr); + if (!sta) + return NULL; + + if (!sta->uploaded) + return NULL; + + return &sta->sta; +} +EXPORT_SYMBOL(ieee80211_find_sta); + +/* powersave support code */ +void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + struct sk_buff_head pending; + int filtered = 0, buffered = 0, ac, i; + unsigned long flags; + struct ps_data *ps; + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, + u.ap); + + if (sdata->vif.type == NL80211_IFTYPE_AP) + ps = &sdata->bss->ps; + else if (ieee80211_vif_is_mesh(&sdata->vif)) + ps = &sdata->u.mesh.ps; + else + return; + + clear_sta_flag(sta, WLAN_STA_SP); + + BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1); + sta->driver_buffered_tids = 0; + sta->txq_buffered_tids = 0; + + if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) + drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta); + + for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { + if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i])) + continue; + + schedule_and_wake_txq(local, to_txq_info(sta->sta.txq[i])); + } + + skb_queue_head_init(&pending); + + /* sync with ieee80211_tx_h_unicast_ps_buf */ + spin_lock(&sta->ps_lock); + /* Send all buffered frames to the station */ + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + int count = skb_queue_len(&pending), tmp; + + spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); + skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending); + spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); + tmp = skb_queue_len(&pending); + filtered += tmp - count; + count = tmp; + + spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); + skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending); + spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); + tmp = skb_queue_len(&pending); + buffered += tmp - count; + } + + ieee80211_add_pending_skbs(local, &pending); + + /* now we're no longer in the deliver code */ + clear_sta_flag(sta, WLAN_STA_PS_DELIVER); + + /* The station might have polled and then woken up before we responded, + * so clear these flags now to avoid them sticking around. 
+ */ + clear_sta_flag(sta, WLAN_STA_PSPOLL); + clear_sta_flag(sta, WLAN_STA_UAPSD); + spin_unlock(&sta->ps_lock); + + atomic_dec(&ps->num_sta_ps); + + local->total_ps_buffered -= buffered; + + sta_info_recalc_tim(sta); + + ps_dbg(sdata, + "STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n", + sta->sta.addr, sta->sta.aid, filtered, buffered); + + ieee80211_check_fast_xmit(sta); +} + +static void ieee80211_send_null_response(struct sta_info *sta, int tid, + enum ieee80211_frame_release_type reason, + bool call_driver, bool more_data) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_qos_hdr *nullfunc; + struct sk_buff *skb; + int size = sizeof(*nullfunc); + __le16 fc; + bool qos = sta->sta.wme; + struct ieee80211_tx_info *info; + struct ieee80211_chanctx_conf *chanctx_conf; + + if (qos) { + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_NULLFUNC | + IEEE80211_FCTL_FROMDS); + } else { + size -= 2; + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_NULLFUNC | + IEEE80211_FCTL_FROMDS); + } + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + size); + if (!skb) + return; + + skb_reserve(skb, local->hw.extra_tx_headroom); + + nullfunc = skb_put(skb, size); + nullfunc->frame_control = fc; + nullfunc->duration_id = 0; + memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); + memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); + memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); + nullfunc->seq_ctrl = 0; + + skb->priority = tid; + skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); + if (qos) { + nullfunc->qos_ctrl = cpu_to_le16(tid); + + if (reason == IEEE80211_FRAME_RELEASE_UAPSD) { + nullfunc->qos_ctrl |= + cpu_to_le16(IEEE80211_QOS_CTL_EOSP); + if (more_data) + nullfunc->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } + } + + info = IEEE80211_SKB_CB(skb); + + /* + * Tell TX path to send this frame even though the + * STA may still remain is PS mode after this frame + * exchange. Also set EOSP to indicate this packet + * ends the poll/service period. + */ + info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER | + IEEE80211_TX_STATUS_EOSP | + IEEE80211_TX_CTL_REQ_TX_STATUS; + + info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; + + if (call_driver) + drv_allow_buffered_frames(local, sta, BIT(tid), 1, + reason, false); + + skb->dev = sdata->dev; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + if (WARN_ON(!chanctx_conf)) { + rcu_read_unlock(); + kfree_skb(skb); + return; + } + + info->band = chanctx_conf->def.chan->band; + ieee80211_xmit(sdata, sta, skb); + rcu_read_unlock(); +} + +static int find_highest_prio_tid(unsigned long tids) +{ + /* lower 3 TIDs aren't ordered perfectly */ + if (tids & 0xF8) + return fls(tids) - 1; + /* TID 0 is BE just like TID 3 */ + if (tids & BIT(0)) + return 0; + return fls(tids) - 1; +} + +/* Indicates if the MORE_DATA bit should be set in the last + * frame obtained by ieee80211_sta_ps_get_frames. + * Note that driver_release_tids is relevant only if + * reason = IEEE80211_FRAME_RELEASE_PSPOLL + */ +static bool +ieee80211_sta_ps_more_data(struct sta_info *sta, u8 ignored_acs, + enum ieee80211_frame_release_type reason, + unsigned long driver_release_tids) +{ + int ac; + + /* If the driver has data on more than one TID then + * certainly there's more data if we release just a + * single frame now (from a single TID). This will + * only happen for PS-Poll. 
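+ * For example, if the driver has buffered frames on both TID 0 and
+ * TID 6, a PS-Poll only releases a single frame (from TID 6, the
+ * higher-priority one), so the MoreData bit has to be set.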
+ */ + if (reason == IEEE80211_FRAME_RELEASE_PSPOLL && + hweight16(driver_release_tids) > 1) + return true; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + if (ignored_acs & ieee80211_ac_to_qos_mask[ac]) + continue; + + if (!skb_queue_empty(&sta->tx_filtered[ac]) || + !skb_queue_empty(&sta->ps_tx_buf[ac])) + return true; + } + + return false; +} + +static void +ieee80211_sta_ps_get_frames(struct sta_info *sta, int n_frames, u8 ignored_acs, + enum ieee80211_frame_release_type reason, + struct sk_buff_head *frames, + unsigned long *driver_release_tids) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + int ac; + + /* Get response frame(s) and more data bit for the last one. */ + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + unsigned long tids; + + if (ignored_acs & ieee80211_ac_to_qos_mask[ac]) + continue; + + tids = ieee80211_tids_for_ac(ac); + + /* if we already have frames from software, then we can't also + * release from hardware queues + */ + if (skb_queue_empty(frames)) { + *driver_release_tids |= + sta->driver_buffered_tids & tids; + *driver_release_tids |= sta->txq_buffered_tids & tids; + } + + if (!*driver_release_tids) { + struct sk_buff *skb; + + while (n_frames > 0) { + skb = skb_dequeue(&sta->tx_filtered[ac]); + if (!skb) { + skb = skb_dequeue( + &sta->ps_tx_buf[ac]); + if (skb) + local->total_ps_buffered--; + } + if (!skb) + break; + n_frames--; + __skb_queue_tail(frames, skb); + } + } + + /* If we have more frames buffered on this AC, then abort the + * loop since we can't send more data from other ACs before + * the buffered frames from this. + */ + if (!skb_queue_empty(&sta->tx_filtered[ac]) || + !skb_queue_empty(&sta->ps_tx_buf[ac])) + break; + } +} + +static void +ieee80211_sta_ps_deliver_response(struct sta_info *sta, + int n_frames, u8 ignored_acs, + enum ieee80211_frame_release_type reason) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + unsigned long driver_release_tids = 0; + struct sk_buff_head frames; + bool more_data; + + /* Service or PS-Poll period starts */ + set_sta_flag(sta, WLAN_STA_SP); + + __skb_queue_head_init(&frames); + + ieee80211_sta_ps_get_frames(sta, n_frames, ignored_acs, reason, + &frames, &driver_release_tids); + + more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason, driver_release_tids); + + if (driver_release_tids && reason == IEEE80211_FRAME_RELEASE_PSPOLL) + driver_release_tids = + BIT(find_highest_prio_tid(driver_release_tids)); + + if (skb_queue_empty(&frames) && !driver_release_tids) { + int tid, ac; + + /* + * For PS-Poll, this can only happen due to a race condition + * when we set the TIM bit and the station notices it, but + * before it can poll for the frame we expire it. + * + * For uAPSD, this is said in the standard (11.2.1.5 h): + * At each unscheduled SP for a non-AP STA, the AP shall + * attempt to transmit at least one MSDU or MMPDU, but no + * more than the value specified in the Max SP Length field + * in the QoS Capability element from delivery-enabled ACs, + * that are destined for the non-AP STA. + * + * Since we have no other MSDU/MMPDU, transmit a QoS null frame. + */ + + /* This will evaluate to 1, 3, 5 or 7. 
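+ * (7 for AC_VO, 5 for AC_VI, 3 for AC_BE and 1 for AC_BK, i.e. always
+ * a TID that maps back to the chosen AC)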
*/ + for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++) + if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac])) + break; + tid = 7 - 2 * ac; + + ieee80211_send_null_response(sta, tid, reason, true, false); + } else if (!driver_release_tids) { + struct sk_buff_head pending; + struct sk_buff *skb; + int num = 0; + u16 tids = 0; + bool need_null = false; + + skb_queue_head_init(&pending); + + while ((skb = __skb_dequeue(&frames))) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *) skb->data; + u8 *qoshdr = NULL; + + num++; + + /* + * Tell TX path to send this frame even though the + * STA may still remain is PS mode after this frame + * exchange. + */ + info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; + info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; + + /* + * Use MoreData flag to indicate whether there are + * more buffered frames for this STA + */ + if (more_data || !skb_queue_empty(&frames)) + hdr->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + else + hdr->frame_control &= + cpu_to_le16(~IEEE80211_FCTL_MOREDATA); + + if (ieee80211_is_data_qos(hdr->frame_control) || + ieee80211_is_qos_nullfunc(hdr->frame_control)) + qoshdr = ieee80211_get_qos_ctl(hdr); + + tids |= BIT(skb->priority); + + __skb_queue_tail(&pending, skb); + + /* end service period after last frame or add one */ + if (!skb_queue_empty(&frames)) + continue; + + if (reason != IEEE80211_FRAME_RELEASE_UAPSD) { + /* for PS-Poll, there's only one frame */ + info->flags |= IEEE80211_TX_STATUS_EOSP | + IEEE80211_TX_CTL_REQ_TX_STATUS; + break; + } + + /* For uAPSD, things are a bit more complicated. If the + * last frame has a QoS header (i.e. is a QoS-data or + * QoS-nulldata frame) then just set the EOSP bit there + * and be done. + * If the frame doesn't have a QoS header (which means + * it should be a bufferable MMPDU) then we can't set + * the EOSP bit in the QoS header; add a QoS-nulldata + * frame to the list to send it after the MMPDU. + * + * Note that this code is only in the mac80211-release + * code path, we assume that the driver will not buffer + * anything but QoS-data frames, or if it does, will + * create the QoS-nulldata frame by itself if needed. + * + * Cf. 802.11-2012 10.2.1.10 (c). + */ + if (qoshdr) { + *qoshdr |= IEEE80211_QOS_CTL_EOSP; + + info->flags |= IEEE80211_TX_STATUS_EOSP | + IEEE80211_TX_CTL_REQ_TX_STATUS; + } else { + /* The standard isn't completely clear on this + * as it says the more-data bit should be set + * if there are more BUs. The QoS-Null frame + * we're about to send isn't buffered yet, we + * only create it below, but let's pretend it + * was buffered just in case some clients only + * expect more-data=0 when eosp=1. + */ + hdr->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + need_null = true; + num++; + } + break; + } + + drv_allow_buffered_frames(local, sta, tids, num, + reason, more_data); + + ieee80211_add_pending_skbs(local, &pending); + + if (need_null) + ieee80211_send_null_response( + sta, find_highest_prio_tid(tids), + reason, false, false); + + sta_info_recalc_tim(sta); + } else { + int tid; + + /* + * We need to release a frame that is buffered somewhere in the + * driver ... it'll have to handle that. 
+ * Note that the driver also has to check the number of frames + * on the TIDs we're releasing from - if there are more than + * n_frames it has to set the more-data bit (if we didn't ask + * it to set it anyway due to other buffered frames); if there + * are fewer than n_frames it has to make sure to adjust that + * to allow the service period to end properly. + */ + drv_release_buffered_frames(local, sta, driver_release_tids, + n_frames, reason, more_data); + + /* + * Note that we don't recalculate the TIM bit here as it would + * most likely have no effect at all unless the driver told us + * that the TID(s) became empty before returning here from the + * release function. + * Either way, however, when the driver tells us that the TID(s) + * became empty or we find that a txq became empty, we'll do the + * TIM recalculation. + */ + + for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { + if (!sta->sta.txq[tid] || + !(driver_release_tids & BIT(tid)) || + txq_has_queue(sta->sta.txq[tid])) + continue; + + sta_info_recalc_tim(sta); + break; + } + } +} + +void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta) +{ + u8 ignore_for_response = sta->sta.uapsd_queues; + + /* + * If all ACs are delivery-enabled then we should reply + * from any of them, if only some are enabled we reply + * only from the non-enabled ones. + */ + if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1) + ignore_for_response = 0; + + ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response, + IEEE80211_FRAME_RELEASE_PSPOLL); +} + +void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta) +{ + int n_frames = sta->sta.max_sp; + u8 delivery_enabled = sta->sta.uapsd_queues; + + /* + * If we ever grow support for TSPEC this might happen if + * the TSPEC update from hostapd comes in between a trigger + * frame setting WLAN_STA_UAPSD in the RX path and this + * actually getting called. + */ + if (!delivery_enabled) + return; + + switch (sta->sta.max_sp) { + case 1: + n_frames = 2; + break; + case 2: + n_frames = 4; + break; + case 3: + n_frames = 6; + break; + case 0: + /* XXX: what is a good value? 
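+ * A Max SP Length of 0 means the AP may deliver all buffered frames
+ * during the service period, so any sufficiently large cap will do.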
*/ + n_frames = 128; + break; + } + + ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled, + IEEE80211_FRAME_RELEASE_UAPSD); +} + +void ieee80211_sta_block_awake(struct ieee80211_hw *hw, + struct ieee80211_sta *pubsta, bool block) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + + trace_api_sta_block_awake(sta->local, pubsta, block); + + if (block) { + set_sta_flag(sta, WLAN_STA_PS_DRIVER); + ieee80211_clear_fast_xmit(sta); + return; + } + + if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) + return; + + if (!test_sta_flag(sta, WLAN_STA_PS_STA)) { + set_sta_flag(sta, WLAN_STA_PS_DELIVER); + clear_sta_flag(sta, WLAN_STA_PS_DRIVER); + ieee80211_queue_work(hw, &sta->drv_deliver_wk); + } else if (test_sta_flag(sta, WLAN_STA_PSPOLL) || + test_sta_flag(sta, WLAN_STA_UAPSD)) { + /* must be asleep in this case */ + clear_sta_flag(sta, WLAN_STA_PS_DRIVER); + ieee80211_queue_work(hw, &sta->drv_deliver_wk); + } else { + clear_sta_flag(sta, WLAN_STA_PS_DRIVER); + ieee80211_check_fast_xmit(sta); + } +} +EXPORT_SYMBOL(ieee80211_sta_block_awake); + +void ieee80211_sta_eosp(struct ieee80211_sta *pubsta) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_local *local = sta->local; + + trace_api_eosp(local, pubsta); + + clear_sta_flag(sta, WLAN_STA_SP); +} +EXPORT_SYMBOL(ieee80211_sta_eosp); + +void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + enum ieee80211_frame_release_type reason; + bool more_data; + + trace_api_send_eosp_nullfunc(sta->local, pubsta, tid); + + reason = IEEE80211_FRAME_RELEASE_UAPSD; + more_data = ieee80211_sta_ps_more_data(sta, ~sta->sta.uapsd_queues, + reason, 0); + + ieee80211_send_null_response(sta, tid, reason, false, more_data); +} +EXPORT_SYMBOL(ieee80211_send_eosp_nullfunc); + +void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta, + u8 tid, bool buffered) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + + if (WARN_ON(tid >= IEEE80211_NUM_TIDS)) + return; + + trace_api_sta_set_buffered(sta->local, pubsta, tid, buffered); + + if (buffered) + set_bit(tid, &sta->driver_buffered_tids); + else + clear_bit(tid, &sta->driver_buffered_tids); + + sta_info_recalc_tim(sta); +} +EXPORT_SYMBOL(ieee80211_sta_set_buffered); + +void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid, + u32 tx_airtime, u32 rx_airtime) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_local *local = sta->sdata->local; + u8 ac = ieee80211_ac_from_tid(tid); + u32 airtime = 0; + u32 diff; + + if (sta->local->airtime_flags & AIRTIME_USE_TX) + airtime += tx_airtime; + if (sta->local->airtime_flags & AIRTIME_USE_RX) + airtime += rx_airtime; + + spin_lock_bh(&local->active_txq_lock[ac]); + sta->airtime[ac].tx_airtime += tx_airtime; + sta->airtime[ac].rx_airtime += rx_airtime; + + diff = (u32)jiffies - sta->airtime[ac].last_active; + if (diff <= AIRTIME_ACTIVE_DURATION) + sta->airtime[ac].deficit -= airtime; + + spin_unlock_bh(&local->active_txq_lock[ac]); +} +EXPORT_SYMBOL(ieee80211_sta_register_airtime); + +void __ieee80211_sta_recalc_aggregates(struct sta_info *sta, u16 active_links) +{ + bool first = true; + int link_id; + + if (!sta->sta.valid_links || !sta->sta.mlo) { + sta->sta.cur = &sta->sta.deflink.agg; + return; + } + + rcu_read_lock(); + for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) { + struct ieee80211_link_sta *link_sta; + int i; + + 
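+ /* sta->sta.cur starts from the default link's limits and is then
+  * reduced to the minimum with each further active link below;
+  * inactive or not-yet-allocated links are skipped.
+  */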
if (!(active_links & BIT(link_id))) + continue; + + link_sta = rcu_dereference(sta->sta.link[link_id]); + if (!link_sta) + continue; + + if (first) { + sta->cur = sta->sta.deflink.agg; + first = false; + continue; + } + + sta->cur.max_amsdu_len = + min(sta->cur.max_amsdu_len, + link_sta->agg.max_amsdu_len); + sta->cur.max_rc_amsdu_len = + min(sta->cur.max_rc_amsdu_len, + link_sta->agg.max_rc_amsdu_len); + + for (i = 0; i < ARRAY_SIZE(sta->cur.max_tid_amsdu_len); i++) + sta->cur.max_tid_amsdu_len[i] = + min(sta->cur.max_tid_amsdu_len[i], + link_sta->agg.max_tid_amsdu_len[i]); + } + rcu_read_unlock(); + + sta->sta.cur = &sta->cur; +} + +void ieee80211_sta_recalc_aggregates(struct ieee80211_sta *pubsta) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + + __ieee80211_sta_recalc_aggregates(sta, sta->sdata->vif.active_links); +} +EXPORT_SYMBOL(ieee80211_sta_recalc_aggregates); + +void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local, + struct sta_info *sta, u8 ac, + u16 tx_airtime, bool tx_completed) +{ + int tx_pending; + + if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) + return; + + if (!tx_completed) { + if (sta) + atomic_add(tx_airtime, + &sta->airtime[ac].aql_tx_pending); + + atomic_add(tx_airtime, &local->aql_total_pending_airtime); + atomic_add(tx_airtime, &local->aql_ac_pending_airtime[ac]); + return; + } + + if (sta) { + tx_pending = atomic_sub_return(tx_airtime, + &sta->airtime[ac].aql_tx_pending); + if (tx_pending < 0) + atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending, + tx_pending, 0); + } + + atomic_sub(tx_airtime, &local->aql_total_pending_airtime); + tx_pending = atomic_sub_return(tx_airtime, + &local->aql_ac_pending_airtime[ac]); + if (WARN_ONCE(tx_pending < 0, + "Device %s AC %d pending airtime underflow: %u, %u", + wiphy_name(local->hw.wiphy), ac, tx_pending, + tx_airtime)) { + atomic_cmpxchg(&local->aql_ac_pending_airtime[ac], + tx_pending, 0); + atomic_sub(tx_pending, &local->aql_total_pending_airtime); + } +} + +static struct ieee80211_sta_rx_stats * +sta_get_last_rx_stats(struct sta_info *sta) +{ + struct ieee80211_sta_rx_stats *stats = &sta->deflink.rx_stats; + int cpu; + + if (!sta->deflink.pcpu_rx_stats) + return stats; + + for_each_possible_cpu(cpu) { + struct ieee80211_sta_rx_stats *cpustats; + + cpustats = per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu); + + if (time_after(cpustats->last_rx, stats->last_rx)) + stats = cpustats; + } + + return stats; +} + +static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate, + struct rate_info *rinfo) +{ + rinfo->bw = STA_STATS_GET(BW, rate); + + switch (STA_STATS_GET(TYPE, rate)) { + case STA_STATS_RATE_TYPE_VHT: + rinfo->flags = RATE_INFO_FLAGS_VHT_MCS; + rinfo->mcs = STA_STATS_GET(VHT_MCS, rate); + rinfo->nss = STA_STATS_GET(VHT_NSS, rate); + if (STA_STATS_GET(SGI, rate)) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case STA_STATS_RATE_TYPE_HT: + rinfo->flags = RATE_INFO_FLAGS_MCS; + rinfo->mcs = STA_STATS_GET(HT_MCS, rate); + if (STA_STATS_GET(SGI, rate)) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case STA_STATS_RATE_TYPE_LEGACY: { + struct ieee80211_supported_band *sband; + u16 brate; + unsigned int shift; + int band = STA_STATS_GET(LEGACY_BAND, rate); + int rate_idx = STA_STATS_GET(LEGACY_IDX, rate); + + sband = local->hw.wiphy->bands[band]; + + if (WARN_ON_ONCE(!sband->bitrates)) + break; + + brate = sband->bitrates[rate_idx].bitrate; + if (rinfo->bw == RATE_INFO_BW_5) + shift = 2; + else if (rinfo->bw == RATE_INFO_BW_10) 
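+ /* half-rate (10 MHz) channel: the nominal bitrate is halved,
+  * just as the 5 MHz case above quarters it
+  */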
+ shift = 1; + else + shift = 0; + rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); + break; + } + case STA_STATS_RATE_TYPE_HE: + rinfo->flags = RATE_INFO_FLAGS_HE_MCS; + rinfo->mcs = STA_STATS_GET(HE_MCS, rate); + rinfo->nss = STA_STATS_GET(HE_NSS, rate); + rinfo->he_gi = STA_STATS_GET(HE_GI, rate); + rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate); + rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate); + break; + case STA_STATS_RATE_TYPE_EHT: + rinfo->flags = RATE_INFO_FLAGS_EHT_MCS; + rinfo->mcs = STA_STATS_GET(EHT_MCS, rate); + rinfo->nss = STA_STATS_GET(EHT_NSS, rate); + rinfo->eht_gi = STA_STATS_GET(EHT_GI, rate); + rinfo->eht_ru_alloc = STA_STATS_GET(EHT_RU, rate); + break; + } +} + +static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) +{ + u32 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate); + + if (rate == STA_STATS_RATE_INVALID) + return -EINVAL; + + sta_stats_decode_rate(sta->local, rate, rinfo); + return 0; +} + +static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats, + int tid) +{ + unsigned int start; + u64 value; + + do { + start = u64_stats_fetch_begin(&rxstats->syncp); + value = rxstats->msdu[tid]; + } while (u64_stats_fetch_retry(&rxstats->syncp, start)); + + return value; +} + +static void sta_set_tidstats(struct sta_info *sta, + struct cfg80211_tid_stats *tidstats, + int tid) +{ + struct ieee80211_local *local = sta->local; + int cpu; + + if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) { + tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->deflink.rx_stats, + tid); + + if (sta->deflink.pcpu_rx_stats) { + for_each_possible_cpu(cpu) { + struct ieee80211_sta_rx_stats *cpurxs; + + cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, + cpu); + tidstats->rx_msdu += + sta_get_tidstats_msdu(cpurxs, tid); + } + } + + tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU); + } + + if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) { + tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU); + tidstats->tx_msdu = sta->deflink.tx_stats.msdu[tid]; + } + + if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) && + ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { + tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES); + tidstats->tx_msdu_retries = sta->deflink.status_stats.msdu_retries[tid]; + } + + if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) && + ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { + tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED); + tidstats->tx_msdu_failed = sta->deflink.status_stats.msdu_failed[tid]; + } + + if (tid < IEEE80211_NUM_TIDS) { + spin_lock_bh(&local->fq.lock); + rcu_read_lock(); + + tidstats->filled |= BIT(NL80211_TID_STATS_TXQ_STATS); + ieee80211_fill_txq_stats(&tidstats->txq_stats, + to_txq_info(sta->sta.txq[tid])); + + rcu_read_unlock(); + spin_unlock_bh(&local->fq.lock); + } +} + +static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats) +{ + unsigned int start; + u64 value; + + do { + start = u64_stats_fetch_begin(&rxstats->syncp); + value = rxstats->bytes; + } while (u64_stats_fetch_retry(&rxstats->syncp, start)); + + return value; +} + +void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, + bool tidstats) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + u32 thr = 0; + int i, ac, cpu; + struct ieee80211_sta_rx_stats *last_rxstats; + + last_rxstats = sta_get_last_rx_stats(sta); + + sinfo->generation = sdata->local->sta_generation; + + /* do before 
driver, so beacon filtering drivers have a + * chance to e.g. just add the number of filtered beacons + * (or just modify the value entirely, of course) + */ + if (sdata->vif.type == NL80211_IFTYPE_STATION) + sinfo->rx_beacon = sdata->deflink.u.mgd.count_beacon_signal; + + drv_sta_statistics(local, sdata, &sta->sta, sinfo); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) | + BIT_ULL(NL80211_STA_INFO_STA_FLAGS) | + BIT_ULL(NL80211_STA_INFO_BSS_PARAM) | + BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) | + BIT_ULL(NL80211_STA_INFO_ASSOC_AT_BOOTTIME) | + BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC); + + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + sinfo->beacon_loss_count = + sdata->deflink.u.mgd.beacon_loss_count; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS); + } + + sinfo->connected_time = ktime_get_seconds() - sta->last_connected; + sinfo->assoc_at = sta->assoc_at; + sinfo->inactive_time = + jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta)); + + if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) | + BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) { + sinfo->tx_bytes = 0; + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + sinfo->tx_bytes += sta->deflink.tx_stats.bytes[ac]; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); + } + + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) { + sinfo->tx_packets = 0; + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + sinfo->tx_packets += sta->deflink.tx_stats.packets[ac]; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); + } + + if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) | + BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) { + sinfo->rx_bytes += sta_get_stats_bytes(&sta->deflink.rx_stats); + + if (sta->deflink.pcpu_rx_stats) { + for_each_possible_cpu(cpu) { + struct ieee80211_sta_rx_stats *cpurxs; + + cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, + cpu); + sinfo->rx_bytes += sta_get_stats_bytes(cpurxs); + } + } + + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); + } + + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) { + sinfo->rx_packets = sta->deflink.rx_stats.packets; + if (sta->deflink.pcpu_rx_stats) { + for_each_possible_cpu(cpu) { + struct ieee80211_sta_rx_stats *cpurxs; + + cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, + cpu); + sinfo->rx_packets += cpurxs->packets; + } + } + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); + } + + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) { + sinfo->tx_retries = sta->deflink.status_stats.retry_count; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); + } + + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) { + sinfo->tx_failed = sta->deflink.status_stats.retry_failed; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); + } + + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION))) { + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + sinfo->rx_duration += sta->airtime[ac].rx_airtime; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); + } + + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_DURATION))) { + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + sinfo->tx_duration += sta->airtime[ac].tx_airtime; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION); + } + + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) { + sinfo->airtime_weight = sta->airtime_weight; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT); + } + + sinfo->rx_dropped_misc = sta->deflink.rx_stats.dropped; + if (sta->deflink.pcpu_rx_stats) { + for_each_possible_cpu(cpu) { + struct 
ieee80211_sta_rx_stats *cpurxs; + + cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu); + sinfo->rx_dropped_misc += cpurxs->dropped; + } + } + + if (sdata->vif.type == NL80211_IFTYPE_STATION && + !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) { + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) | + BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); + sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif); + } + + if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) || + ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) { + sinfo->signal = (s8)last_rxstats->last_signal; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); + } + + if (!sta->deflink.pcpu_rx_stats && + !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) { + sinfo->signal_avg = + -ewma_signal_read(&sta->deflink.rx_stats_avg.signal); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); + } + } + + /* for the average - if pcpu_rx_stats isn't set - rxstats must point to + * the sta->rx_stats struct, so the check here is fine with and without + * pcpu statistics + */ + if (last_rxstats->chains && + !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) | + BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) { + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); + if (!sta->deflink.pcpu_rx_stats) + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); + + sinfo->chains = last_rxstats->chains; + + for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { + sinfo->chain_signal[i] = + last_rxstats->chain_signal_last[i]; + sinfo->chain_signal_avg[i] = + -ewma_signal_read(&sta->deflink.rx_stats_avg.chain_signal[i]); + } + } + + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) && + !sta->sta.valid_links) { + sta_set_rate_info_tx(sta, &sta->deflink.tx_stats.last_rate, + &sinfo->txrate); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + } + + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) && + !sta->sta.valid_links) { + if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0) + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); + } + + if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) { + for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) + sta_set_tidstats(sta, &sinfo->pertid[i], i); + } + + if (ieee80211_vif_is_mesh(&sdata->vif)) { +#ifdef CONFIG_MAC80211_MESH + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) | + BIT_ULL(NL80211_STA_INFO_PLID) | + BIT_ULL(NL80211_STA_INFO_PLINK_STATE) | + BIT_ULL(NL80211_STA_INFO_LOCAL_PM) | + BIT_ULL(NL80211_STA_INFO_PEER_PM) | + BIT_ULL(NL80211_STA_INFO_NONPEER_PM) | + BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE) | + BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_AS); + + sinfo->llid = sta->mesh->llid; + sinfo->plid = sta->mesh->plid; + sinfo->plink_state = sta->mesh->plink_state; + if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET); + sinfo->t_offset = sta->mesh->t_offset; + } + sinfo->local_pm = sta->mesh->local_pm; + sinfo->peer_pm = sta->mesh->peer_pm; + sinfo->nonpeer_pm = sta->mesh->nonpeer_pm; + sinfo->connected_to_gate = sta->mesh->connected_to_gate; + sinfo->connected_to_as = sta->mesh->connected_to_as; +#endif + } + + sinfo->bss_param.flags = 0; + if (sdata->vif.bss_conf.use_cts_prot) + sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT; + if (sdata->vif.bss_conf.use_short_preamble) + sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; + if (sdata->vif.bss_conf.use_short_slot) + sinfo->bss_param.flags |= 
BSS_PARAM_FLAGS_SHORT_SLOT_TIME; + sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period; + sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int; + + sinfo->sta_flags.set = 0; + sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) | + BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) | + BIT(NL80211_STA_FLAG_WME) | + BIT(NL80211_STA_FLAG_MFP) | + BIT(NL80211_STA_FLAG_AUTHENTICATED) | + BIT(NL80211_STA_FLAG_ASSOCIATED) | + BIT(NL80211_STA_FLAG_TDLS_PEER); + if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED); + if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE)) + sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE); + if (sta->sta.wme) + sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME); + if (test_sta_flag(sta, WLAN_STA_MFP)) + sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP); + if (test_sta_flag(sta, WLAN_STA_AUTH)) + sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED); + if (test_sta_flag(sta, WLAN_STA_ASSOC)) + sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED); + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) + sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); + + thr = sta_get_expected_throughput(sta); + + if (thr != 0) { + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT); + sinfo->expected_throughput = thr; + } + + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) && + sta->deflink.status_stats.ack_signal_filled) { + sinfo->ack_signal = sta->deflink.status_stats.last_ack_signal; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); + } + + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) && + sta->deflink.status_stats.ack_signal_filled) { + sinfo->avg_ack_signal = + -(s8)ewma_avg_signal_read( + &sta->deflink.status_stats.avg_ack_signal); + sinfo->filled |= + BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); + } + + if (ieee80211_vif_is_mesh(&sdata->vif)) { + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_LINK_METRIC); + sinfo->airtime_link_metric = + airtime_link_metric_get(local, sta); + } +} + +u32 sta_get_expected_throughput(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + struct rate_control_ref *ref = NULL; + u32 thr = 0; + + if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) + ref = local->rate_ctrl; + + /* check if the driver has a SW RC implementation */ + if (ref && ref->ops->get_expected_throughput) + thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv); + else + thr = drv_get_expected_throughput(local, sta); + + return thr; +} + +unsigned long ieee80211_sta_last_active(struct sta_info *sta) +{ + struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta); + + if (!sta->deflink.status_stats.last_ack || + time_after(stats->last_rx, sta->deflink.status_stats.last_ack)) + return stats->last_rx; + return sta->deflink.status_stats.last_ack; +} + +static void sta_update_codel_params(struct sta_info *sta, u32 thr) +{ + if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) { + sta->cparams.target = MS2TIME(50); + sta->cparams.interval = MS2TIME(300); + sta->cparams.ecn = false; + } else { + sta->cparams.target = MS2TIME(20); + sta->cparams.interval = MS2TIME(100); + sta->cparams.ecn = true; + } +} + +void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta, + u32 thr) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + + sta_update_codel_params(sta, thr); +} + +int ieee80211_sta_allocate_link(struct sta_info *sta, unsigned int 
link_id) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct sta_link_alloc *alloc; + int ret; + + lockdep_assert_held(&sdata->local->sta_mtx); + + /* must represent an MLD from the start */ + if (WARN_ON(!sta->sta.valid_links)) + return -EINVAL; + + if (WARN_ON(sta->sta.valid_links & BIT(link_id) || + sta->link[link_id])) + return -EBUSY; + + alloc = kzalloc(sizeof(*alloc), GFP_KERNEL); + if (!alloc) + return -ENOMEM; + + ret = sta_info_alloc_link(sdata->local, &alloc->info, GFP_KERNEL); + if (ret) { + kfree(alloc); + return ret; + } + + sta_info_add_link(sta, link_id, &alloc->info, &alloc->sta); + + ieee80211_link_sta_debugfs_add(&alloc->info); + + return 0; +} + +void ieee80211_sta_free_link(struct sta_info *sta, unsigned int link_id) +{ + lockdep_assert_held(&sta->sdata->local->sta_mtx); + + sta_remove_link(sta, link_id, false); +} + +int ieee80211_sta_activate_link(struct sta_info *sta, unsigned int link_id) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct link_sta_info *link_sta; + u16 old_links = sta->sta.valid_links; + u16 new_links = old_links | BIT(link_id); + int ret; + + link_sta = rcu_dereference_protected(sta->link[link_id], + lockdep_is_held(&sdata->local->sta_mtx)); + + if (WARN_ON(old_links == new_links || !link_sta)) + return -EINVAL; + + rcu_read_lock(); + if (link_sta_info_hash_lookup(sdata->local, link_sta->addr)) { + rcu_read_unlock(); + return -EALREADY; + } + /* we only modify under the mutex so this is fine */ + rcu_read_unlock(); + + sta->sta.valid_links = new_links; + + if (!test_sta_flag(sta, WLAN_STA_INSERTED)) + goto hash; + + ieee80211_recalc_min_chandef(sdata, link_id); + + /* Ensure the values are updated for the driver, + * redone by sta_remove_link on failure. + */ + ieee80211_sta_recalc_aggregates(&sta->sta); + + ret = drv_change_sta_links(sdata->local, sdata, &sta->sta, + old_links, new_links); + if (ret) { + sta->sta.valid_links = old_links; + sta_remove_link(sta, link_id, false); + return ret; + } + +hash: + ret = link_sta_info_hash_add(sdata->local, link_sta); + WARN_ON(ret); + return 0; +} + +void ieee80211_sta_remove_link(struct sta_info *sta, unsigned int link_id) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + u16 old_links = sta->sta.valid_links; + + lockdep_assert_held(&sdata->local->sta_mtx); + + sta->sta.valid_links &= ~BIT(link_id); + + if (test_sta_flag(sta, WLAN_STA_INSERTED)) + drv_change_sta_links(sdata->local, sdata, &sta->sta, + old_links, sta->sta.valid_links); + + sta_remove_link(sta, link_id, true); +} + +void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta, + const u8 *ext_capab, + unsigned int ext_capab_len) +{ + u8 val; + + sta->sta.max_amsdu_subframes = 0; + + if (ext_capab_len < 8) + return; + + /* The sender might not have sent the last bit, consider it to be 0 */ + val = u8_get_bits(ext_capab[7], WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB); + + /* we did get all the bits, take the MSB as well */ + if (ext_capab_len >= 9) + val |= u8_get_bits(ext_capab[8], + WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB) << 1; + + if (val) + sta->sta.max_amsdu_subframes = 4 << (4 - val); +} + +#ifdef CONFIG_LOCKDEP +bool lockdep_sta_mutex_held(struct ieee80211_sta *pubsta) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + + return lockdep_is_held(&sta->local->sta_mtx); +} +EXPORT_SYMBOL(lockdep_sta_mutex_held); +#endif diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h new file mode 100644 index 0000000000..195b563132 --- /dev/null +++ b/net/mac80211/sta_info.h @@ -0,0 +1,1020 
@@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2002-2005, Devicescape Software, Inc. + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright(c) 2015-2017 Intel Deutschland GmbH + * Copyright(c) 2020-2022 Intel Corporation + */ + +#ifndef STA_INFO_H +#define STA_INFO_H + +#include <linux/list.h> +#include <linux/types.h> +#include <linux/if_ether.h> +#include <linux/workqueue.h> +#include <linux/average.h> +#include <linux/bitfield.h> +#include <linux/etherdevice.h> +#include <linux/rhashtable.h> +#include <linux/u64_stats_sync.h> +#include "key.h" + +/** + * enum ieee80211_sta_info_flags - Stations flags + * + * These flags are used with &struct sta_info's @flags member, but + * only indirectly with set_sta_flag() and friends. + * + * @WLAN_STA_AUTH: Station is authenticated. + * @WLAN_STA_ASSOC: Station is associated. + * @WLAN_STA_PS_STA: Station is in power-save mode + * @WLAN_STA_AUTHORIZED: Station is authorized to send/receive traffic. + * This bit is always checked so needs to be enabled for all stations + * when virtual port control is not in use. + * @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble + * frames. + * @WLAN_STA_WDS: Station is one of our WDS peers. + * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the + * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next + * frame to this station is transmitted. + * @WLAN_STA_MFP: Management frame protection is used with this STA. + * @WLAN_STA_BLOCK_BA: Used to deny ADDBA requests (both TX and RX) + * during suspend/resume and station removal. + * @WLAN_STA_PS_DRIVER: driver requires keeping this station in + * power-save mode logically to flush frames that might still + * be in the queues + * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping + * station in power-save mode, reply when the driver unblocks. + * @WLAN_STA_TDLS_PEER: Station is a TDLS peer. + * @WLAN_STA_TDLS_PEER_AUTH: This TDLS peer is authorized to send direct + * packets. This means the link is enabled. + * @WLAN_STA_TDLS_INITIATOR: We are the initiator of the TDLS link with this + * station. + * @WLAN_STA_TDLS_CHAN_SWITCH: This TDLS peer supports TDLS channel-switching + * @WLAN_STA_TDLS_OFF_CHANNEL: The local STA is currently off-channel with this + * TDLS peer + * @WLAN_STA_TDLS_WIDER_BW: This TDLS peer supports working on a wider bw on + * the BSS base channel. + * @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was + * keeping station in power-save mode, reply when the driver + * unblocks the station. + * @WLAN_STA_SP: Station is in a service period, so don't try to + * reply to other uAPSD trigger frames or PS-Poll. + * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame. + * @WLAN_STA_INSERTED: This station is inserted into the hash table. + * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station. + * @WLAN_STA_TOFFSET_KNOWN: toffset calculated for this station is valid. + * @WLAN_STA_MPSP_OWNER: local STA is owner of a mesh Peer Service Period. + * @WLAN_STA_MPSP_RECIPIENT: local STA is recipient of a MPSP. + * @WLAN_STA_PS_DELIVER: station woke up, but we're still blocking TX + * until pending frames are delivered + * @WLAN_STA_USES_ENCRYPTION: This station was configured for encryption, + * so drop all packets without a key later. 
+ * @WLAN_STA_DECAP_OFFLOAD: This station uses rx decap offload + * + * @NUM_WLAN_STA_FLAGS: number of defined flags + */ +enum ieee80211_sta_info_flags { + WLAN_STA_AUTH, + WLAN_STA_ASSOC, + WLAN_STA_PS_STA, + WLAN_STA_AUTHORIZED, + WLAN_STA_SHORT_PREAMBLE, + WLAN_STA_WDS, + WLAN_STA_CLEAR_PS_FILT, + WLAN_STA_MFP, + WLAN_STA_BLOCK_BA, + WLAN_STA_PS_DRIVER, + WLAN_STA_PSPOLL, + WLAN_STA_TDLS_PEER, + WLAN_STA_TDLS_PEER_AUTH, + WLAN_STA_TDLS_INITIATOR, + WLAN_STA_TDLS_CHAN_SWITCH, + WLAN_STA_TDLS_OFF_CHANNEL, + WLAN_STA_TDLS_WIDER_BW, + WLAN_STA_UAPSD, + WLAN_STA_SP, + WLAN_STA_4ADDR_EVENT, + WLAN_STA_INSERTED, + WLAN_STA_RATE_CONTROL, + WLAN_STA_TOFFSET_KNOWN, + WLAN_STA_MPSP_OWNER, + WLAN_STA_MPSP_RECIPIENT, + WLAN_STA_PS_DELIVER, + WLAN_STA_USES_ENCRYPTION, + WLAN_STA_DECAP_OFFLOAD, + + NUM_WLAN_STA_FLAGS, +}; + +#define ADDBA_RESP_INTERVAL HZ +#define HT_AGG_MAX_RETRIES 15 +#define HT_AGG_BURST_RETRIES 3 +#define HT_AGG_RETRIES_PERIOD (15 * HZ) + +#define HT_AGG_STATE_DRV_READY 0 +#define HT_AGG_STATE_RESPONSE_RECEIVED 1 +#define HT_AGG_STATE_OPERATIONAL 2 +#define HT_AGG_STATE_STOPPING 3 +#define HT_AGG_STATE_WANT_START 4 +#define HT_AGG_STATE_WANT_STOP 5 +#define HT_AGG_STATE_START_CB 6 +#define HT_AGG_STATE_STOP_CB 7 +#define HT_AGG_STATE_SENT_ADDBA 8 + +DECLARE_EWMA(avg_signal, 10, 8) +enum ieee80211_agg_stop_reason { + AGG_STOP_DECLINED, + AGG_STOP_LOCAL_REQUEST, + AGG_STOP_PEER_REQUEST, + AGG_STOP_DESTROY_STA, +}; + +/* Debugfs flags to enable/disable use of RX/TX airtime in scheduler */ +#define AIRTIME_USE_TX BIT(0) +#define AIRTIME_USE_RX BIT(1) + +struct airtime_info { + u64 rx_airtime; + u64 tx_airtime; + u32 last_active; + s32 deficit; + atomic_t aql_tx_pending; /* Estimated airtime for frames pending */ + u32 aql_limit_low; + u32 aql_limit_high; +}; + +void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local, + struct sta_info *sta, u8 ac, + u16 tx_airtime, bool tx_completed); + +struct sta_info; + +/** + * struct tid_ampdu_tx - TID aggregation information (Tx). + * + * @rcu_head: rcu head for freeing structure + * @session_timer: check if we keep Tx-ing on the TID (by timeout value) + * @addba_resp_timer: timer for peer's response to addba request + * @pending: pending frames queue -- use sta's spinlock to protect + * @sta: station we are attached to + * @dialog_token: dialog token for aggregation session + * @timeout: session timeout value to be filled in ADDBA requests + * @tid: TID number + * @state: session state (see above) + * @last_tx: jiffies of last tx activity + * @stop_initiator: initiator of a session stop + * @tx_stop: TX DelBA frame when stopping + * @buf_size: reorder buffer size at receiver + * @failed_bar_ssn: ssn of the last failed BAR tx attempt + * @bar_pending: BAR needs to be re-sent + * @amsdu: support A-MSDU withing A-MDPU + * @ssn: starting sequence number of the session + * + * This structure's lifetime is managed by RCU, assignments to + * the array holding it must hold the aggregation mutex. + * + * The TX path can access it under RCU lock-free if, and + * only if, the state has the flag %HT_AGG_STATE_OPERATIONAL + * set. Otherwise, the TX path must also acquire the spinlock + * and re-check the state, see comments in the tx code + * touching it. 
+ */ +struct tid_ampdu_tx { + struct rcu_head rcu_head; + struct timer_list session_timer; + struct timer_list addba_resp_timer; + struct sk_buff_head pending; + struct sta_info *sta; + unsigned long state; + unsigned long last_tx; + u16 timeout; + u8 dialog_token; + u8 stop_initiator; + bool tx_stop; + u16 buf_size; + u16 ssn; + + u16 failed_bar_ssn; + bool bar_pending; + bool amsdu; + u8 tid; +}; + +/** + * struct tid_ampdu_rx - TID aggregation information (Rx). + * + * @reorder_buf: buffer to reorder incoming aggregated MPDUs. An MPDU may be an + * A-MSDU with individually reported subframes. + * @reorder_buf_filtered: bitmap indicating where there are filtered frames in + * the reorder buffer that should be ignored when releasing frames + * @reorder_time: jiffies when skb was added + * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value) + * @reorder_timer: releases expired frames from the reorder buffer. + * @sta: station we are attached to + * @last_rx: jiffies of last rx activity + * @head_seq_num: head sequence number in reordering buffer. + * @stored_mpdu_num: number of MPDUs in reordering buffer + * @ssn: Starting Sequence Number expected to be aggregated. + * @buf_size: buffer size for incoming A-MPDUs + * @timeout: reset timer value (in TUs). + * @tid: TID number + * @rcu_head: RCU head used for freeing this struct + * @reorder_lock: serializes access to reorder buffer, see below. + * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and + * and ssn. + * @removed: this session is removed (but might have been found due to RCU) + * @started: this session has started (head ssn or higher was received) + * + * This structure's lifetime is managed by RCU, assignments to + * the array holding it must hold the aggregation mutex. + * + * The @reorder_lock is used to protect the members of this + * struct, except for @timeout, @buf_size and @dialog_token, + * which are constant across the lifetime of the struct (the + * dialog token being used only for debugging). + */ +struct tid_ampdu_rx { + struct rcu_head rcu_head; + spinlock_t reorder_lock; + u64 reorder_buf_filtered; + struct sk_buff_head *reorder_buf; + unsigned long *reorder_time; + struct sta_info *sta; + struct timer_list session_timer; + struct timer_list reorder_timer; + unsigned long last_rx; + u16 head_seq_num; + u16 stored_mpdu_num; + u16 ssn; + u16 buf_size; + u16 timeout; + u8 tid; + u8 auto_seq:1, + removed:1, + started:1; +}; + +/** + * struct sta_ampdu_mlme - STA aggregation information. + * + * @mtx: mutex to protect all TX data (except non-NULL assignments + * to tid_tx[idx], which are protected by the sta spinlock) + * tid_start_tx is also protected by sta->lock. 
+ * @tid_rx: aggregation info for Rx per TID -- RCU protected + * @tid_rx_token: dialog tokens for valid aggregation sessions + * @tid_rx_timer_expired: bitmap indicating on which TIDs the + * RX timer expired until the work for it runs + * @tid_rx_stop_requested: bitmap indicating which BA sessions per TID the + * driver requested to close until the work for it runs + * @tid_rx_manage_offl: bitmap indicating which BA sessions were requested + * to be treated as started/stopped due to offloading + * @agg_session_valid: bitmap indicating which TID has a rx BA session open on + * @unexpected_agg: bitmap indicating which TID already sent a delBA due to + * unexpected aggregation related frames outside a session + * @work: work struct for starting/stopping aggregation + * @tid_tx: aggregation info for Tx per TID + * @tid_start_tx: sessions where start was requested + * @last_addba_req_time: timestamp of the last addBA request. + * @addba_req_num: number of times addBA request has been sent. + * @dialog_token_allocator: dialog token enumerator for each new session; + */ +struct sta_ampdu_mlme { + struct mutex mtx; + /* rx */ + struct tid_ampdu_rx __rcu *tid_rx[IEEE80211_NUM_TIDS]; + u8 tid_rx_token[IEEE80211_NUM_TIDS]; + unsigned long tid_rx_timer_expired[BITS_TO_LONGS(IEEE80211_NUM_TIDS)]; + unsigned long tid_rx_stop_requested[BITS_TO_LONGS(IEEE80211_NUM_TIDS)]; + unsigned long tid_rx_manage_offl[BITS_TO_LONGS(2 * IEEE80211_NUM_TIDS)]; + unsigned long agg_session_valid[BITS_TO_LONGS(IEEE80211_NUM_TIDS)]; + unsigned long unexpected_agg[BITS_TO_LONGS(IEEE80211_NUM_TIDS)]; + /* tx */ + struct work_struct work; + struct tid_ampdu_tx __rcu *tid_tx[IEEE80211_NUM_TIDS]; + struct tid_ampdu_tx *tid_start_tx[IEEE80211_NUM_TIDS]; + unsigned long last_addba_req_time[IEEE80211_NUM_TIDS]; + u8 addba_req_num[IEEE80211_NUM_TIDS]; + u8 dialog_token_allocator; +}; + + +/* Value to indicate no TID reservation */ +#define IEEE80211_TID_UNRESERVED 0xff + +#define IEEE80211_FAST_XMIT_MAX_IV 18 + +/** + * struct ieee80211_fast_tx - TX fastpath information + * @key: key to use for hw crypto + * @hdr: the 802.11 header to put with the frame + * @hdr_len: actual 802.11 header length + * @sa_offs: offset of the SA + * @da_offs: offset of the DA + * @pn_offs: offset where to put PN for crypto (or 0 if not needed) + * @band: band this will be transmitted on, for tx_info + * @rcu_head: RCU head to free this struct + * + * This struct is small enough so that the common case (maximum crypto + * header length of 8 like for CCMP/GCMP) fits into a single 64-byte + * cache line. 
+ */ +struct ieee80211_fast_tx { + struct ieee80211_key *key; + u8 hdr_len; + u8 sa_offs, da_offs, pn_offs; + u8 band; + u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV + + sizeof(rfc1042_header)] __aligned(2); + + struct rcu_head rcu_head; +}; + +/** + * struct ieee80211_fast_rx - RX fastpath information + * @dev: netdevice for reporting the SKB + * @vif_type: (P2P-less) interface type of the original sdata (sdata->vif.type) + * @vif_addr: interface address + * @rfc1042_hdr: copy of the RFC 1042 SNAP header (to have in cache) + * @control_port_protocol: control port protocol copied from sdata + * @expected_ds_bits: from/to DS bits expected + * @icv_len: length of the MIC if present + * @key: bool indicating encryption is expected (key is set) + * @internal_forward: forward froms internally on AP/VLAN type interfaces + * @uses_rss: copy of USES_RSS hw flag + * @da_offs: offset of the DA in the header (for header conversion) + * @sa_offs: offset of the SA in the header (for header conversion) + * @rcu_head: RCU head for freeing this structure + */ +struct ieee80211_fast_rx { + struct net_device *dev; + enum nl80211_iftype vif_type; + u8 vif_addr[ETH_ALEN] __aligned(2); + u8 rfc1042_hdr[6] __aligned(2); + __be16 control_port_protocol; + __le16 expected_ds_bits; + u8 icv_len; + u8 key:1, + internal_forward:1, + uses_rss:1; + u8 da_offs, sa_offs; + + struct rcu_head rcu_head; +}; + +/* we use only values in the range 0-100, so pick a large precision */ +DECLARE_EWMA(mesh_fail_avg, 20, 8) +DECLARE_EWMA(mesh_tx_rate_avg, 8, 16) + +/** + * struct mesh_sta - mesh STA information + * @plink_lock: serialize access to plink fields + * @llid: Local link ID + * @plid: Peer link ID + * @aid: local aid supplied by peer + * @reason: Cancel reason on PLINK_HOLDING state + * @plink_retries: Retries in establishment + * @plink_state: peer link state + * @plink_timeout: timeout of peer link + * @plink_timer: peer link watch timer + * @plink_sta: peer link watch timer's sta_info + * @t_offset: timing offset relative to this host + * @t_offset_setpoint: reference timing offset of this sta to be used when + * calculating clockdrift + * @local_pm: local link-specific power save mode + * @peer_pm: peer-specific power save mode towards local STA + * @nonpeer_pm: STA power save mode towards non-peer neighbors + * @processed_beacon: set to true after peer rates and capabilities are + * processed + * @connected_to_gate: true if mesh STA has a path to a mesh gate + * @connected_to_as: true if mesh STA has a path to a authentication server + * @fail_avg: moving percentage of failed MSDUs + * @tx_rate_avg: moving average of tx bitrate + */ +struct mesh_sta { + struct timer_list plink_timer; + struct sta_info *plink_sta; + + s64 t_offset; + s64 t_offset_setpoint; + + spinlock_t plink_lock; + u16 llid; + u16 plid; + u16 aid; + u16 reason; + u8 plink_retries; + + bool processed_beacon; + bool connected_to_gate; + bool connected_to_as; + + enum nl80211_plink_state plink_state; + u32 plink_timeout; + + /* mesh power save */ + enum nl80211_mesh_power_mode local_pm; + enum nl80211_mesh_power_mode peer_pm; + enum nl80211_mesh_power_mode nonpeer_pm; + + /* moving percentage of failed MSDUs */ + struct ewma_mesh_fail_avg fail_avg; + /* moving average of tx bitrate */ + struct ewma_mesh_tx_rate_avg tx_rate_avg; +}; + +DECLARE_EWMA(signal, 10, 8) + +struct ieee80211_sta_rx_stats { + unsigned long packets; + unsigned long last_rx; + unsigned long num_duplicates; + unsigned long fragments; + unsigned long dropped; + int last_signal; + u8 
chains; + s8 chain_signal_last[IEEE80211_MAX_CHAINS]; + u32 last_rate; + struct u64_stats_sync syncp; + u64 bytes; + u64 msdu[IEEE80211_NUM_TIDS + 1]; +}; + +/* + * IEEE 802.11-2016 (10.6 "Defragmentation") recommends support for "concurrent + * reception of at least one MSDU per access category per associated STA" + * on APs, or "at least one MSDU per access category" on other interface types. + * + * This limit can be increased by changing this define, at the cost of slower + * frame reassembly and increased memory use while fragments are pending. + */ +#define IEEE80211_FRAGMENT_MAX 4 + +struct ieee80211_fragment_entry { + struct sk_buff_head skb_list; + unsigned long first_frag_time; + u16 seq; + u16 extra_len; + u16 last_frag; + u8 rx_queue; + u8 check_sequential_pn:1, /* needed for CCMP/GCMP */ + is_protected:1; + u8 last_pn[6]; /* PN of the last fragment if CCMP was used */ + unsigned int key_color; +}; + +struct ieee80211_fragment_cache { + struct ieee80211_fragment_entry entries[IEEE80211_FRAGMENT_MAX]; + unsigned int next; +}; + +/* + * The bandwidth threshold below which the per-station CoDel parameters will be + * scaled to be more lenient (to prevent starvation of slow stations). This + * value will be scaled by the number of active stations when it is being + * applied. + */ +#define STA_SLOW_THRESHOLD 6000 /* 6 Mbps */ + +/** + * struct link_sta_info - Link STA information + * All link specific sta info are stored here for reference. This can be + * a single entry for non-MLD STA or multiple entries for MLD STA + * @addr: Link MAC address - Can be same as MLD STA mac address and is always + * same for non-MLD STA. This is used as key for searching link STA + * @link_id: Link ID uniquely identifying the link STA. This is 0 for non-MLD + * and set to the corresponding vif LinkId for MLD STA + * @link_hash_node: hash node for rhashtable + * @sta: Points to the STA info + * @gtk: group keys negotiated with this station, if any + * @tx_stats: TX statistics + * @tx_stats.packets: # of packets transmitted + * @tx_stats.bytes: # of bytes in all packets transmitted + * @tx_stats.last_rate: last TX rate + * @tx_stats.msdu: # of transmitted MSDUs per TID + * @rx_stats: RX statistics + * @rx_stats_avg: averaged RX statistics + * @rx_stats_avg.signal: averaged signal + * @rx_stats_avg.chain_signal: averaged per-chain signal + * @pcpu_rx_stats: per-CPU RX statistics, assigned only if the driver needs + * this (by advertising the USES_RSS hw flag) + * @status_stats: TX status statistics + * @status_stats.filtered: # of filtered frames + * @status_stats.retry_failed: # of frames that failed after retry + * @status_stats.retry_count: # of retries attempted + * @status_stats.lost_packets: # of lost packets + * @status_stats.last_pkt_time: timestamp of last ACKed packet + * @status_stats.msdu_retries: # of MSDU retries + * @status_stats.msdu_failed: # of failed MSDUs + * @status_stats.last_ack: last ack timestamp (jiffies) + * @status_stats.last_ack_signal: last ACK signal + * @status_stats.ack_signal_filled: last ACK signal validity + * @status_stats.avg_ack_signal: average ACK signal + * @cur_max_bandwidth: maximum bandwidth to use for TX to the station, + * taken from HT/VHT capabilities or VHT operating mode notification + * @debugfs_dir: debug filesystem directory dentry + * @pub: public (driver visible) link STA data + * TODO Move other link params from sta_info as required for MLD operation + */ +struct link_sta_info { + u8 addr[ETH_ALEN]; + u8 link_id; + + struct rhlist_head 
link_hash_node; + + struct sta_info *sta; + struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + + NUM_DEFAULT_MGMT_KEYS + + NUM_DEFAULT_BEACON_KEYS]; + struct ieee80211_sta_rx_stats __percpu *pcpu_rx_stats; + + /* Updated from RX path only, no locking requirements */ + struct ieee80211_sta_rx_stats rx_stats; + struct { + struct ewma_signal signal; + struct ewma_signal chain_signal[IEEE80211_MAX_CHAINS]; + } rx_stats_avg; + + /* Updated from TX status path only, no locking requirements */ + struct { + unsigned long filtered; + unsigned long retry_failed, retry_count; + unsigned int lost_packets; + unsigned long last_pkt_time; + u64 msdu_retries[IEEE80211_NUM_TIDS + 1]; + u64 msdu_failed[IEEE80211_NUM_TIDS + 1]; + unsigned long last_ack; + s8 last_ack_signal; + bool ack_signal_filled; + struct ewma_avg_signal avg_ack_signal; + } status_stats; + + /* Updated from TX path only, no locking requirements */ + struct { + u64 packets[IEEE80211_NUM_ACS]; + u64 bytes[IEEE80211_NUM_ACS]; + struct ieee80211_tx_rate last_rate; + struct rate_info last_rate_info; + u64 msdu[IEEE80211_NUM_TIDS + 1]; + } tx_stats; + + enum ieee80211_sta_rx_bandwidth cur_max_bandwidth; + +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *debugfs_dir; +#endif + + struct ieee80211_link_sta *pub; +}; + +/** + * struct sta_info - STA information + * + * This structure collects information about a station that + * mac80211 is communicating with. + * + * @list: global linked list entry + * @free_list: list entry for keeping track of stations to free + * @hash_node: hash node for rhashtable + * @addr: station's MAC address - duplicated from public part to + * let the hash table work with just a single cacheline + * @local: pointer to the global information + * @sdata: virtual interface this station belongs to + * @ptk: peer keys negotiated with this station, if any + * @ptk_idx: last installed peer key index + * @rate_ctrl: rate control algorithm reference + * @rate_ctrl_lock: spinlock used to protect rate control data + * (data inside the algorithm, so serializes calls there) + * @rate_ctrl_priv: rate control private per-STA pointer + * @lock: used for locking all fields that require locking, see comments + * in the header file. 
+ * @drv_deliver_wk: used for delivering frames after driver PS unblocking + * @listen_interval: listen interval of this station, when we're acting as AP + * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly + * @ps_lock: used for powersave (when mac80211 is the AP) related locking + * @ps_tx_buf: buffers (per AC) of frames to transmit to this station + * when it leaves power saving state or polls + * @tx_filtered: buffers (per AC) of frames we already tried to + * transmit but were filtered by hardware due to STA having + * entered power saving state, these are also delivered to + * the station when it leaves powersave or polls for frames + * @driver_buffered_tids: bitmap of TIDs the driver has data buffered on + * @txq_buffered_tids: bitmap of TIDs that mac80211 has txq data buffered on + * @assoc_at: clock boottime (in ns) of last association + * @last_connected: time (in seconds) when a station got connected + * @last_seq_ctrl: last received seq/frag number from this STA (per TID + * plus one for non-QoS frames) + * @tid_seq: per-TID sequence numbers for sending to this STA + * @airtime: per-AC struct airtime_info describing airtime statistics for this + * station + * @airtime_weight: station weight for airtime fairness calculation purposes + * @ampdu_mlme: A-MPDU state machine state + * @mesh: mesh STA information + * @debugfs_dir: debug filesystem directory dentry + * @dead: set to true when sta is unlinked + * @removed: set to true when sta is being removed from sta_list + * @uploaded: set to true when sta is uploaded to the driver + * @sta: station information we share with the driver + * @sta_state: duplicates information about station state (for debug) + * @rcu_head: RCU head used for freeing this station struct + * @cur_max_bandwidth: maximum bandwidth to use for TX to the station, + * taken from HT/VHT capabilities or VHT operating mode notification + * @cparams: CoDel parameters for this station. + * @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED) + * @amsdu_mesh_control: track the mesh A-MSDU format used by the peer: + * + * * -1: not yet known + * * 0: non-mesh A-MSDU length field + * * 1: big-endian mesh A-MSDU length field + * * 2: little-endian mesh A-MSDU length field + * + * @fast_tx: TX fastpath information + * @fast_rx: RX fastpath information + * @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to + * the BSS one. + * @frags: fragment cache + * @cur: storage for aggregation data + * &struct ieee80211_sta points either here or to deflink.agg. + * @deflink: This is the default link STA information, for non MLO STA all link + * specific STA information is accessed through @deflink or through + * link[0] which points to address of @deflink. For MLO Link STA + * the first added link STA will point to deflink. + * @link: reference to Link Sta entries. For Non MLO STA, except 1st link, + * i.e link[0] all links would be assigned to NULL by default and + * would access link information via @deflink or link[0]. For MLO + * STA, first link STA being added will point its link pointer to + * @deflink address and remaining would be allocated and the address + * would be assigned to link[link_id] where link_id is the id assigned + * by the AP. 
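+ *
+ * For a non-MLO STA all of the per-link data can simply be reached
+ * through @deflink; for an MLO STA a per-link lookup is needed, a
+ * minimal sketch (assuming only the RCU read lock is held) would be:
+ *
+ *	struct link_sta_info *link_sta;
+ *
+ *	rcu_read_lock();
+ *	link_sta = rcu_dereference(sta->link[link_id]);
+ *	if (link_sta)
+ *		pr_debug("link addr %pM\n", link_sta->addr);
+ *	rcu_read_unlock();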
+ */ +struct sta_info { + /* General information, mostly static */ + struct list_head list, free_list; + struct rcu_head rcu_head; + struct rhlist_head hash_node; + u8 addr[ETH_ALEN]; + struct ieee80211_local *local; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_key __rcu *ptk[NUM_DEFAULT_KEYS]; + u8 ptk_idx; + struct rate_control_ref *rate_ctrl; + void *rate_ctrl_priv; + spinlock_t rate_ctrl_lock; + spinlock_t lock; + + struct ieee80211_fast_tx __rcu *fast_tx; + struct ieee80211_fast_rx __rcu *fast_rx; + +#ifdef CONFIG_MAC80211_MESH + struct mesh_sta *mesh; +#endif + + struct work_struct drv_deliver_wk; + + u16 listen_interval; + + bool dead; + bool removed; + + bool uploaded; + + enum ieee80211_sta_state sta_state; + + /* use the accessors defined below */ + unsigned long _flags; + + /* STA powersave lock and frame queues */ + spinlock_t ps_lock; + struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS]; + struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS]; + unsigned long driver_buffered_tids; + unsigned long txq_buffered_tids; + + u64 assoc_at; + long last_connected; + + /* Plus 1 for non-QoS frames */ + __le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1]; + + u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; + + struct airtime_info airtime[IEEE80211_NUM_ACS]; + u16 airtime_weight; + + /* + * Aggregation information, locked with lock. + */ + struct sta_ampdu_mlme ampdu_mlme; + +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *debugfs_dir; +#endif + + struct codel_params cparams; + + u8 reserved_tid; + s8 amsdu_mesh_control; + + struct cfg80211_chan_def tdls_chandef; + + struct ieee80211_fragment_cache frags; + + struct ieee80211_sta_aggregates cur; + struct link_sta_info deflink; + struct link_sta_info __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; + + /* keep last! 
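+	 * struct ieee80211_sta ends with a variable-length drv_priv[]
+	 * area owned by the driver, so this member has to remain last.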
*/ + struct ieee80211_sta sta; +}; + +static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta) +{ +#ifdef CONFIG_MAC80211_MESH + return sta->mesh->plink_state; +#endif + return NL80211_PLINK_LISTEN; +} + +static inline void set_sta_flag(struct sta_info *sta, + enum ieee80211_sta_info_flags flag) +{ + WARN_ON(flag == WLAN_STA_AUTH || + flag == WLAN_STA_ASSOC || + flag == WLAN_STA_AUTHORIZED); + set_bit(flag, &sta->_flags); +} + +static inline void clear_sta_flag(struct sta_info *sta, + enum ieee80211_sta_info_flags flag) +{ + WARN_ON(flag == WLAN_STA_AUTH || + flag == WLAN_STA_ASSOC || + flag == WLAN_STA_AUTHORIZED); + clear_bit(flag, &sta->_flags); +} + +static inline int test_sta_flag(struct sta_info *sta, + enum ieee80211_sta_info_flags flag) +{ + return test_bit(flag, &sta->_flags); +} + +static inline int test_and_clear_sta_flag(struct sta_info *sta, + enum ieee80211_sta_info_flags flag) +{ + WARN_ON(flag == WLAN_STA_AUTH || + flag == WLAN_STA_ASSOC || + flag == WLAN_STA_AUTHORIZED); + return test_and_clear_bit(flag, &sta->_flags); +} + +static inline int test_and_set_sta_flag(struct sta_info *sta, + enum ieee80211_sta_info_flags flag) +{ + WARN_ON(flag == WLAN_STA_AUTH || + flag == WLAN_STA_ASSOC || + flag == WLAN_STA_AUTHORIZED); + return test_and_set_bit(flag, &sta->_flags); +} + +int sta_info_move_state(struct sta_info *sta, + enum ieee80211_sta_state new_state); + +static inline void sta_info_pre_move_state(struct sta_info *sta, + enum ieee80211_sta_state new_state) +{ + int ret; + + WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED)); + + ret = sta_info_move_state(sta, new_state); + WARN_ON_ONCE(ret); +} + + +void ieee80211_assign_tid_tx(struct sta_info *sta, int tid, + struct tid_ampdu_tx *tid_tx); + +static inline struct tid_ampdu_tx * +rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid) +{ + return rcu_dereference_protected(sta->ampdu_mlme.tid_tx[tid], + lockdep_is_held(&sta->lock) || + lockdep_is_held(&sta->ampdu_mlme.mtx)); +} + +/* Maximum number of frames to buffer per power saving station per AC */ +#define STA_MAX_TX_BUFFER 64 + +/* Minimum buffered frame expiry time. If STA uses listen interval that is + * smaller than this value, the minimum value here is used instead. */ +#define STA_TX_BUFFER_EXPIRE (10 * HZ) + +/* How often station data is cleaned up (e.g., expiration of buffered frames) + */ +#define STA_INFO_CLEANUP_INTERVAL (10 * HZ) + +struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local, + const u8 *addr); + +/* + * Get a STA info, must be under RCU read lock. + */ +struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, + const u8 *addr); + +struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, + const u8 *addr); + +/* user must hold sta_mtx or be in RCU critical section */ +struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local, + const u8 *sta_addr, const u8 *vif_addr); + +#define for_each_sta_info(local, _addr, _sta, _tmp) \ + rhl_for_each_entry_rcu(_sta, _tmp, \ + sta_info_hash_lookup(local, _addr), hash_node) + +struct rhlist_head *link_sta_info_hash_lookup(struct ieee80211_local *local, + const u8 *addr); + +#define for_each_link_sta_info(local, _addr, _sta, _tmp) \ + rhl_for_each_entry_rcu(_sta, _tmp, \ + link_sta_info_hash_lookup(local, _addr), \ + link_hash_node) + +struct link_sta_info * +link_sta_info_get_bss(struct ieee80211_sub_if_data *sdata, const u8 *addr); + +/* + * Get STA info by index, BROKEN! 
+ */ +struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, + int idx); +/* + * Create a new STA info, caller owns returned structure + * until sta_info_insert(). + */ +struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, + const u8 *addr, gfp_t gfp); +struct sta_info *sta_info_alloc_with_link(struct ieee80211_sub_if_data *sdata, + const u8 *mld_addr, + unsigned int link_id, + const u8 *link_addr, + gfp_t gfp); + +void sta_info_free(struct ieee80211_local *local, struct sta_info *sta); + +/* + * Insert STA info into hash table/list, returns zero or a + * -EEXIST if (if the same MAC address is already present). + * + * Calling the non-rcu version makes the caller relinquish, + * the _rcu version calls read_lock_rcu() and must be called + * without it held. + */ +int sta_info_insert(struct sta_info *sta); +int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU); + +int __must_check __sta_info_destroy(struct sta_info *sta); +int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, + const u8 *addr); +int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata, + const u8 *addr); + +void sta_info_recalc_tim(struct sta_info *sta); + +int sta_info_init(struct ieee80211_local *local); +void sta_info_stop(struct ieee80211_local *local); + +/** + * __sta_info_flush - flush matching STA entries from the STA table + * + * Returns the number of removed STA entries. + * + * @sdata: sdata to remove all stations from + * @vlans: if the given interface is an AP interface, also flush VLANs + */ +int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans); + +/** + * sta_info_flush - flush matching STA entries from the STA table + * + * Returns the number of removed STA entries. + * + * @sdata: sdata to remove all stations from + */ +static inline int sta_info_flush(struct ieee80211_sub_if_data *sdata) +{ + return __sta_info_flush(sdata, false); +} + +void sta_set_rate_info_tx(struct sta_info *sta, + const struct ieee80211_tx_rate *rate, + struct rate_info *rinfo); +void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, + bool tidstats); + +u32 sta_get_expected_throughput(struct sta_info *sta); + +void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, + unsigned long exp_time); + +int ieee80211_sta_allocate_link(struct sta_info *sta, unsigned int link_id); +void ieee80211_sta_free_link(struct sta_info *sta, unsigned int link_id); +int ieee80211_sta_activate_link(struct sta_info *sta, unsigned int link_id); +void ieee80211_sta_remove_link(struct sta_info *sta, unsigned int link_id); + +void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta); +void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta); +void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta); + +unsigned long ieee80211_sta_last_active(struct sta_info *sta); + +void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta, + const u8 *ext_capab, + unsigned int ext_capab_len); + +void __ieee80211_sta_recalc_aggregates(struct sta_info *sta, u16 active_links); + +enum sta_stats_type { + STA_STATS_RATE_TYPE_INVALID = 0, + STA_STATS_RATE_TYPE_LEGACY, + STA_STATS_RATE_TYPE_HT, + STA_STATS_RATE_TYPE_VHT, + STA_STATS_RATE_TYPE_HE, + STA_STATS_RATE_TYPE_S1G, + STA_STATS_RATE_TYPE_EHT, +}; + +#define STA_STATS_FIELD_HT_MCS GENMASK( 7, 0) +#define STA_STATS_FIELD_LEGACY_IDX GENMASK( 3, 0) +#define STA_STATS_FIELD_LEGACY_BAND GENMASK( 7, 4) +#define STA_STATS_FIELD_VHT_MCS GENMASK( 3, 0) +#define STA_STATS_FIELD_VHT_NSS GENMASK( 7, 4) +#define 
STA_STATS_FIELD_HE_MCS GENMASK( 3, 0) +#define STA_STATS_FIELD_HE_NSS GENMASK( 7, 4) +#define STA_STATS_FIELD_EHT_MCS GENMASK( 3, 0) +#define STA_STATS_FIELD_EHT_NSS GENMASK( 7, 4) +#define STA_STATS_FIELD_BW GENMASK(12, 8) +#define STA_STATS_FIELD_SGI GENMASK(13, 13) +#define STA_STATS_FIELD_TYPE GENMASK(16, 14) +#define STA_STATS_FIELD_HE_RU GENMASK(19, 17) +#define STA_STATS_FIELD_HE_GI GENMASK(21, 20) +#define STA_STATS_FIELD_HE_DCM GENMASK(22, 22) +#define STA_STATS_FIELD_EHT_RU GENMASK(20, 17) +#define STA_STATS_FIELD_EHT_GI GENMASK(22, 21) + +#define STA_STATS_FIELD(_n, _v) FIELD_PREP(STA_STATS_FIELD_ ## _n, _v) +#define STA_STATS_GET(_n, _v) FIELD_GET(STA_STATS_FIELD_ ## _n, _v) + +#define STA_STATS_RATE_INVALID 0 + +static inline u32 sta_stats_encode_rate(struct ieee80211_rx_status *s) +{ + u32 r; + + r = STA_STATS_FIELD(BW, s->bw); + + if (s->enc_flags & RX_ENC_FLAG_SHORT_GI) + r |= STA_STATS_FIELD(SGI, 1); + + switch (s->encoding) { + case RX_ENC_VHT: + r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_VHT); + r |= STA_STATS_FIELD(VHT_NSS, s->nss); + r |= STA_STATS_FIELD(VHT_MCS, s->rate_idx); + break; + case RX_ENC_HT: + r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_HT); + r |= STA_STATS_FIELD(HT_MCS, s->rate_idx); + break; + case RX_ENC_LEGACY: + r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_LEGACY); + r |= STA_STATS_FIELD(LEGACY_BAND, s->band); + r |= STA_STATS_FIELD(LEGACY_IDX, s->rate_idx); + break; + case RX_ENC_HE: + r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_HE); + r |= STA_STATS_FIELD(HE_NSS, s->nss); + r |= STA_STATS_FIELD(HE_MCS, s->rate_idx); + r |= STA_STATS_FIELD(HE_GI, s->he_gi); + r |= STA_STATS_FIELD(HE_RU, s->he_ru); + r |= STA_STATS_FIELD(HE_DCM, s->he_dcm); + break; + case RX_ENC_EHT: + r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_EHT); + r |= STA_STATS_FIELD(EHT_NSS, s->nss); + r |= STA_STATS_FIELD(EHT_MCS, s->rate_idx); + r |= STA_STATS_FIELD(EHT_GI, s->eht.gi); + r |= STA_STATS_FIELD(EHT_RU, s->eht.ru); + break; + default: + WARN_ON(1); + return STA_STATS_RATE_INVALID; + } + + return r; +} + +#endif /* STA_INFO_H */ diff --git a/net/mac80211/status.c b/net/mac80211/status.c new file mode 100644 index 0000000000..44d83da60a --- /dev/null +++ b/net/mac80211/status.c @@ -0,0 +1,1272 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2008-2010 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright 2021-2023 Intel Corporation + */ + +#include <linux/export.h> +#include <linux/etherdevice.h> +#include <net/mac80211.h> +#include <asm/unaligned.h> +#include "ieee80211_i.h" +#include "rate.h" +#include "mesh.h" +#include "led.h" +#include "wme.h" + + +void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int tmp; + + skb->pkt_type = IEEE80211_TX_STATUS_MSG; + skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ? 
+ &local->skb_queue : &local->skb_queue_unreliable, skb); + tmp = skb_queue_len(&local->skb_queue) + + skb_queue_len(&local->skb_queue_unreliable); + while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT && + (skb = skb_dequeue(&local->skb_queue_unreliable))) { + ieee80211_free_txskb(hw, skb); + tmp--; + I802_DEBUG_INC(local->tx_status_drop); + } + tasklet_schedule(&local->tasklet); +} +EXPORT_SYMBOL(ieee80211_tx_status_irqsafe); + +static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, + struct sta_info *sta, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)skb->data; + int ac; + + if (info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER | + IEEE80211_TX_CTL_AMPDU | + IEEE80211_TX_CTL_HW_80211_ENCAP)) { + ieee80211_free_txskb(&local->hw, skb); + return; + } + + /* + * This skb 'survived' a round-trip through the driver, and + * hopefully the driver didn't mangle it too badly. However, + * we can definitely not rely on the control information + * being correct. Clear it so we don't get junk there, and + * indicate that it needs new processing, but must not be + * modified/encrypted again. + */ + memset(&info->control, 0, sizeof(info->control)); + + info->control.jiffies = jiffies; + info->control.vif = &sta->sdata->vif; + info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING; + info->flags |= IEEE80211_TX_INTFL_RETRANSMISSION; + info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; + + sta->deflink.status_stats.filtered++; + + /* + * Clear more-data bit on filtered frames, it might be set + * but later frames might time out so it might have to be + * clear again ... It's all rather unlikely (this frame + * should time out first, right?) but let's not confuse + * peers unnecessarily. + */ + if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) + hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA); + + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *p = ieee80211_get_qos_ctl(hdr); + int tid = *p & IEEE80211_QOS_CTL_TID_MASK; + + /* + * Clear EOSP if set, this could happen e.g. + * if an absence period (us being a P2P GO) + * shortens the SP. + */ + if (*p & IEEE80211_QOS_CTL_EOSP) + *p &= ~IEEE80211_QOS_CTL_EOSP; + ac = ieee80211_ac_from_tid(tid); + } else { + ac = IEEE80211_AC_BE; + } + + /* + * Clear the TX filter mask for this STA when sending the next + * packet. If the STA went to power save mode, this will happen + * when it wakes up for the next time. + */ + set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT); + ieee80211_clear_fast_xmit(sta); + + /* + * This code races in the following way: + * + * (1) STA sends frame indicating it will go to sleep and does so + * (2) hardware/firmware adds STA to filter list, passes frame up + * (3) hardware/firmware processes TX fifo and suppresses a frame + * (4) we get TX status before having processed the frame and + * knowing that the STA has gone to sleep. + * + * This is actually quite unlikely even when both those events are + * processed from interrupts coming in quickly after one another or + * even at the same time because we queue both TX status events and + * RX frames to be processed by a tasklet and process them in the + * same order that they were received or TX status last. Hence, there + * is no race as long as the frame RX is processed before the next TX + * status, which drivers can ensure, see below. 
+ * + * Note that this can only happen if the hardware or firmware can + * actually add STAs to the filter list, if this is done by the + * driver in response to set_tim() (which will only reduce the race + * this whole filtering tries to solve, not completely solve it) + * this situation cannot happen. + * + * To completely solve this race drivers need to make sure that they + * (a) don't mix the irq-safe/not irq-safe TX status/RX processing + * functions and + * (b) always process RX events before TX status events if ordering + * can be unknown, for example with different interrupt status + * bits. + * (c) if PS mode transitions are manual (i.e. the flag + * %IEEE80211_HW_AP_LINK_PS is set), always process PS state + * changes before calling TX status events if ordering can be + * unknown. + */ + if (test_sta_flag(sta, WLAN_STA_PS_STA) && + skb_queue_len(&sta->tx_filtered[ac]) < STA_MAX_TX_BUFFER) { + skb_queue_tail(&sta->tx_filtered[ac], skb); + sta_info_recalc_tim(sta); + + if (!timer_pending(&local->sta_cleanup)) + mod_timer(&local->sta_cleanup, + round_jiffies(jiffies + + STA_INFO_CLEANUP_INTERVAL)); + return; + } + + if (!test_sta_flag(sta, WLAN_STA_PS_STA) && + !(info->flags & IEEE80211_TX_INTFL_RETRIED)) { + /* Software retry the packet once */ + info->flags |= IEEE80211_TX_INTFL_RETRIED; + ieee80211_add_pending_skb(local, skb); + return; + } + + ps_dbg_ratelimited(sta->sdata, + "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n", + skb_queue_len(&sta->tx_filtered[ac]), + !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies); + ieee80211_free_txskb(&local->hw, skb); +} + +static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid) +{ + struct tid_ampdu_tx *tid_tx; + + tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); + if (!tid_tx || !tid_tx->bar_pending) + return; + + tid_tx->bar_pending = false; + ieee80211_send_bar(&sta->sdata->vif, addr, tid, tid_tx->failed_bar_ssn); +} + +static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *) skb->data; + struct ieee80211_local *local = sta->local; + struct ieee80211_sub_if_data *sdata = sta->sdata; + + if (ieee80211_is_data_qos(mgmt->frame_control)) { + struct ieee80211_hdr *hdr = (void *) skb->data; + u8 *qc = ieee80211_get_qos_ctl(hdr); + u16 tid = qc[0] & 0xf; + + ieee80211_check_pending_bar(sta, hdr->addr1, tid); + } + + if (ieee80211_is_action(mgmt->frame_control) && + !ieee80211_has_protected(mgmt->frame_control) && + mgmt->u.action.category == WLAN_CATEGORY_HT && + mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS && + ieee80211_sdata_running(sdata)) { + enum ieee80211_smps_mode smps_mode; + + switch (mgmt->u.action.u.ht_smps.smps_control) { + case WLAN_HT_SMPS_CONTROL_DYNAMIC: + smps_mode = IEEE80211_SMPS_DYNAMIC; + break; + case WLAN_HT_SMPS_CONTROL_STATIC: + smps_mode = IEEE80211_SMPS_STATIC; + break; + case WLAN_HT_SMPS_CONTROL_DISABLED: + default: /* shouldn't happen since we don't send that */ + smps_mode = IEEE80211_SMPS_OFF; + break; + } + + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + /* + * This update looks racy, but isn't -- if we come + * here we've definitely got a station that we're + * talking to, and on a managed interface that can + * only be the AP. And the only other place updating + * this variable in managed mode is before association. 
+ */ + sdata->deflink.smps_mode = smps_mode; + ieee80211_queue_work(&local->hw, &sdata->recalc_smps); + } + } +} + +static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn) +{ + struct tid_ampdu_tx *tid_tx; + + tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); + if (!tid_tx) + return; + + tid_tx->failed_bar_ssn = ssn; + tid_tx->bar_pending = true; +} + +static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info, + struct ieee80211_tx_status *status) +{ + struct ieee80211_rate_status *status_rate = NULL; + int len = sizeof(struct ieee80211_radiotap_header); + + if (status && status->n_rates) + status_rate = &status->rates[status->n_rates - 1]; + + /* IEEE80211_RADIOTAP_RATE rate */ + if (status_rate && !(status_rate->rate_idx.flags & + (RATE_INFO_FLAGS_MCS | + RATE_INFO_FLAGS_DMG | + RATE_INFO_FLAGS_EDMG | + RATE_INFO_FLAGS_VHT_MCS | + RATE_INFO_FLAGS_HE_MCS))) + len += 2; + else if (info->status.rates[0].idx >= 0 && + !(info->status.rates[0].flags & + (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))) + len += 2; + + /* IEEE80211_RADIOTAP_TX_FLAGS */ + len += 2; + + /* IEEE80211_RADIOTAP_DATA_RETRIES */ + len += 1; + + /* IEEE80211_RADIOTAP_MCS + * IEEE80211_RADIOTAP_VHT */ + if (status_rate) { + if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_MCS) + len += 3; + else if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_VHT_MCS) + len = ALIGN(len, 2) + 12; + else if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_HE_MCS) + len = ALIGN(len, 2) + 12; + } else if (info->status.rates[0].idx >= 0) { + if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS) + len += 3; + else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) + len = ALIGN(len, 2) + 12; + } + + return len; +} + +static void +ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, + struct sk_buff *skb, int retry_count, + int rtap_len, int shift, + struct ieee80211_tx_status *status) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_radiotap_header *rthdr; + struct ieee80211_rate_status *status_rate = NULL; + unsigned char *pos; + u16 legacy_rate = 0; + u16 txflags; + + if (status && status->n_rates) + status_rate = &status->rates[status->n_rates - 1]; + + rthdr = skb_push(skb, rtap_len); + + memset(rthdr, 0, rtap_len); + rthdr->it_len = cpu_to_le16(rtap_len); + rthdr->it_present = + cpu_to_le32(BIT(IEEE80211_RADIOTAP_TX_FLAGS) | + BIT(IEEE80211_RADIOTAP_DATA_RETRIES)); + pos = (unsigned char *)(rthdr + 1); + + /* + * XXX: Once radiotap gets the bitmap reset thing the vendor + * extensions proposal contains, we can actually report + * the whole set of tries we did. 
+ */ + + /* IEEE80211_RADIOTAP_RATE */ + + if (status_rate) { + if (!(status_rate->rate_idx.flags & + (RATE_INFO_FLAGS_MCS | + RATE_INFO_FLAGS_DMG | + RATE_INFO_FLAGS_EDMG | + RATE_INFO_FLAGS_VHT_MCS | + RATE_INFO_FLAGS_HE_MCS))) + legacy_rate = status_rate->rate_idx.legacy; + } else if (info->status.rates[0].idx >= 0 && + !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS | + IEEE80211_TX_RC_VHT_MCS))) { + struct ieee80211_supported_band *sband; + + sband = local->hw.wiphy->bands[info->band]; + legacy_rate = + sband->bitrates[info->status.rates[0].idx].bitrate; + } + + if (legacy_rate) { + rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_RATE)); + *pos = DIV_ROUND_UP(legacy_rate, 5 * (1 << shift)); + /* padding for tx flags */ + pos += 2; + } + + /* IEEE80211_RADIOTAP_TX_FLAGS */ + txflags = 0; + if (!(info->flags & IEEE80211_TX_STAT_ACK) && + !is_multicast_ether_addr(hdr->addr1)) + txflags |= IEEE80211_RADIOTAP_F_TX_FAIL; + + if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) + txflags |= IEEE80211_RADIOTAP_F_TX_CTS; + if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) + txflags |= IEEE80211_RADIOTAP_F_TX_RTS; + + put_unaligned_le16(txflags, pos); + pos += 2; + + /* IEEE80211_RADIOTAP_DATA_RETRIES */ + /* for now report the total retry_count */ + *pos = retry_count; + pos++; + + if (status_rate && (status_rate->rate_idx.flags & RATE_INFO_FLAGS_MCS)) + { + rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS)); + pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS | + IEEE80211_RADIOTAP_MCS_HAVE_GI | + IEEE80211_RADIOTAP_MCS_HAVE_BW; + if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_SHORT_GI) + pos[1] |= IEEE80211_RADIOTAP_MCS_SGI; + if (status_rate->rate_idx.bw == RATE_INFO_BW_40) + pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40; + pos[2] = status_rate->rate_idx.mcs; + pos += 3; + } else if (status_rate && (status_rate->rate_idx.flags & + RATE_INFO_FLAGS_VHT_MCS)) + { + u16 known = local->hw.radiotap_vht_details & + (IEEE80211_RADIOTAP_VHT_KNOWN_GI | + IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH); + + rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_VHT)); + + /* required alignment from rthdr */ + pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2); + + /* u16 known - IEEE80211_RADIOTAP_VHT_KNOWN_* */ + put_unaligned_le16(known, pos); + pos += 2; + + /* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* */ + if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_SHORT_GI) + *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; + pos++; + + /* u8 bandwidth */ + switch (status_rate->rate_idx.bw) { + case RATE_INFO_BW_160: + *pos = 11; + break; + case RATE_INFO_BW_80: + *pos = 4; + break; + case RATE_INFO_BW_40: + *pos = 1; + break; + default: + *pos = 0; + break; + } + pos++; + + /* u8 mcs_nss[4] */ + *pos = (status_rate->rate_idx.mcs << 4) | + status_rate->rate_idx.nss; + pos += 4; + + /* u8 coding */ + pos++; + /* u8 group_id */ + pos++; + /* u16 partial_aid */ + pos += 2; + } else if (status_rate && (status_rate->rate_idx.flags & + RATE_INFO_FLAGS_HE_MCS)) + { + struct ieee80211_radiotap_he *he; + + rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE)); + + /* required alignment from rthdr */ + pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2); + he = (struct ieee80211_radiotap_he *)pos; + + he->data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU | + IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); + + he->data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN); + +#define HE_PREP(f, 
val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f) + + he->data6 |= HE_PREP(DATA6_NSTS, status_rate->rate_idx.nss); + +#define CHECK_GI(s) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \ + (int)NL80211_RATE_INFO_HE_GI_##s) + + CHECK_GI(0_8); + CHECK_GI(1_6); + CHECK_GI(3_2); + + he->data3 |= HE_PREP(DATA3_DATA_MCS, status_rate->rate_idx.mcs); + he->data3 |= HE_PREP(DATA3_DATA_DCM, status_rate->rate_idx.he_dcm); + + he->data5 |= HE_PREP(DATA5_GI, status_rate->rate_idx.he_gi); + + switch (status_rate->rate_idx.bw) { + case RATE_INFO_BW_20: + he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ); + break; + case RATE_INFO_BW_40: + he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ); + break; + case RATE_INFO_BW_80: + he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ); + break; + case RATE_INFO_BW_160: + he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ); + break; + case RATE_INFO_BW_HE_RU: +#define CHECK_RU_ALLOC(s) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \ + NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4) + + CHECK_RU_ALLOC(26); + CHECK_RU_ALLOC(52); + CHECK_RU_ALLOC(106); + CHECK_RU_ALLOC(242); + CHECK_RU_ALLOC(484); + CHECK_RU_ALLOC(996); + CHECK_RU_ALLOC(2x996); + + he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + status_rate->rate_idx.he_ru_alloc + 4); + break; + default: + WARN_ONCE(1, "Invalid SU BW %d\n", status_rate->rate_idx.bw); + } + + pos += sizeof(struct ieee80211_radiotap_he); + } + + if (status_rate || info->status.rates[0].idx < 0) + return; + + /* IEEE80211_RADIOTAP_MCS + * IEEE80211_RADIOTAP_VHT */ + if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS) { + rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS)); + pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS | + IEEE80211_RADIOTAP_MCS_HAVE_GI | + IEEE80211_RADIOTAP_MCS_HAVE_BW; + if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) + pos[1] |= IEEE80211_RADIOTAP_MCS_SGI; + if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40; + if (info->status.rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD) + pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF; + pos[2] = info->status.rates[0].idx; + pos += 3; + } else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) { + u16 known = local->hw.radiotap_vht_details & + (IEEE80211_RADIOTAP_VHT_KNOWN_GI | + IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH); + + rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_VHT)); + + /* required alignment from rthdr */ + pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2); + + /* u16 known - IEEE80211_RADIOTAP_VHT_KNOWN_* */ + put_unaligned_le16(known, pos); + pos += 2; + + /* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* */ + if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) + *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; + pos++; + + /* u8 bandwidth */ + if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + *pos = 1; + else if (info->status.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + *pos = 4; + else if (info->status.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH) + *pos = 11; + else /* IEEE80211_TX_RC_{20_MHZ_WIDTH,FIXME:DUP_DATA} */ + *pos = 0; + pos++; + + /* u8 mcs_nss[4] */ + *pos = (ieee80211_rate_get_vht_mcs(&info->status.rates[0]) << 4) | + ieee80211_rate_get_vht_nss(&info->status.rates[0]); + pos += 4; + + /* u8 coding */ + pos++; + /* u8 group_id */ + pos++; + 
/* u16 partial_aid */ + pos += 2; + } +} + +/* + * Handles the tx for TDLS teardown frames. + * If the frame wasn't ACKed by the peer - it will be re-sent through the AP + */ +static void ieee80211_tdls_td_tx_handle(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u32 flags) +{ + struct sk_buff *teardown_skb; + struct sk_buff *orig_teardown_skb; + bool is_teardown = false; + + /* Get the teardown data we need and free the lock */ + spin_lock(&sdata->u.mgd.teardown_lock); + teardown_skb = sdata->u.mgd.teardown_skb; + orig_teardown_skb = sdata->u.mgd.orig_teardown_skb; + if ((skb == orig_teardown_skb) && teardown_skb) { + sdata->u.mgd.teardown_skb = NULL; + sdata->u.mgd.orig_teardown_skb = NULL; + is_teardown = true; + } + spin_unlock(&sdata->u.mgd.teardown_lock); + + if (is_teardown) { + /* This mechanism relies on being able to get ACKs */ + WARN_ON(!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)); + + /* Check if peer has ACKed */ + if (flags & IEEE80211_TX_STAT_ACK) { + dev_kfree_skb_any(teardown_skb); + } else { + tdls_dbg(sdata, + "TDLS Resending teardown through AP\n"); + + ieee80211_subif_start_xmit(teardown_skb, skb->dev); + } + } +} + +static struct ieee80211_sub_if_data * +ieee80211_sdata_from_skb(struct ieee80211_local *local, struct sk_buff *skb) +{ + struct ieee80211_sub_if_data *sdata; + + if (skb->dev) { + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (!sdata->dev) + continue; + + if (skb->dev == sdata->dev) + return sdata; + } + + return NULL; + } + + return rcu_dereference(local->p2p_sdata); +} + +static void ieee80211_report_ack_skb(struct ieee80211_local *local, + struct sk_buff *orig_skb, + bool acked, bool dropped, + ktime_t ack_hwtstamp) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(orig_skb); + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&local->ack_status_lock, flags); + skb = idr_remove(&local->ack_status_frames, info->ack_frame_id); + spin_unlock_irqrestore(&local->ack_status_lock, flags); + + if (!skb) + return; + + if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { + u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_hdr *hdr = (void *)skb->data; + bool is_valid_ack_signal = + !!(info->status.flags & IEEE80211_TX_STATUS_ACK_SIGNAL_VALID); + struct cfg80211_tx_status status = { + .cookie = cookie, + .buf = skb->data, + .len = skb->len, + .ack = acked, + }; + + if (ieee80211_is_timing_measurement(orig_skb) || + ieee80211_is_ftm(orig_skb)) { + status.tx_tstamp = + ktime_to_ns(skb_hwtstamps(orig_skb)->hwtstamp); + status.ack_tstamp = ktime_to_ns(ack_hwtstamp); + } + + rcu_read_lock(); + sdata = ieee80211_sdata_from_skb(local, skb); + if (sdata) { + if (skb->protocol == sdata->control_port_protocol || + skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) + cfg80211_control_port_tx_status(&sdata->wdev, + cookie, + skb->data, + skb->len, + acked, + GFP_ATOMIC); + else if (ieee80211_is_any_nullfunc(hdr->frame_control)) + cfg80211_probe_status(sdata->dev, hdr->addr1, + cookie, acked, + info->status.ack_signal, + is_valid_ack_signal, + GFP_ATOMIC); + else if (ieee80211_is_mgmt(hdr->frame_control)) + cfg80211_mgmt_tx_status_ext(&sdata->wdev, + &status, + GFP_ATOMIC); + else + pr_warn("Unknown status report in ack skb\n"); + + } + rcu_read_unlock(); + + dev_kfree_skb_any(skb); + } else if (dropped) { + dev_kfree_skb_any(skb); + } else { + /* consumes skb */ + skb_complete_wifi_ack(skb, acked); + } +} + +static void 
ieee80211_report_used_skb(struct ieee80211_local *local, + struct sk_buff *skb, bool dropped, + ktime_t ack_hwtstamp) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u16 tx_time_est = ieee80211_info_get_tx_time_est(info); + struct ieee80211_hdr *hdr = (void *)skb->data; + bool acked = info->flags & IEEE80211_TX_STAT_ACK; + + if (dropped) + acked = false; + + if (tx_time_est) { + struct sta_info *sta; + + rcu_read_lock(); + + sta = sta_info_get_by_addrs(local, hdr->addr1, hdr->addr2); + ieee80211_sta_update_pending_airtime(local, sta, + skb_get_queue_mapping(skb), + tx_time_est, + true); + rcu_read_unlock(); + } + + if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) { + struct ieee80211_sub_if_data *sdata; + + rcu_read_lock(); + + sdata = ieee80211_sdata_from_skb(local, skb); + + if (!sdata) { + skb->dev = NULL; + } else if (!dropped) { + unsigned int hdr_size = + ieee80211_hdrlen(hdr->frame_control); + + /* Check to see if packet is a TDLS teardown packet */ + if (ieee80211_is_data(hdr->frame_control) && + (ieee80211_get_tdls_action(skb, hdr_size) == + WLAN_TDLS_TEARDOWN)) { + ieee80211_tdls_td_tx_handle(local, sdata, skb, + info->flags); + } else if (ieee80211_s1g_is_twt_setup(skb)) { + if (!acked) { + struct sk_buff *qskb; + + qskb = skb_clone(skb, GFP_ATOMIC); + if (qskb) { + skb_queue_tail(&sdata->status_queue, + qskb); + wiphy_work_queue(local->hw.wiphy, + &sdata->work); + } + } + } else { + ieee80211_mgd_conn_tx_status(sdata, + hdr->frame_control, + acked); + } + } + + rcu_read_unlock(); + } else if (info->ack_frame_id) { + ieee80211_report_ack_skb(local, skb, acked, dropped, + ack_hwtstamp); + } + + if (!dropped && skb->destructor) { + skb->wifi_acked_valid = 1; + skb->wifi_acked = acked; + } + + ieee80211_led_tx(local); + + if (skb_has_frag_list(skb)) { + kfree_skb_list(skb_shinfo(skb)->frag_list); + skb_shinfo(skb)->frag_list = NULL; + } +} + +/* + * Use a static threshold for now, best value to be determined + * by testing ... + * Should it depend on: + * - on # of retransmissions + * - current throughput (higher value for higher tpt)? + */ +#define STA_LOST_PKT_THRESHOLD 50 +#define STA_LOST_PKT_TIME HZ /* 1 sec since last ACK */ +#define STA_LOST_TDLS_PKT_TIME (10*HZ) /* 10secs since last ACK */ + +static void ieee80211_lost_packet(struct sta_info *sta, + struct ieee80211_tx_info *info) +{ + unsigned long pkt_time = STA_LOST_PKT_TIME; + unsigned int pkt_thr = STA_LOST_PKT_THRESHOLD; + + /* If driver relies on its own algorithm for station kickout, skip + * mac80211 packet loss mechanism. + */ + if (ieee80211_hw_check(&sta->local->hw, REPORTS_LOW_ACK)) + return; + + /* This packet was aggregated but doesn't carry status info */ + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && + !(info->flags & IEEE80211_TX_STAT_AMPDU)) + return; + + sta->deflink.status_stats.lost_packets++; + if (sta->sta.tdls) { + pkt_time = STA_LOST_TDLS_PKT_TIME; + pkt_thr = STA_LOST_PKT_THRESHOLD; + } + + /* + * If we're in TDLS mode, make sure that all STA_LOST_PKT_THRESHOLD + * of the last packets were lost, and that no ACK was received in the + * last STA_LOST_TDLS_PKT_TIME ms, before triggering the CQM packet-loss + * mechanism. 
+ * For non-TDLS, use STA_LOST_PKT_THRESHOLD and STA_LOST_PKT_TIME + */ + if (sta->deflink.status_stats.lost_packets < pkt_thr || + !time_after(jiffies, sta->deflink.status_stats.last_pkt_time + pkt_time)) + return; + + cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr, + sta->deflink.status_stats.lost_packets, + GFP_ATOMIC); + sta->deflink.status_stats.lost_packets = 0; +} + +static int ieee80211_tx_get_rates(struct ieee80211_hw *hw, + struct ieee80211_tx_info *info, + int *retry_count) +{ + int count = -1; + int i; + + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && + !(info->flags & IEEE80211_TX_STAT_AMPDU)) { + /* just the first aggr frame carry status info */ + info->status.rates[i].idx = -1; + info->status.rates[i].count = 0; + break; + } else if (info->status.rates[i].idx < 0) { + break; + } else if (i >= hw->max_report_rates) { + /* the HW cannot have attempted that rate */ + info->status.rates[i].idx = -1; + info->status.rates[i].count = 0; + break; + } + + count += info->status.rates[i].count; + } + + if (count < 0) + count = 0; + + *retry_count = count; + return i - 1; +} + +void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, + int retry_count, int shift, bool send_to_cooked, + struct ieee80211_tx_status *status) +{ + struct sk_buff *skb2; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sub_if_data *sdata; + struct net_device *prev_dev = NULL; + int rtap_len; + + /* send frame to monitor interfaces now */ + rtap_len = ieee80211_tx_radiotap_len(info, status); + if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) { + pr_err("ieee80211_tx_status: headroom too small\n"); + dev_kfree_skb(skb); + return; + } + ieee80211_add_tx_radiotap_header(local, skb, retry_count, + rtap_len, shift, status); + + /* XXX: is this sufficient for BPF? 
*/ + skb_reset_mac_header(skb); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + memset(skb->cb, 0, sizeof(skb->cb)); + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { + if (!ieee80211_sdata_running(sdata)) + continue; + + if ((sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) && + !send_to_cooked) + continue; + + if (prev_dev) { + skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2) { + skb2->dev = prev_dev; + netif_rx(skb2); + } + } + + prev_dev = sdata->dev; + } + } + if (prev_dev) { + skb->dev = prev_dev; + netif_rx(skb); + skb = NULL; + } + rcu_read_unlock(); + dev_kfree_skb(skb); +} + +static void __ieee80211_tx_status(struct ieee80211_hw *hw, + struct ieee80211_tx_status *status, + int rates_idx, int retry_count) +{ + struct sk_buff *skb = status->skb; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_tx_info *info = status->info; + struct sta_info *sta; + __le16 fc; + bool send_to_cooked; + bool acked; + bool noack_success; + struct ieee80211_bar *bar; + int shift = 0; + int tid = IEEE80211_NUM_TIDS; + + fc = hdr->frame_control; + + if (status->sta) { + sta = container_of(status->sta, struct sta_info, sta); + shift = ieee80211_vif_get_shift(&sta->sdata->vif); + + if (info->flags & IEEE80211_TX_STATUS_EOSP) + clear_sta_flag(sta, WLAN_STA_SP); + + acked = !!(info->flags & IEEE80211_TX_STAT_ACK); + noack_success = !!(info->flags & + IEEE80211_TX_STAT_NOACK_TRANSMITTED); + + /* mesh Peer Service Period support */ + if (ieee80211_vif_is_mesh(&sta->sdata->vif) && + ieee80211_is_data_qos(fc)) + ieee80211_mpsp_trigger_process( + ieee80211_get_qos_ctl(hdr), sta, true, acked); + + if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) && + (ieee80211_is_data(hdr->frame_control)) && + (rates_idx != -1)) + sta->deflink.tx_stats.last_rate = + info->status.rates[rates_idx]; + + if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) && + (ieee80211_is_data_qos(fc))) { + u16 ssn; + u8 *qc; + + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) + & IEEE80211_SCTL_SEQ); + ieee80211_send_bar(&sta->sdata->vif, hdr->addr1, + tid, ssn); + } else if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + + tid = qc[0] & 0xf; + } + + if (!acked && ieee80211_is_back_req(fc)) { + u16 control; + + /* + * BAR failed, store the last SSN and retry sending + * the BAR when the next unicast transmission on the + * same TID succeeds. + */ + bar = (struct ieee80211_bar *) skb->data; + control = le16_to_cpu(bar->control); + if (!(control & IEEE80211_BAR_CTRL_MULTI_TID)) { + u16 ssn = le16_to_cpu(bar->start_seq_num); + + tid = (control & + IEEE80211_BAR_CTRL_TID_INFO_MASK) >> + IEEE80211_BAR_CTRL_TID_INFO_SHIFT; + + ieee80211_set_bar_pending(sta, tid, ssn); + } + } + + if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) { + ieee80211_handle_filtered_frame(local, sta, skb); + return; + } else if (ieee80211_is_data_present(fc)) { + if (!acked && !noack_success) + sta->deflink.status_stats.msdu_failed[tid]++; + + sta->deflink.status_stats.msdu_retries[tid] += + retry_count; + } + + if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked) + ieee80211_frame_acked(sta, skb); + + } + + /* SNMP counters + * Fragments are passed to low-level drivers as separate skbs, so these + * are actually fragments, not frames. 
Update frame counters only for + * the first fragment of the frame. */ + if ((info->flags & IEEE80211_TX_STAT_ACK) || + (info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED)) { + if (ieee80211_is_first_frag(hdr->seq_ctrl)) { + I802_DEBUG_INC(local->dot11TransmittedFrameCount); + if (is_multicast_ether_addr(ieee80211_get_DA(hdr))) + I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount); + if (retry_count > 0) + I802_DEBUG_INC(local->dot11RetryCount); + if (retry_count > 1) + I802_DEBUG_INC(local->dot11MultipleRetryCount); + } + + /* This counter shall be incremented for an acknowledged MPDU + * with an individual address in the address 1 field or an MPDU + * with a multicast address in the address 1 field of type Data + * or Management. */ + if (!is_multicast_ether_addr(hdr->addr1) || + ieee80211_is_data(fc) || + ieee80211_is_mgmt(fc)) + I802_DEBUG_INC(local->dot11TransmittedFragmentCount); + } else { + if (ieee80211_is_first_frag(hdr->seq_ctrl)) + I802_DEBUG_INC(local->dot11FailedCount); + } + + if (ieee80211_is_any_nullfunc(fc) && + ieee80211_has_pm(fc) && + ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) && + !(info->flags & IEEE80211_TX_CTL_INJECTED) && + local->ps_sdata && !(local->scanning)) { + if (info->flags & IEEE80211_TX_STAT_ACK) + local->ps_sdata->u.mgd.flags |= + IEEE80211_STA_NULLFUNC_ACKED; + mod_timer(&local->dynamic_ps_timer, + jiffies + msecs_to_jiffies(10)); + } + + ieee80211_report_used_skb(local, skb, false, status->ack_hwtstamp); + + /* this was a transmitted frame, but now we want to reuse it */ + skb_orphan(skb); + + /* Need to make a copy before skb->cb gets cleared */ + send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) || + !(ieee80211_is_data(fc)); + + /* + * This is a bit racy but we can avoid a lot of work + * with this test... + */ + if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) { + if (status->free_list) + list_add_tail(&skb->list, status->free_list); + else + dev_kfree_skb(skb); + return; + } + + /* send to monitor interfaces */ + ieee80211_tx_monitor(local, skb, retry_count, shift, + send_to_cooked, status); +} + +void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_tx_status status = { + .skb = skb, + .info = IEEE80211_SKB_CB(skb), + }; + struct sta_info *sta; + + rcu_read_lock(); + + sta = sta_info_get_by_addrs(local, hdr->addr1, hdr->addr2); + if (sta) + status.sta = &sta->sta; + + ieee80211_tx_status_ext(hw, &status); + rcu_read_unlock(); +} +EXPORT_SYMBOL(ieee80211_tx_status); + +void ieee80211_tx_status_ext(struct ieee80211_hw *hw, + struct ieee80211_tx_status *status) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_tx_info *info = status->info; + struct ieee80211_sta *pubsta = status->sta; + struct sk_buff *skb = status->skb; + struct sta_info *sta = NULL; + int rates_idx, retry_count; + bool acked, noack_success, ack_signal_valid; + u16 tx_time_est; + + if (pubsta) { + sta = container_of(pubsta, struct sta_info, sta); + + if (status->n_rates) + sta->deflink.tx_stats.last_rate_info = + status->rates[status->n_rates - 1].rate_idx; + } + + if (skb && (tx_time_est = + ieee80211_info_get_tx_time_est(IEEE80211_SKB_CB(skb))) > 0) { + /* Do this here to avoid the expensive lookup of the sta + * in ieee80211_report_used_skb(). 
+ */ + ieee80211_sta_update_pending_airtime(local, sta, + skb_get_queue_mapping(skb), + tx_time_est, + true); + ieee80211_info_set_tx_time_est(IEEE80211_SKB_CB(skb), 0); + } + + if (!status->info) + goto free; + + rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count); + + acked = !!(info->flags & IEEE80211_TX_STAT_ACK); + noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED); + ack_signal_valid = + !!(info->status.flags & IEEE80211_TX_STATUS_ACK_SIGNAL_VALID); + + if (pubsta) { + struct ieee80211_sub_if_data *sdata = sta->sdata; + + if (!acked && !noack_success) + sta->deflink.status_stats.retry_failed++; + sta->deflink.status_stats.retry_count += retry_count; + + if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { + if (sdata->vif.type == NL80211_IFTYPE_STATION && + skb && !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) + ieee80211_sta_tx_notify(sdata, (void *) skb->data, + acked, info->status.tx_time); + + if (acked) { + sta->deflink.status_stats.last_ack = jiffies; + + if (sta->deflink.status_stats.lost_packets) + sta->deflink.status_stats.lost_packets = 0; + + /* Track when last packet was ACKed */ + sta->deflink.status_stats.last_pkt_time = jiffies; + + /* Reset connection monitor */ + if (sdata->vif.type == NL80211_IFTYPE_STATION && + unlikely(sdata->u.mgd.probe_send_count > 0)) + sdata->u.mgd.probe_send_count = 0; + + if (ack_signal_valid) { + sta->deflink.status_stats.last_ack_signal = + (s8)info->status.ack_signal; + sta->deflink.status_stats.ack_signal_filled = true; + ewma_avg_signal_add(&sta->deflink.status_stats.avg_ack_signal, + -info->status.ack_signal); + } + } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) { + /* + * The STA is in power save mode, so assume + * that this TX packet failed because of that. 
+ */ + if (skb) + ieee80211_handle_filtered_frame(local, sta, skb); + return; + } else if (noack_success) { + /* nothing to do here, do not account as lost */ + } else { + ieee80211_lost_packet(sta, info); + } + } + + rate_control_tx_status(local, status); + if (ieee80211_vif_is_mesh(&sta->sdata->vif)) + ieee80211s_update_metric(local, sta, status); + } + + if (skb && !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) + return __ieee80211_tx_status(hw, status, rates_idx, + retry_count); + + if (acked || noack_success) { + I802_DEBUG_INC(local->dot11TransmittedFrameCount); + if (!pubsta) + I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount); + if (retry_count > 0) + I802_DEBUG_INC(local->dot11RetryCount); + if (retry_count > 1) + I802_DEBUG_INC(local->dot11MultipleRetryCount); + } else { + I802_DEBUG_INC(local->dot11FailedCount); + } + +free: + if (!skb) + return; + + ieee80211_report_used_skb(local, skb, false, status->ack_hwtstamp); + if (status->free_list) + list_add_tail(&skb->list, status->free_list); + else + dev_kfree_skb(skb); +} +EXPORT_SYMBOL(ieee80211_tx_status_ext); + +void ieee80211_tx_rate_update(struct ieee80211_hw *hw, + struct ieee80211_sta *pubsta, + struct ieee80211_tx_info *info) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_tx_status status = { + .info = info, + .sta = pubsta, + }; + + rate_control_tx_status(local, &status); + + if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) + sta->deflink.tx_stats.last_rate = info->status.rates[0]; +} +EXPORT_SYMBOL(ieee80211_tx_rate_update); + +void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr, + num_packets, GFP_ATOMIC); +} +EXPORT_SYMBOL(ieee80211_report_low_ack); + +void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ieee80211_local *local = hw_to_local(hw); + ktime_t kt = ktime_set(0, 0); + + ieee80211_report_used_skb(local, skb, true, kt); + dev_kfree_skb_any(skb); +} +EXPORT_SYMBOL(ieee80211_free_txskb); + +void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, + struct sk_buff_head *skbs) +{ + struct sk_buff *skb; + + while ((skb = __skb_dequeue(skbs))) + ieee80211_free_txskb(hw, skb); +} diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c new file mode 100644 index 0000000000..a4af3b7675 --- /dev/null +++ b/net/mac80211/tdls.c @@ -0,0 +1,2113 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * mac80211 TDLS handling code + * + * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2014, Intel Corporation + * Copyright 2014 Intel Mobile Communications GmbH + * Copyright 2015 - 2016 Intel Deutschland GmbH + * Copyright (C) 2019, 2021-2023 Intel Corporation + */ + +#include <linux/ieee80211.h> +#include <linux/log2.h> +#include <net/cfg80211.h> +#include <linux/rtnetlink.h> +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" +#include "wme.h" + +/* give usermode some time for retries in setting up the TDLS session */ +#define TDLS_PEER_SETUP_TIMEOUT (15 * HZ) + +void ieee80211_tdls_peer_del_work(struct work_struct *wk) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_local *local; + + sdata = container_of(wk, struct ieee80211_sub_if_data, + u.mgd.tdls_peer_del_work.work); + local = sdata->local; + + mutex_lock(&local->mtx); + if (!is_zero_ether_addr(sdata->u.mgd.tdls_peer)) { + 
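+ /* the TDLS_PEER_SETUP_TIMEOUT delayed work fired before the link was enabled: destroy the half-established peer and clear the pending peer address */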
tdls_dbg(sdata, "TDLS del peer %pM\n", sdata->u.mgd.tdls_peer); + sta_info_destroy_addr(sdata, sdata->u.mgd.tdls_peer); + eth_zero_addr(sdata->u.mgd.tdls_peer); + } + mutex_unlock(&local->mtx); +} + +static void ieee80211_tdls_add_ext_capab(struct ieee80211_link_data *link, + struct sk_buff *skb) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + bool chan_switch = local->hw.wiphy->features & + NL80211_FEATURE_TDLS_CHANNEL_SWITCH; + bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) && + !ifmgd->tdls_wider_bw_prohibited; + bool buffer_sta = ieee80211_hw_check(&local->hw, + SUPPORTS_TDLS_BUFFER_STA); + struct ieee80211_supported_band *sband = ieee80211_get_link_sband(link); + bool vht = sband && sband->vht_cap.vht_supported; + u8 *pos = skb_put(skb, 10); + + *pos++ = WLAN_EID_EXT_CAPABILITY; + *pos++ = 8; /* len */ + *pos++ = 0x0; + *pos++ = 0x0; + *pos++ = 0x0; + *pos++ = (chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0) | + (buffer_sta ? WLAN_EXT_CAPA4_TDLS_BUFFER_STA : 0); + *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED; + *pos++ = 0; + *pos++ = 0; + *pos++ = (vht && wider_band) ? WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED : 0; +} + +static u8 +ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u16 start, u16 end, + u16 spacing) +{ + u8 subband_cnt = 0, ch_cnt = 0; + struct ieee80211_channel *ch; + struct cfg80211_chan_def chandef; + int i, subband_start; + struct wiphy *wiphy = sdata->local->hw.wiphy; + + for (i = start; i <= end; i += spacing) { + if (!ch_cnt) + subband_start = i; + + ch = ieee80211_get_channel(sdata->local->hw.wiphy, i); + if (ch) { + /* we will be active on the channel */ + cfg80211_chandef_create(&chandef, ch, + NL80211_CHAN_NO_HT); + if (cfg80211_reg_can_beacon_relax(wiphy, &chandef, + sdata->wdev.iftype)) { + ch_cnt++; + /* + * check if the next channel is also part of + * this allowed range + */ + continue; + } + } + + /* + * we've reached the end of a range, with allowed channels + * found + */ + if (ch_cnt) { + u8 *pos = skb_put(skb, 2); + *pos++ = ieee80211_frequency_to_channel(subband_start); + *pos++ = ch_cnt; + + subband_cnt++; + ch_cnt = 0; + } + } + + /* all channels in the requested range are allowed - add them here */ + if (ch_cnt) { + u8 *pos = skb_put(skb, 2); + *pos++ = ieee80211_frequency_to_channel(subband_start); + *pos++ = ch_cnt; + + subband_cnt++; + } + + return subband_cnt; +} + +static void +ieee80211_tdls_add_supp_channels(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + /* + * Add possible channels for TDLS. These are channels that are allowed + * to be active. + */ + u8 subband_cnt; + u8 *pos = skb_put(skb, 2); + + *pos++ = WLAN_EID_SUPPORTED_CHANNELS; + + /* + * 5GHz and 2GHz channels numbers can overlap. Ignore this for now, as + * this doesn't happen in real world scenarios. 
+ */ + + /* 2GHz, with 5MHz spacing */ + subband_cnt = ieee80211_tdls_add_subband(sdata, skb, 2412, 2472, 5); + + /* 5GHz, with 20MHz spacing */ + subband_cnt += ieee80211_tdls_add_subband(sdata, skb, 5000, 5825, 20); + + /* length */ + *pos = 2 * subband_cnt; +} + +static void ieee80211_tdls_add_oper_classes(struct ieee80211_link_data *link, + struct sk_buff *skb) +{ + u8 *pos; + u8 op_class; + + if (!ieee80211_chandef_to_operating_class(&link->conf->chandef, + &op_class)) + return; + + pos = skb_put(skb, 4); + *pos++ = WLAN_EID_SUPPORTED_REGULATORY_CLASSES; + *pos++ = 2; /* len */ + + *pos++ = op_class; + *pos++ = op_class; /* give current operating class as alternate too */ +} + +static void ieee80211_tdls_add_bss_coex_ie(struct sk_buff *skb) +{ + u8 *pos = skb_put(skb, 3); + + *pos++ = WLAN_EID_BSS_COEX_2040; + *pos++ = 1; /* len */ + + *pos++ = WLAN_BSS_COEX_INFORMATION_REQUEST; +} + +static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_link_data *link, + u16 status_code) +{ + struct ieee80211_supported_band *sband; + + /* The capability will be 0 when sending a failure code */ + if (status_code != 0) + return 0; + + sband = ieee80211_get_link_sband(link); + + if (sband && sband->band == NL80211_BAND_2GHZ) { + return WLAN_CAPABILITY_SHORT_SLOT_TIME | + WLAN_CAPABILITY_SHORT_PREAMBLE; + } + + return 0; +} + +static void ieee80211_tdls_add_link_ie(struct ieee80211_link_data *link, + struct sk_buff *skb, const u8 *peer, + bool initiator) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_tdls_lnkie *lnkid; + const u8 *init_addr, *rsp_addr; + + if (initiator) { + init_addr = sdata->vif.addr; + rsp_addr = peer; + } else { + init_addr = peer; + rsp_addr = sdata->vif.addr; + } + + lnkid = skb_put(skb, sizeof(struct ieee80211_tdls_lnkie)); + + lnkid->ie_type = WLAN_EID_LINK_ID; + lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2; + + memcpy(lnkid->bssid, link->u.mgd.bssid, ETH_ALEN); + memcpy(lnkid->init_sta, init_addr, ETH_ALEN); + memcpy(lnkid->resp_sta, rsp_addr, ETH_ALEN); +} + +static void +ieee80211_tdls_add_aid(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) +{ + u8 *pos = skb_put(skb, 4); + + *pos++ = WLAN_EID_AID; + *pos++ = 2; /* len */ + put_unaligned_le16(sdata->vif.cfg.aid, pos); +} + +/* translate numbering in the WMM parameter IE to the mac80211 notation */ +static enum ieee80211_ac_numbers ieee80211_ac_from_wmm(int ac) +{ + switch (ac) { + default: + WARN_ON_ONCE(1); + fallthrough; + case 0: + return IEEE80211_AC_BE; + case 1: + return IEEE80211_AC_BK; + case 2: + return IEEE80211_AC_VI; + case 3: + return IEEE80211_AC_VO; + } +} + +static u8 ieee80211_wmm_aci_aifsn(int aifsn, bool acm, int aci) +{ + u8 ret; + + ret = aifsn & 0x0f; + if (acm) + ret |= 0x10; + ret |= (aci << 5) & 0x60; + return ret; +} + +static u8 ieee80211_wmm_ecw(u16 cw_min, u16 cw_max) +{ + return ((ilog2(cw_min + 1) << 0x0) & 0x0f) | + ((ilog2(cw_max + 1) << 0x4) & 0xf0); +} + +static void ieee80211_tdls_add_wmm_param_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_wmm_param_ie *wmm; + struct ieee80211_tx_queue_params *txq; + int i; + + wmm = skb_put_zero(skb, sizeof(*wmm)); + + wmm->element_id = WLAN_EID_VENDOR_SPECIFIC; + wmm->len = sizeof(*wmm) - 2; + + wmm->oui[0] = 0x00; /* Microsoft OUI 00:50:F2 */ + wmm->oui[1] = 0x50; + wmm->oui[2] = 0xf2; + wmm->oui_type = 2; /* WME */ + wmm->oui_subtype = 1; /* WME param */ + wmm->version = 1; /* WME ver */ + wmm->qos_info = 0; /* U-APSD not in use */ + + /* + * Use the EDCA 
parameters defined for the BSS, or default if the AP + * doesn't support it, as mandated by 802.11-2012 section 10.22.4 + */ + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + txq = &sdata->deflink.tx_conf[ieee80211_ac_from_wmm(i)]; + wmm->ac[i].aci_aifsn = ieee80211_wmm_aci_aifsn(txq->aifs, + txq->acm, i); + wmm->ac[i].cw = ieee80211_wmm_ecw(txq->cw_min, txq->cw_max); + wmm->ac[i].txop_limit = cpu_to_le16(txq->txop); + } +} + +static void +ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta) +{ + /* IEEE802.11ac-2013 Table E-4 */ + u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 }; + struct cfg80211_chan_def uc = sta->tdls_chandef; + enum nl80211_chan_width max_width = + ieee80211_sta_cap_chan_bw(&sta->deflink); + int i; + + /* only support upgrading non-narrow channels up to 80Mhz */ + if (max_width == NL80211_CHAN_WIDTH_5 || + max_width == NL80211_CHAN_WIDTH_10) + return; + + if (max_width > NL80211_CHAN_WIDTH_80) + max_width = NL80211_CHAN_WIDTH_80; + + if (uc.width >= max_width) + return; + /* + * Channel usage constrains in the IEEE802.11ac-2013 specification only + * allow expanding a 20MHz channel to 80MHz in a single way. In + * addition, there are no 40MHz allowed channels that are not part of + * the allowed 80MHz range in the 5GHz spectrum (the relevant one here). + */ + for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++) + if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) { + uc.center_freq1 = centers_80mhz[i]; + uc.center_freq2 = 0; + uc.width = NL80211_CHAN_WIDTH_80; + break; + } + + if (!uc.center_freq1) + return; + + /* proceed to downgrade the chandef until usable or the same as AP BW */ + while (uc.width > max_width || + (uc.width > sta->tdls_chandef.width && + !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc, + sdata->wdev.iftype))) + ieee80211_chandef_downgrade(&uc); + + if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) { + tdls_dbg(sdata, "TDLS ch width upgraded %d -> %d\n", + sta->tdls_chandef.width, uc.width); + + /* + * the station is not yet authorized when BW upgrade is done, + * locking is not required + */ + sta->tdls_chandef = uc; + } +} + +static void +ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link, + struct sk_buff *skb, const u8 *peer, + u8 action_code, bool initiator, + const u8 *extra_ies, size_t extra_ies_len) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_supported_band *sband; + struct ieee80211_local *local = sdata->local; + struct ieee80211_sta_ht_cap ht_cap; + struct ieee80211_sta_vht_cap vht_cap; + const struct ieee80211_sta_he_cap *he_cap; + const struct ieee80211_sta_eht_cap *eht_cap; + struct sta_info *sta = NULL; + size_t offset = 0, noffset; + u8 *pos; + + sband = ieee80211_get_link_sband(link); + if (WARN_ON_ONCE(!sband)) + return; + + ieee80211_add_srates_ie(sdata, skb, false, sband->band); + ieee80211_add_ext_srates_ie(sdata, skb, false, sband->band); + ieee80211_tdls_add_supp_channels(sdata, skb); + + /* add any custom IEs that go before Extended Capabilities */ + if (extra_ies_len) { + static const u8 before_ext_cap[] = { + WLAN_EID_SUPP_RATES, + WLAN_EID_COUNTRY, + WLAN_EID_EXT_SUPP_RATES, + WLAN_EID_SUPPORTED_CHANNELS, + WLAN_EID_RSN, + }; + noffset = ieee80211_ie_split(extra_ies, extra_ies_len, + before_ext_cap, + ARRAY_SIZE(before_ext_cap), + offset); + skb_put_data(skb, extra_ies + offset, noffset - offset); + offset = noffset; + } + + ieee80211_tdls_add_ext_capab(link, skb); + + /* add the QoS element if we support 
it */ + if (local->hw.queues >= IEEE80211_NUM_ACS && + action_code != WLAN_PUB_ACTION_TDLS_DISCOVER_RES) + ieee80211_add_wmm_info_ie(skb_put(skb, 9), 0); /* no U-APSD */ + + /* add any custom IEs that go before HT capabilities */ + if (extra_ies_len) { + static const u8 before_ht_cap[] = { + WLAN_EID_SUPP_RATES, + WLAN_EID_COUNTRY, + WLAN_EID_EXT_SUPP_RATES, + WLAN_EID_SUPPORTED_CHANNELS, + WLAN_EID_RSN, + WLAN_EID_EXT_CAPABILITY, + WLAN_EID_QOS_CAPA, + WLAN_EID_FAST_BSS_TRANSITION, + WLAN_EID_TIMEOUT_INTERVAL, + WLAN_EID_SUPPORTED_REGULATORY_CLASSES, + }; + noffset = ieee80211_ie_split(extra_ies, extra_ies_len, + before_ht_cap, + ARRAY_SIZE(before_ht_cap), + offset); + skb_put_data(skb, extra_ies + offset, noffset - offset); + offset = noffset; + } + + /* we should have the peer STA if we're already responding */ + if (action_code == WLAN_TDLS_SETUP_RESPONSE) { + sta = sta_info_get(sdata, peer); + if (WARN_ON_ONCE(!sta)) + return; + + sta->tdls_chandef = link->conf->chandef; + } + + ieee80211_tdls_add_oper_classes(link, skb); + + /* + * with TDLS we can switch channels, and HT-caps are not necessarily + * the same on all bands. The specification limits the setup to a + * single HT-cap, so use the current band for now. + */ + memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); + + if ((action_code == WLAN_TDLS_SETUP_REQUEST || + action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) && + ht_cap.ht_supported) { + ieee80211_apply_htcap_overrides(sdata, &ht_cap); + + /* disable SMPS in TDLS initiator */ + ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED + << IEEE80211_HT_CAP_SM_PS_SHIFT; + + pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); + ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap); + } else if (action_code == WLAN_TDLS_SETUP_RESPONSE && + ht_cap.ht_supported && sta->sta.deflink.ht_cap.ht_supported) { + /* the peer caps are already intersected with our own */ + memcpy(&ht_cap, &sta->sta.deflink.ht_cap, sizeof(ht_cap)); + + pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); + ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap); + } + + if (ht_cap.ht_supported && + (ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) + ieee80211_tdls_add_bss_coex_ie(skb); + + ieee80211_tdls_add_link_ie(link, skb, peer, initiator); + + /* add any custom IEs that go before VHT capabilities */ + if (extra_ies_len) { + static const u8 before_vht_cap[] = { + WLAN_EID_SUPP_RATES, + WLAN_EID_COUNTRY, + WLAN_EID_EXT_SUPP_RATES, + WLAN_EID_SUPPORTED_CHANNELS, + WLAN_EID_RSN, + WLAN_EID_EXT_CAPABILITY, + WLAN_EID_QOS_CAPA, + WLAN_EID_FAST_BSS_TRANSITION, + WLAN_EID_TIMEOUT_INTERVAL, + WLAN_EID_SUPPORTED_REGULATORY_CLASSES, + WLAN_EID_MULTI_BAND, + }; + noffset = ieee80211_ie_split(extra_ies, extra_ies_len, + before_vht_cap, + ARRAY_SIZE(before_vht_cap), + offset); + skb_put_data(skb, extra_ies + offset, noffset - offset); + offset = noffset; + } + + /* add AID if VHT, HE or EHT capabilities supported */ + memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap)); + he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); + eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif); + if ((vht_cap.vht_supported || he_cap || eht_cap) && + (action_code == WLAN_TDLS_SETUP_REQUEST || + action_code == WLAN_TDLS_SETUP_RESPONSE)) + ieee80211_tdls_add_aid(sdata, skb); + + /* build the VHT-cap similarly to the HT-cap */ + if ((action_code == WLAN_TDLS_SETUP_REQUEST || + action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) && + vht_cap.vht_supported) { + ieee80211_apply_vhtcap_overrides(sdata, &vht_cap); + + pos = skb_put(skb, 
sizeof(struct ieee80211_vht_cap) + 2); + ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap); + } else if (action_code == WLAN_TDLS_SETUP_RESPONSE && + vht_cap.vht_supported && sta->sta.deflink.vht_cap.vht_supported) { + /* the peer caps are already intersected with our own */ + memcpy(&vht_cap, &sta->sta.deflink.vht_cap, sizeof(vht_cap)); + + pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2); + ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap); + + /* + * if both peers support WIDER_BW, we can expand the chandef to + * a wider compatible one, up to 80MHz + */ + if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) + ieee80211_tdls_chandef_vht_upgrade(sdata, sta); + } + + /* add any custom IEs that go before HE capabilities */ + if (extra_ies_len) { + static const u8 before_he_cap[] = { + WLAN_EID_EXTENSION, + WLAN_EID_EXT_FILS_REQ_PARAMS, + WLAN_EID_AP_CSN, + }; + noffset = ieee80211_ie_split(extra_ies, extra_ies_len, + before_he_cap, + ARRAY_SIZE(before_he_cap), + offset); + skb_put_data(skb, extra_ies + offset, noffset - offset); + offset = noffset; + } + + /* build the HE-cap from sband */ + if (he_cap && + (action_code == WLAN_TDLS_SETUP_REQUEST || + action_code == WLAN_TDLS_SETUP_RESPONSE || + action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) { + __le16 he_6ghz_capa; + u8 cap_size; + + cap_size = + 2 + 1 + sizeof(he_cap->he_cap_elem) + + ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) + + ieee80211_he_ppe_size(he_cap->ppe_thres[0], + he_cap->he_cap_elem.phy_cap_info); + pos = skb_put(skb, cap_size); + pos = ieee80211_ie_build_he_cap(0, pos, he_cap, pos + cap_size); + + /* Build HE 6Ghz capa IE from sband */ + if (sband->band == NL80211_BAND_6GHZ) { + cap_size = 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa); + pos = skb_put(skb, cap_size); + he_6ghz_capa = + ieee80211_get_he_6ghz_capa_vif(sband, &sdata->vif); + pos = ieee80211_write_he_6ghz_cap(pos, he_6ghz_capa, + pos + cap_size); + } + } + + /* add any custom IEs that go before EHT capabilities */ + if (extra_ies_len) { + static const u8 before_he_cap[] = { + WLAN_EID_EXTENSION, + WLAN_EID_EXT_FILS_REQ_PARAMS, + WLAN_EID_AP_CSN, + }; + + noffset = ieee80211_ie_split(extra_ies, extra_ies_len, + before_he_cap, + ARRAY_SIZE(before_he_cap), + offset); + skb_put_data(skb, extra_ies + offset, noffset - offset); + offset = noffset; + } + + /* build the EHT-cap from sband */ + if (he_cap && eht_cap && + (action_code == WLAN_TDLS_SETUP_REQUEST || + action_code == WLAN_TDLS_SETUP_RESPONSE || + action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) { + u8 cap_size; + + cap_size = + 2 + 1 + sizeof(eht_cap->eht_cap_elem) + + ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem, + &eht_cap->eht_cap_elem, false) + + ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0], + eht_cap->eht_cap_elem.phy_cap_info); + pos = skb_put(skb, cap_size); + ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + cap_size, false); + } + + /* add any remaining IEs */ + if (extra_ies_len) { + noffset = extra_ies_len; + skb_put_data(skb, extra_ies + offset, noffset - offset); + } + +} + +static void +ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_link_data *link, + struct sk_buff *skb, const u8 *peer, + bool initiator, const u8 *extra_ies, + size_t extra_ies_len) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + size_t offset = 0, noffset; + struct sta_info *sta, *ap_sta; + struct ieee80211_supported_band *sband; + u8 *pos; + + sband = ieee80211_get_link_sband(link); + if (WARN_ON_ONCE(!sband)) + return; + + sta = 
sta_info_get(sdata, peer); + ap_sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr); + + if (WARN_ON_ONCE(!sta || !ap_sta)) + return; + + sta->tdls_chandef = link->conf->chandef; + + /* add any custom IEs that go before the QoS IE */ + if (extra_ies_len) { + static const u8 before_qos[] = { + WLAN_EID_RSN, + }; + noffset = ieee80211_ie_split(extra_ies, extra_ies_len, + before_qos, + ARRAY_SIZE(before_qos), + offset); + skb_put_data(skb, extra_ies + offset, noffset - offset); + offset = noffset; + } + + /* add the QoS param IE if both the peer and we support it */ + if (local->hw.queues >= IEEE80211_NUM_ACS && sta->sta.wme) + ieee80211_tdls_add_wmm_param_ie(sdata, skb); + + /* add any custom IEs that go before HT operation */ + if (extra_ies_len) { + static const u8 before_ht_op[] = { + WLAN_EID_RSN, + WLAN_EID_QOS_CAPA, + WLAN_EID_FAST_BSS_TRANSITION, + WLAN_EID_TIMEOUT_INTERVAL, + }; + noffset = ieee80211_ie_split(extra_ies, extra_ies_len, + before_ht_op, + ARRAY_SIZE(before_ht_op), + offset); + skb_put_data(skb, extra_ies + offset, noffset - offset); + offset = noffset; + } + + /* + * if HT support is only added in TDLS, we need an HT-operation IE. + * add the IE as required by IEEE802.11-2012 9.23.3.2. + */ + if (!ap_sta->sta.deflink.ht_cap.ht_supported && sta->sta.deflink.ht_cap.ht_supported) { + u16 prot = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED | + IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT | + IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT; + + pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation)); + ieee80211_ie_build_ht_oper(pos, &sta->sta.deflink.ht_cap, + &link->conf->chandef, prot, + true); + } + + ieee80211_tdls_add_link_ie(link, skb, peer, initiator); + + /* only include VHT-operation if not on the 2.4GHz band */ + if (sband->band != NL80211_BAND_2GHZ && + sta->sta.deflink.vht_cap.vht_supported) { + /* + * if both peers support WIDER_BW, we can expand the chandef to + * a wider compatible one, up to 80MHz + */ + if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) + ieee80211_tdls_chandef_vht_upgrade(sdata, sta); + + pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation)); + ieee80211_ie_build_vht_oper(pos, &sta->sta.deflink.vht_cap, + &sta->tdls_chandef); + } + + /* add any remaining IEs */ + if (extra_ies_len) { + noffset = extra_ies_len; + skb_put_data(skb, extra_ies + offset, noffset - offset); + } +} + +static void +ieee80211_tdls_add_chan_switch_req_ies(struct ieee80211_link_data *link, + struct sk_buff *skb, const u8 *peer, + bool initiator, const u8 *extra_ies, + size_t extra_ies_len, u8 oper_class, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_tdls_data *tf; + size_t offset = 0, noffset; + + if (WARN_ON_ONCE(!chandef)) + return; + + tf = (void *)skb->data; + tf->u.chan_switch_req.target_channel = + ieee80211_frequency_to_channel(chandef->chan->center_freq); + tf->u.chan_switch_req.oper_class = oper_class; + + if (extra_ies_len) { + static const u8 before_lnkie[] = { + WLAN_EID_SECONDARY_CHANNEL_OFFSET, + }; + noffset = ieee80211_ie_split(extra_ies, extra_ies_len, + before_lnkie, + ARRAY_SIZE(before_lnkie), + offset); + skb_put_data(skb, extra_ies + offset, noffset - offset); + offset = noffset; + } + + ieee80211_tdls_add_link_ie(link, skb, peer, initiator); + + /* add any remaining IEs */ + if (extra_ies_len) { + noffset = extra_ies_len; + skb_put_data(skb, extra_ies + offset, noffset - offset); + } +} + +static void +ieee80211_tdls_add_chan_switch_resp_ies(struct ieee80211_link_data *link, + struct sk_buff *skb, const u8 *peer, + u16 status_code, bool 
initiator, + const u8 *extra_ies, + size_t extra_ies_len) +{ + if (status_code == 0) + ieee80211_tdls_add_link_ie(link, skb, peer, initiator); + + if (extra_ies_len) + skb_put_data(skb, extra_ies, extra_ies_len); +} + +static void ieee80211_tdls_add_ies(struct ieee80211_link_data *link, + struct sk_buff *skb, const u8 *peer, + u8 action_code, u16 status_code, + bool initiator, const u8 *extra_ies, + size_t extra_ies_len, u8 oper_class, + struct cfg80211_chan_def *chandef) +{ + switch (action_code) { + case WLAN_TDLS_SETUP_REQUEST: + case WLAN_TDLS_SETUP_RESPONSE: + case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: + if (status_code == 0) + ieee80211_tdls_add_setup_start_ies(link, + skb, peer, + action_code, + initiator, + extra_ies, + extra_ies_len); + break; + case WLAN_TDLS_SETUP_CONFIRM: + if (status_code == 0) + ieee80211_tdls_add_setup_cfm_ies(link, skb, peer, + initiator, extra_ies, + extra_ies_len); + break; + case WLAN_TDLS_TEARDOWN: + case WLAN_TDLS_DISCOVERY_REQUEST: + if (extra_ies_len) + skb_put_data(skb, extra_ies, extra_ies_len); + if (status_code == 0 || action_code == WLAN_TDLS_TEARDOWN) + ieee80211_tdls_add_link_ie(link, skb, + peer, initiator); + break; + case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: + ieee80211_tdls_add_chan_switch_req_ies(link, skb, peer, + initiator, extra_ies, + extra_ies_len, + oper_class, chandef); + break; + case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: + ieee80211_tdls_add_chan_switch_resp_ies(link, skb, peer, + status_code, + initiator, extra_ies, + extra_ies_len); + break; + } + +} + +static int +ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, + struct ieee80211_link_data *link, + const u8 *peer, u8 action_code, u8 dialog_token, + u16 status_code, struct sk_buff *skb) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_tdls_data *tf; + + tf = skb_put(skb, offsetof(struct ieee80211_tdls_data, u)); + + memcpy(tf->da, peer, ETH_ALEN); + memcpy(tf->sa, sdata->vif.addr, ETH_ALEN); + tf->ether_type = cpu_to_be16(ETH_P_TDLS); + tf->payload_type = WLAN_TDLS_SNAP_RFTYPE; + + /* network header is after the ethernet header */ + skb_set_network_header(skb, ETH_HLEN); + + switch (action_code) { + case WLAN_TDLS_SETUP_REQUEST: + tf->category = WLAN_CATEGORY_TDLS; + tf->action_code = WLAN_TDLS_SETUP_REQUEST; + + skb_put(skb, sizeof(tf->u.setup_req)); + tf->u.setup_req.dialog_token = dialog_token; + tf->u.setup_req.capability = + cpu_to_le16(ieee80211_get_tdls_sta_capab(link, + status_code)); + break; + case WLAN_TDLS_SETUP_RESPONSE: + tf->category = WLAN_CATEGORY_TDLS; + tf->action_code = WLAN_TDLS_SETUP_RESPONSE; + + skb_put(skb, sizeof(tf->u.setup_resp)); + tf->u.setup_resp.status_code = cpu_to_le16(status_code); + tf->u.setup_resp.dialog_token = dialog_token; + tf->u.setup_resp.capability = + cpu_to_le16(ieee80211_get_tdls_sta_capab(link, + status_code)); + break; + case WLAN_TDLS_SETUP_CONFIRM: + tf->category = WLAN_CATEGORY_TDLS; + tf->action_code = WLAN_TDLS_SETUP_CONFIRM; + + skb_put(skb, sizeof(tf->u.setup_cfm)); + tf->u.setup_cfm.status_code = cpu_to_le16(status_code); + tf->u.setup_cfm.dialog_token = dialog_token; + break; + case WLAN_TDLS_TEARDOWN: + tf->category = WLAN_CATEGORY_TDLS; + tf->action_code = WLAN_TDLS_TEARDOWN; + + skb_put(skb, sizeof(tf->u.teardown)); + tf->u.teardown.reason_code = cpu_to_le16(status_code); + break; + case WLAN_TDLS_DISCOVERY_REQUEST: + tf->category = WLAN_CATEGORY_TDLS; + tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST; + + skb_put(skb, sizeof(tf->u.discover_req)); + 
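+ /* note: the discovery request is encapsulated as a TDLS data frame and sent through the AP; only the discovery response (built in ieee80211_prep_tdls_direct) goes out as a direct public action frame */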
tf->u.discover_req.dialog_token = dialog_token; + break; + case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: + tf->category = WLAN_CATEGORY_TDLS; + tf->action_code = WLAN_TDLS_CHANNEL_SWITCH_REQUEST; + + skb_put(skb, sizeof(tf->u.chan_switch_req)); + break; + case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: + tf->category = WLAN_CATEGORY_TDLS; + tf->action_code = WLAN_TDLS_CHANNEL_SWITCH_RESPONSE; + + skb_put(skb, sizeof(tf->u.chan_switch_resp)); + tf->u.chan_switch_resp.status_code = cpu_to_le16(status_code); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, struct ieee80211_link_data *link, + u8 action_code, u8 dialog_token, + u16 status_code, struct sk_buff *skb) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_mgmt *mgmt; + + mgmt = skb_put_zero(skb, 24); + memcpy(mgmt->da, peer, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, link->u.mgd.bssid, ETH_ALEN); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + + switch (action_code) { + case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: + skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp)); + mgmt->u.action.category = WLAN_CATEGORY_PUBLIC; + mgmt->u.action.u.tdls_discover_resp.action_code = + WLAN_PUB_ACTION_TDLS_DISCOVER_RES; + mgmt->u.action.u.tdls_discover_resp.dialog_token = + dialog_token; + mgmt->u.action.u.tdls_discover_resp.capability = + cpu_to_le16(ieee80211_get_tdls_sta_capab(link, + status_code)); + break; + default: + return -EINVAL; + } + + return 0; +} + +static struct sk_buff * +ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata, + const u8 *peer, int link_id, + u8 action_code, u8 dialog_token, + u16 status_code, bool initiator, + const u8 *extra_ies, size_t extra_ies_len, + u8 oper_class, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + int ret; + struct ieee80211_link_data *link; + + link_id = link_id >= 0 ? 
link_id : 0; + rcu_read_lock(); + link = rcu_dereference(sdata->link[link_id]); + if (WARN_ON(!link)) + goto unlock; + + skb = netdev_alloc_skb(sdata->dev, + local->hw.extra_tx_headroom + + max(sizeof(struct ieee80211_mgmt), + sizeof(struct ieee80211_tdls_data)) + + 50 + /* supported rates */ + 10 + /* ext capab */ + 26 + /* max(WMM-info, WMM-param) */ + 2 + max(sizeof(struct ieee80211_ht_cap), + sizeof(struct ieee80211_ht_operation)) + + 2 + max(sizeof(struct ieee80211_vht_cap), + sizeof(struct ieee80211_vht_operation)) + + 2 + 1 + sizeof(struct ieee80211_he_cap_elem) + + sizeof(struct ieee80211_he_mcs_nss_supp) + + IEEE80211_HE_PPE_THRES_MAX_LEN + + 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa) + + 2 + 1 + sizeof(struct ieee80211_eht_cap_elem) + + sizeof(struct ieee80211_eht_mcs_nss_supp) + + IEEE80211_EHT_PPE_THRES_MAX_LEN + + 50 + /* supported channels */ + 3 + /* 40/20 BSS coex */ + 4 + /* AID */ + 4 + /* oper classes */ + extra_ies_len + + sizeof(struct ieee80211_tdls_lnkie)); + if (!skb) + goto unlock; + + skb_reserve(skb, local->hw.extra_tx_headroom); + + switch (action_code) { + case WLAN_TDLS_SETUP_REQUEST: + case WLAN_TDLS_SETUP_RESPONSE: + case WLAN_TDLS_SETUP_CONFIRM: + case WLAN_TDLS_TEARDOWN: + case WLAN_TDLS_DISCOVERY_REQUEST: + case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: + case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: + ret = ieee80211_prep_tdls_encap_data(local->hw.wiphy, + sdata->dev, link, peer, + action_code, dialog_token, + status_code, skb); + break; + case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: + ret = ieee80211_prep_tdls_direct(local->hw.wiphy, sdata->dev, + peer, link, action_code, + dialog_token, status_code, + skb); + break; + default: + ret = -ENOTSUPP; + break; + } + + if (ret < 0) + goto fail; + + ieee80211_tdls_add_ies(link, skb, peer, action_code, status_code, + initiator, extra_ies, extra_ies_len, oper_class, + chandef); + rcu_read_unlock(); + return skb; + +fail: + dev_kfree_skb(skb); +unlock: + rcu_read_unlock(); + return NULL; +} + +static int +ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, int link_id, + u8 action_code, u8 dialog_token, + u16 status_code, u32 peer_capability, + bool initiator, const u8 *extra_ies, + size_t extra_ies_len, u8 oper_class, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct sk_buff *skb = NULL; + struct sta_info *sta; + u32 flags = 0; + int ret = 0; + + rcu_read_lock(); + sta = sta_info_get(sdata, peer); + + /* infer the initiator if we can, to support old userspace */ + switch (action_code) { + case WLAN_TDLS_SETUP_REQUEST: + if (sta) { + set_sta_flag(sta, WLAN_STA_TDLS_INITIATOR); + sta->sta.tdls_initiator = false; + } + fallthrough; + case WLAN_TDLS_SETUP_CONFIRM: + case WLAN_TDLS_DISCOVERY_REQUEST: + initiator = true; + break; + case WLAN_TDLS_SETUP_RESPONSE: + /* + * In some testing scenarios, we send a request and response. + * Make the last packet sent take effect for the initiator + * value. 
+ */ + if (sta) { + clear_sta_flag(sta, WLAN_STA_TDLS_INITIATOR); + sta->sta.tdls_initiator = true; + } + fallthrough; + case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: + initiator = false; + break; + case WLAN_TDLS_TEARDOWN: + case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: + case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: + /* any value is ok */ + break; + default: + ret = -ENOTSUPP; + break; + } + + if (sta && test_sta_flag(sta, WLAN_STA_TDLS_INITIATOR)) + initiator = true; + + rcu_read_unlock(); + if (ret < 0) + goto fail; + + skb = ieee80211_tdls_build_mgmt_packet_data(sdata, peer, + link_id, action_code, + dialog_token, status_code, + initiator, extra_ies, + extra_ies_len, oper_class, + chandef); + if (!skb) { + ret = -EINVAL; + goto fail; + } + + if (action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) { + ieee80211_tx_skb_tid(sdata, skb, 7, link_id); + return 0; + } + + /* + * According to 802.11z: Setup req/resp are sent in AC_BK, otherwise + * we should default to AC_VI. + */ + switch (action_code) { + case WLAN_TDLS_SETUP_REQUEST: + case WLAN_TDLS_SETUP_RESPONSE: + skb->priority = 256 + 2; + break; + default: + skb->priority = 256 + 5; + break; + } + + /* + * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress. + * Later, if no ACK is returned from peer, we will re-send the teardown + * packet through the AP. + */ + if ((action_code == WLAN_TDLS_TEARDOWN) && + ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) { + bool try_resend; /* Should we keep skb for possible resend */ + + /* If not sending directly to peer - no point in keeping skb */ + rcu_read_lock(); + sta = sta_info_get(sdata, peer); + try_resend = sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH); + rcu_read_unlock(); + + spin_lock_bh(&sdata->u.mgd.teardown_lock); + if (try_resend && !sdata->u.mgd.teardown_skb) { + /* Mark it as requiring TX status callback */ + flags |= IEEE80211_TX_CTL_REQ_TX_STATUS | + IEEE80211_TX_INTFL_MLME_CONN_TX; + + /* + * skb is copied since mac80211 will later set + * properties that might not be the same as the AP, + * such as encryption, QoS, addresses, etc. + * + * No problem if skb_copy() fails, so no need to check. 
+ */ + sdata->u.mgd.teardown_skb = skb_copy(skb, GFP_ATOMIC); + sdata->u.mgd.orig_teardown_skb = skb; + } + spin_unlock_bh(&sdata->u.mgd.teardown_lock); + } + + /* disable bottom halves when entering the Tx path */ + local_bh_disable(); + __ieee80211_subif_start_xmit(skb, dev, flags, + IEEE80211_TX_CTRL_MLO_LINK_UNSPEC, NULL); + local_bh_enable(); + + return ret; + +fail: + dev_kfree_skb(skb); + return ret; +} + +static int +ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, int link_id, + u8 action_code, u8 dialog_token, + u16 status_code, u32 peer_capability, bool initiator, + const u8 *extra_ies, size_t extra_ies_len) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + enum ieee80211_smps_mode smps_mode = + sdata->deflink.u.mgd.driver_smps_mode; + int ret; + + /* don't support setup with forced SMPS mode that's not off */ + if (smps_mode != IEEE80211_SMPS_AUTOMATIC && + smps_mode != IEEE80211_SMPS_OFF) { + tdls_dbg(sdata, "Aborting TDLS setup due to SMPS mode %d\n", + smps_mode); + return -ENOTSUPP; + } + + mutex_lock(&local->mtx); + + /* we don't support concurrent TDLS peer setups */ + if (!is_zero_ether_addr(sdata->u.mgd.tdls_peer) && + !ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) { + ret = -EBUSY; + goto out_unlock; + } + + /* + * make sure we have a STA representing the peer so we drop or buffer + * non-TDLS-setup frames to the peer. We can't send other packets + * during setup through the AP path. + * Allow error packets to be sent - sometimes we don't even add a STA + * before failing the setup. + */ + if (status_code == 0) { + rcu_read_lock(); + if (!sta_info_get(sdata, peer)) { + rcu_read_unlock(); + ret = -ENOLINK; + goto out_unlock; + } + rcu_read_unlock(); + } + + ieee80211_flush_queues(local, sdata, false); + memcpy(sdata->u.mgd.tdls_peer, peer, ETH_ALEN); + mutex_unlock(&local->mtx); + + /* we cannot take the mutex while preparing the setup packet */ + ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, + link_id, action_code, + dialog_token, status_code, + peer_capability, initiator, + extra_ies, extra_ies_len, 0, + NULL); + if (ret < 0) { + mutex_lock(&local->mtx); + eth_zero_addr(sdata->u.mgd.tdls_peer); + mutex_unlock(&local->mtx); + return ret; + } + + ieee80211_queue_delayed_work(&sdata->local->hw, + &sdata->u.mgd.tdls_peer_del_work, + TDLS_PEER_SETUP_TIMEOUT); + return 0; + +out_unlock: + mutex_unlock(&local->mtx); + return ret; +} + +static int +ieee80211_tdls_mgmt_teardown(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, int link_id, + u8 action_code, u8 dialog_token, + u16 status_code, u32 peer_capability, + bool initiator, const u8 *extra_ies, + size_t extra_ies_len) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + int ret; + + /* + * No packets can be transmitted to the peer via the AP during setup - + * the STA is set as a TDLS peer, but is not authorized. + * During teardown, we prevent direct transmissions by stopping the + * queues and flushing all direct packets. 
+ */ + ieee80211_stop_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN); + ieee80211_flush_queues(local, sdata, false); + + ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, + link_id, action_code, + dialog_token, status_code, + peer_capability, initiator, + extra_ies, extra_ies_len, 0, + NULL); + if (ret < 0) + sdata_err(sdata, "Failed sending TDLS teardown packet %d\n", + ret); + + /* + * Remove the STA AUTH flag to force further traffic through the AP. If + * the STA was unreachable, it was already removed. + */ + rcu_read_lock(); + sta = sta_info_get(sdata, peer); + if (sta) + clear_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH); + rcu_read_unlock(); + + ieee80211_wake_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN); + + return 0; +} + +int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, int link_id, + u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, bool initiator, + const u8 *extra_ies, size_t extra_ies_len) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + int ret; + + if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)) + return -ENOTSUPP; + + /* make sure we are in managed mode, and associated */ + if (sdata->vif.type != NL80211_IFTYPE_STATION || + !sdata->u.mgd.associated) + return -EINVAL; + + switch (action_code) { + case WLAN_TDLS_SETUP_REQUEST: + case WLAN_TDLS_SETUP_RESPONSE: + ret = ieee80211_tdls_mgmt_setup(wiphy, dev, peer, + link_id, action_code, + dialog_token, status_code, + peer_capability, initiator, + extra_ies, extra_ies_len); + break; + case WLAN_TDLS_TEARDOWN: + ret = ieee80211_tdls_mgmt_teardown(wiphy, dev, peer, link_id, + action_code, dialog_token, + status_code, + peer_capability, initiator, + extra_ies, extra_ies_len); + break; + case WLAN_TDLS_DISCOVERY_REQUEST: + /* + * Protect the discovery so we can hear the TDLS discovery + * response frame. It is transmitted directly and not buffered + * by the AP. 
+ */ + drv_mgd_protect_tdls_discover(sdata->local, sdata); + fallthrough; + case WLAN_TDLS_SETUP_CONFIRM: + case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: + /* no special handling */ + ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, + link_id, action_code, + dialog_token, + status_code, + peer_capability, + initiator, extra_ies, + extra_ies_len, 0, NULL); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + tdls_dbg(sdata, "TDLS mgmt action %d peer %pM link_id %d status %d\n", + action_code, peer, link_id, ret); + return ret; +} + +static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *conf; + struct ieee80211_chanctx *ctx; + enum nl80211_chan_width width; + struct ieee80211_supported_band *sband; + + mutex_lock(&local->chanctx_mtx); + conf = rcu_dereference_protected(sdata->vif.bss_conf.chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + if (conf) { + width = conf->def.width; + sband = local->hw.wiphy->bands[conf->def.chan->band]; + ctx = container_of(conf, struct ieee80211_chanctx, conf); + ieee80211_recalc_chanctx_chantype(local, ctx); + + /* if width changed and a peer is given, update its BW */ + if (width != conf->def.width && sta && + test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) { + enum ieee80211_sta_rx_bandwidth bw; + + bw = ieee80211_chan_width_to_rx_bw(conf->def.width); + bw = min(bw, ieee80211_sta_cap_rx_bw(&sta->deflink)); + if (bw != sta->sta.deflink.bandwidth) { + sta->sta.deflink.bandwidth = bw; + rate_control_rate_update(local, sband, sta, 0, + IEEE80211_RC_BW_CHANGED); + /* + * if a TDLS peer BW was updated, we need to + * recalc the chandef width again, to get the + * correct chanctx min_def + */ + ieee80211_recalc_chanctx_chantype(local, ctx); + } + } + + } + mutex_unlock(&local->chanctx_mtx); +} + +static int iee80211_tdls_have_ht_peers(struct ieee80211_sub_if_data *sdata) +{ + struct sta_info *sta; + bool result = false; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { + if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded || + !test_sta_flag(sta, WLAN_STA_AUTHORIZED) || + !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH) || + !sta->sta.deflink.ht_cap.ht_supported) + continue; + result = true; + break; + } + rcu_read_unlock(); + + return result; +} + +static void +iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta) +{ + bool tdls_ht; + u16 protection = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED | + IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT | + IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT; + u16 opmode; + + /* Nothing to do if the BSS connection uses HT */ + if (!(sdata->deflink.u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)) + return; + + tdls_ht = (sta && sta->sta.deflink.ht_cap.ht_supported) || + iee80211_tdls_have_ht_peers(sdata); + + opmode = sdata->vif.bss_conf.ht_operation_mode; + + if (tdls_ht) + opmode |= protection; + else + opmode &= ~protection; + + if (opmode == sdata->vif.bss_conf.ht_operation_mode) + return; + + sdata->vif.bss_conf.ht_operation_mode = opmode; + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_HT); +} + +int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, enum nl80211_tdls_operation oper) +{ + struct sta_info *sta; + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + int ret; + + if (!(wiphy->flags & 
WIPHY_FLAG_SUPPORTS_TDLS)) + return -ENOTSUPP; + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return -EINVAL; + + switch (oper) { + case NL80211_TDLS_ENABLE_LINK: + case NL80211_TDLS_DISABLE_LINK: + break; + case NL80211_TDLS_TEARDOWN: + case NL80211_TDLS_SETUP: + case NL80211_TDLS_DISCOVERY_REQ: + /* We don't support in-driver setup/teardown/discovery */ + return -ENOTSUPP; + } + + /* protect possible bss_conf changes and avoid concurrency in + * ieee80211_bss_info_change_notify() + */ + sdata_lock(sdata); + mutex_lock(&local->mtx); + tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer); + + switch (oper) { + case NL80211_TDLS_ENABLE_LINK: + if (sdata->vif.bss_conf.csa_active) { + tdls_dbg(sdata, "TDLS: disallow link during CSA\n"); + ret = -EBUSY; + break; + } + + mutex_lock(&local->sta_mtx); + sta = sta_info_get(sdata, peer); + if (!sta) { + mutex_unlock(&local->sta_mtx); + ret = -ENOLINK; + break; + } + + iee80211_tdls_recalc_chanctx(sdata, sta); + iee80211_tdls_recalc_ht_protection(sdata, sta); + + set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH); + mutex_unlock(&local->sta_mtx); + + WARN_ON_ONCE(is_zero_ether_addr(sdata->u.mgd.tdls_peer) || + !ether_addr_equal(sdata->u.mgd.tdls_peer, peer)); + ret = 0; + break; + case NL80211_TDLS_DISABLE_LINK: + /* + * The teardown message in ieee80211_tdls_mgmt_teardown() was + * created while the queues were stopped, so it might still be + * pending. Before flushing the queues we need to be sure the + * message is handled by the tasklet handling pending messages, + * otherwise we might start destroying the station before + * sending the teardown packet. + * Note that this only forces the tasklet to flush pendings - + * not to stop the tasklet from rescheduling itself. + */ + tasklet_kill(&local->tx_pending_tasklet); + /* flush a potentially queued teardown packet */ + ieee80211_flush_queues(local, sdata, false); + + ret = sta_info_destroy_addr(sdata, peer); + + mutex_lock(&local->sta_mtx); + iee80211_tdls_recalc_ht_protection(sdata, NULL); + mutex_unlock(&local->sta_mtx); + + iee80211_tdls_recalc_chanctx(sdata, NULL); + break; + default: + ret = -ENOTSUPP; + break; + } + + if (ret == 0 && ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) { + cancel_delayed_work(&sdata->u.mgd.tdls_peer_del_work); + eth_zero_addr(sdata->u.mgd.tdls_peer); + } + + if (ret == 0) + wiphy_work_queue(sdata->local->hw.wiphy, + &sdata->deflink.u.mgd.request_smps_work); + + mutex_unlock(&local->mtx); + sdata_unlock(sdata); + return ret; +} + +void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer, + enum nl80211_tdls_operation oper, + u16 reason_code, gfp_t gfp) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc) { + sdata_err(sdata, "Discarding TDLS oper %d - not STA or disconnected\n", + oper); + return; + } + + cfg80211_tdls_oper_request(sdata->dev, peer, oper, reason_code, gfp); +} +EXPORT_SYMBOL(ieee80211_tdls_oper_request); + +static void +iee80211_tdls_add_ch_switch_timing(u8 *buf, u16 switch_time, u16 switch_timeout) +{ + struct ieee80211_ch_switch_timing *ch_sw; + + *buf++ = WLAN_EID_CHAN_SWITCH_TIMING; + *buf++ = sizeof(struct ieee80211_ch_switch_timing); + + ch_sw = (void *)buf; + ch_sw->switch_time = cpu_to_le16(switch_time); + ch_sw->switch_timeout = cpu_to_le16(switch_timeout); +} + +/* find switch timing IE in SKB ready for Tx */ +static const u8 *ieee80211_tdls_find_sw_timing_ie(struct sk_buff *skb) +{ + struct ieee80211_tdls_data *tf; + const u8 *ie_start; + + /* + 
* Get the offset for the new location of the switch timing IE. + * The SKB network header will now point to the "payload_type" + * element of the TDLS data frame struct. + */ + tf = container_of(skb->data + skb_network_offset(skb), + struct ieee80211_tdls_data, payload_type); + ie_start = tf->u.chan_switch_req.variable; + return cfg80211_find_ie(WLAN_EID_CHAN_SWITCH_TIMING, ie_start, + skb->len - (ie_start - skb->data)); +} + +static struct sk_buff * +ieee80211_tdls_ch_sw_tmpl_get(struct sta_info *sta, u8 oper_class, + struct cfg80211_chan_def *chandef, + u32 *ch_sw_tm_ie_offset) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + u8 extra_ies[2 + sizeof(struct ieee80211_sec_chan_offs_ie) + + 2 + sizeof(struct ieee80211_ch_switch_timing)]; + int extra_ies_len = 2 + sizeof(struct ieee80211_ch_switch_timing); + u8 *pos = extra_ies; + struct sk_buff *skb; + int link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0; + + /* + * if chandef points to a wide channel add a Secondary-Channel + * Offset information element + */ + if (chandef->width == NL80211_CHAN_WIDTH_40) { + struct ieee80211_sec_chan_offs_ie *sec_chan_ie; + bool ht40plus; + + *pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET; + *pos++ = sizeof(*sec_chan_ie); + sec_chan_ie = (void *)pos; + + ht40plus = cfg80211_get_chandef_type(chandef) == + NL80211_CHAN_HT40PLUS; + sec_chan_ie->sec_chan_offs = ht40plus ? + IEEE80211_HT_PARAM_CHA_SEC_ABOVE : + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + pos += sizeof(*sec_chan_ie); + + extra_ies_len += 2 + sizeof(struct ieee80211_sec_chan_offs_ie); + } + + /* just set the values to 0, this is a template */ + iee80211_tdls_add_ch_switch_timing(pos, 0, 0); + + skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr, + link_id, + WLAN_TDLS_CHANNEL_SWITCH_REQUEST, + 0, 0, !sta->sta.tdls_initiator, + extra_ies, extra_ies_len, + oper_class, chandef); + if (!skb) + return NULL; + + skb = ieee80211_build_data_template(sdata, skb, 0); + if (IS_ERR(skb)) { + tdls_dbg(sdata, "Failed building TDLS channel switch frame\n"); + return NULL; + } + + if (ch_sw_tm_ie_offset) { + const u8 *tm_ie = ieee80211_tdls_find_sw_timing_ie(skb); + + if (!tm_ie) { + tdls_dbg(sdata, "No switch timing IE in TDLS switch\n"); + dev_kfree_skb_any(skb); + return NULL; + } + + *ch_sw_tm_ie_offset = tm_ie - skb->data; + } + + tdls_dbg(sdata, + "TDLS channel switch request template for %pM ch %d width %d\n", + sta->sta.addr, chandef->chan->center_freq, chandef->width); + return skb; +} + +int +ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev, + const u8 *addr, u8 oper_class, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + struct sk_buff *skb = NULL; + u32 ch_sw_tm_ie; + int ret; + + if (chandef->chan->freq_offset) + /* this may work, but is untested */ + return -EOPNOTSUPP; + + mutex_lock(&local->sta_mtx); + sta = sta_info_get(sdata, addr); + if (!sta) { + tdls_dbg(sdata, + "Invalid TDLS peer %pM for channel switch request\n", + addr); + ret = -ENOENT; + goto out; + } + + if (!test_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH)) { + tdls_dbg(sdata, "TDLS channel switch unsupported by %pM\n", + addr); + ret = -ENOTSUPP; + goto out; + } + + skb = ieee80211_tdls_ch_sw_tmpl_get(sta, oper_class, chandef, + &ch_sw_tm_ie); + if (!skb) { + ret = -ENOENT; + goto out; + } + + ret = drv_tdls_channel_switch(local, sdata, &sta->sta, oper_class, + chandef, skb, ch_sw_tm_ie); + if (!ret) 
+ set_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL); + +out: + mutex_unlock(&local->sta_mtx); + dev_kfree_skb_any(skb); + return ret; +} + +void +ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy, + struct net_device *dev, + const u8 *addr) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + mutex_lock(&local->sta_mtx); + sta = sta_info_get(sdata, addr); + if (!sta) { + tdls_dbg(sdata, + "Invalid TDLS peer %pM for channel switch cancel\n", + addr); + goto out; + } + + if (!test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) { + tdls_dbg(sdata, "TDLS channel switch not initiated by %pM\n", + addr); + goto out; + } + + drv_tdls_cancel_channel_switch(local, sdata, &sta->sta); + clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL); + +out: + mutex_unlock(&local->sta_mtx); +} + +static struct sk_buff * +ieee80211_tdls_ch_sw_resp_tmpl_get(struct sta_info *sta, + u32 *ch_sw_tm_ie_offset) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct sk_buff *skb; + u8 extra_ies[2 + sizeof(struct ieee80211_ch_switch_timing)]; + int link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0; + + /* initial timing are always zero in the template */ + iee80211_tdls_add_ch_switch_timing(extra_ies, 0, 0); + + skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr, + link_id, + WLAN_TDLS_CHANNEL_SWITCH_RESPONSE, + 0, 0, !sta->sta.tdls_initiator, + extra_ies, sizeof(extra_ies), 0, NULL); + if (!skb) + return NULL; + + skb = ieee80211_build_data_template(sdata, skb, 0); + if (IS_ERR(skb)) { + tdls_dbg(sdata, + "Failed building TDLS channel switch resp frame\n"); + return NULL; + } + + if (ch_sw_tm_ie_offset) { + const u8 *tm_ie = ieee80211_tdls_find_sw_timing_ie(skb); + + if (!tm_ie) { + tdls_dbg(sdata, + "No switch timing IE in TDLS switch resp\n"); + dev_kfree_skb_any(skb); + return NULL; + } + + *ch_sw_tm_ie_offset = tm_ie - skb->data; + } + + tdls_dbg(sdata, "TDLS get channel switch response template for %pM\n", + sta->sta.addr); + return skb; +} + +static int +ieee80211_process_tdls_channel_switch_resp(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee802_11_elems *elems = NULL; + struct sta_info *sta; + struct ieee80211_tdls_data *tf = (void *)skb->data; + bool local_initiator; + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + int baselen = offsetof(typeof(*tf), u.chan_switch_resp.variable); + struct ieee80211_tdls_ch_sw_params params = {}; + int ret; + + params.action_code = WLAN_TDLS_CHANNEL_SWITCH_RESPONSE; + params.timestamp = rx_status->device_timestamp; + + if (skb->len < baselen) { + tdls_dbg(sdata, "TDLS channel switch resp too short: %d\n", + skb->len); + return -EINVAL; + } + + mutex_lock(&local->sta_mtx); + sta = sta_info_get(sdata, tf->sa); + if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) { + tdls_dbg(sdata, "TDLS chan switch from non-peer sta %pM\n", + tf->sa); + ret = -EINVAL; + goto out; + } + + params.sta = &sta->sta; + params.status = le16_to_cpu(tf->u.chan_switch_resp.status_code); + if (params.status != 0) { + ret = 0; + goto call_drv; + } + + elems = ieee802_11_parse_elems(tf->u.chan_switch_resp.variable, + skb->len - baselen, false, NULL); + if (!elems) { + ret = -ENOMEM; + goto out; + } + + if (elems->parse_error) { + tdls_dbg(sdata, "Invalid IEs in TDLS channel switch resp\n"); + ret = -EINVAL; + goto out; + } + + if (!elems->ch_sw_timing || !elems->lnk_id) { + 
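+ /* the Channel Switch Timing and Link Identifier elements are both mandatory in a TDLS channel switch response */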
tdls_dbg(sdata, "TDLS channel switch resp - missing IEs\n"); + ret = -EINVAL; + goto out; + } + + /* validate the initiator is set correctly */ + local_initiator = + !memcmp(elems->lnk_id->init_sta, sdata->vif.addr, ETH_ALEN); + if (local_initiator == sta->sta.tdls_initiator) { + tdls_dbg(sdata, "TDLS chan switch invalid lnk-id initiator\n"); + ret = -EINVAL; + goto out; + } + + params.switch_time = le16_to_cpu(elems->ch_sw_timing->switch_time); + params.switch_timeout = le16_to_cpu(elems->ch_sw_timing->switch_timeout); + + params.tmpl_skb = + ieee80211_tdls_ch_sw_resp_tmpl_get(sta, &params.ch_sw_tm_ie); + if (!params.tmpl_skb) { + ret = -ENOENT; + goto out; + } + + ret = 0; +call_drv: + drv_tdls_recv_channel_switch(sdata->local, sdata, &params); + + tdls_dbg(sdata, + "TDLS channel switch response received from %pM status %d\n", + tf->sa, params.status); + +out: + mutex_unlock(&local->sta_mtx); + dev_kfree_skb_any(params.tmpl_skb); + kfree(elems); + return ret; +} + +static int +ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee802_11_elems *elems; + struct cfg80211_chan_def chandef; + struct ieee80211_channel *chan; + enum nl80211_channel_type chan_type; + int freq; + u8 target_channel, oper_class; + bool local_initiator; + struct sta_info *sta; + enum nl80211_band band; + struct ieee80211_tdls_data *tf = (void *)skb->data; + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + int baselen = offsetof(typeof(*tf), u.chan_switch_req.variable); + struct ieee80211_tdls_ch_sw_params params = {}; + int ret = 0; + + params.action_code = WLAN_TDLS_CHANNEL_SWITCH_REQUEST; + params.timestamp = rx_status->device_timestamp; + + if (skb->len < baselen) { + tdls_dbg(sdata, "TDLS channel switch req too short: %d\n", + skb->len); + return -EINVAL; + } + + target_channel = tf->u.chan_switch_req.target_channel; + oper_class = tf->u.chan_switch_req.oper_class; + + /* + * We can't easily infer the channel band. The operating class is + * ambiguous - there are multiple tables (US/Europe/JP/Global). The + * solution here is to treat channels with number >14 as 5GHz ones, + * and specifically check for the (oper_class, channel) combinations + * where this doesn't hold. These are thankfully unique according to + * IEEE802.11-2012. + * We consider only the 2GHz and 5GHz bands and 20MHz+ channels as + * valid here. + */ + if ((oper_class == 112 || oper_class == 2 || oper_class == 3 || + oper_class == 4 || oper_class == 5 || oper_class == 6) && + target_channel < 14) + band = NL80211_BAND_5GHZ; + else + band = target_channel < 14 ?
NL80211_BAND_2GHZ : + NL80211_BAND_5GHZ; + + freq = ieee80211_channel_to_frequency(target_channel, band); + if (freq == 0) { + tdls_dbg(sdata, "Invalid channel in TDLS chan switch: %d\n", + target_channel); + return -EINVAL; + } + + chan = ieee80211_get_channel(sdata->local->hw.wiphy, freq); + if (!chan) { + tdls_dbg(sdata, + "Unsupported channel for TDLS chan switch: %d\n", + target_channel); + return -EINVAL; + } + + elems = ieee802_11_parse_elems(tf->u.chan_switch_req.variable, + skb->len - baselen, false, NULL); + if (!elems) + return -ENOMEM; + + if (elems->parse_error) { + tdls_dbg(sdata, "Invalid IEs in TDLS channel switch req\n"); + ret = -EINVAL; + goto free; + } + + if (!elems->ch_sw_timing || !elems->lnk_id) { + tdls_dbg(sdata, "TDLS channel switch req - missing IEs\n"); + ret = -EINVAL; + goto free; + } + + if (!elems->sec_chan_offs) { + chan_type = NL80211_CHAN_HT20; + } else { + switch (elems->sec_chan_offs->sec_chan_offs) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + chan_type = NL80211_CHAN_HT40PLUS; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + chan_type = NL80211_CHAN_HT40MINUS; + break; + default: + chan_type = NL80211_CHAN_HT20; + break; + } + } + + cfg80211_chandef_create(&chandef, chan, chan_type); + + /* we will be active on the TDLS link */ + if (!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &chandef, + sdata->wdev.iftype)) { + tdls_dbg(sdata, "TDLS chan switch to forbidden channel\n"); + ret = -EINVAL; + goto free; + } + + mutex_lock(&local->sta_mtx); + sta = sta_info_get(sdata, tf->sa); + if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) { + tdls_dbg(sdata, "TDLS chan switch from non-peer sta %pM\n", + tf->sa); + ret = -EINVAL; + goto out; + } + + params.sta = &sta->sta; + + /* validate the initiator is set correctly */ + local_initiator = + !memcmp(elems->lnk_id->init_sta, sdata->vif.addr, ETH_ALEN); + if (local_initiator == sta->sta.tdls_initiator) { + tdls_dbg(sdata, "TDLS chan switch invalid lnk-id initiator\n"); + ret = -EINVAL; + goto out; + } + + /* peer should have known better */ + if (!sta->sta.deflink.ht_cap.ht_supported && elems->sec_chan_offs && + elems->sec_chan_offs->sec_chan_offs) { + tdls_dbg(sdata, "TDLS chan switch - wide chan unsupported\n"); + ret = -ENOTSUPP; + goto out; + } + + params.chandef = &chandef; + params.switch_time = le16_to_cpu(elems->ch_sw_timing->switch_time); + params.switch_timeout = le16_to_cpu(elems->ch_sw_timing->switch_timeout); + + params.tmpl_skb = + ieee80211_tdls_ch_sw_resp_tmpl_get(sta, + &params.ch_sw_tm_ie); + if (!params.tmpl_skb) { + ret = -ENOENT; + goto out; + } + + drv_tdls_recv_channel_switch(sdata->local, sdata, &params); + + tdls_dbg(sdata, + "TDLS ch switch request received from %pM ch %d width %d\n", + tf->sa, params.chandef->chan->center_freq, + params.chandef->width); +out: + mutex_unlock(&local->sta_mtx); + dev_kfree_skb_any(params.tmpl_skb); +free: + kfree(elems); + return ret; +} + +void +ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_tdls_data *tf = (void *)skb->data; + struct wiphy *wiphy = sdata->local->hw.wiphy; + + lockdep_assert_wiphy(wiphy); + + /* make sure the driver supports it */ + if (!(wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH)) + return; + + /* we want to access the entire packet */ + if (skb_linearize(skb)) + return; + /* + * The packet/size was already validated by mac80211 Rx path, only look + * at the action type.
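+ * Only the channel switch request/response action codes are expected
+ * here; any other value indicates a bug in the caller, hence the
+ * WARN_ON_ONCE in the default case below.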
+ */ + switch (tf->action_code) { + case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: + ieee80211_process_tdls_channel_switch_req(sdata, skb); + break; + case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: + ieee80211_process_tdls_channel_switch_resp(sdata, skb); + break; + default: + WARN_ON_ONCE(1); + return; + } +} + +void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata) +{ + struct sta_info *sta; + u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { + if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded || + !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + continue; + + ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr, + NL80211_TDLS_TEARDOWN, reason, + GFP_ATOMIC); + } + rcu_read_unlock(); +} + +void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata, + const u8 *peer, u16 reason) +{ + struct ieee80211_sta *sta; + + rcu_read_lock(); + sta = ieee80211_find_sta(&sdata->vif, peer); + if (!sta || !sta->tdls) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + tdls_dbg(sdata, "disconnected from TDLS peer %pM (Reason: %u=%s)\n", + peer, reason, + ieee80211_get_reason_code_string(reason)); + + ieee80211_tdls_oper_request(&sdata->vif, peer, + NL80211_TDLS_TEARDOWN, + WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE, + GFP_ATOMIC); +} diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c new file mode 100644 index 0000000000..e7f57bb18f --- /dev/null +++ b/net/mac80211/tkip.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2002-2004, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. + * Copyright (C) 2016 Intel Deutschland GmbH + */ +#include <linux/kernel.h> +#include <linux/bitops.h> +#include <linux/types.h> +#include <linux/netdevice.h> +#include <linux/export.h> +#include <asm/unaligned.h> + +#include <net/mac80211.h> +#include "driver-ops.h" +#include "key.h" +#include "tkip.h" +#include "wep.h" + +#define PHASE1_LOOP_COUNT 8 + +/* + * 2-byte by 2-byte subset of the full AES S-box table; second part of this + * table is identical to first part but byte-swapped + */ +static const u16 tkip_sbox[256] = +{ + 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, + 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A, + 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B, + 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B, + 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F, + 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F, + 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5, + 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F, + 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB, + 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397, + 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED, + 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A, + 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194, + 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3, + 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104, + 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D, + 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39, + 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695, + 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83, + 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76, + 0xDB3B, 0x6456, 0x744E, 0x141E, 
0x92DB, 0x0C0A, 0x486C, 0xB8E4, + 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B, + 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0, + 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018, + 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751, + 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85, + 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12, + 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9, + 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7, + 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A, + 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8, + 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, +}; + +static u16 tkipS(u16 val) +{ + return tkip_sbox[val & 0xff] ^ swab16(tkip_sbox[val >> 8]); +} + +static u8 *write_tkip_iv(u8 *pos, u16 iv16) +{ + *pos++ = iv16 >> 8; + *pos++ = ((iv16 >> 8) | 0x20) & 0x7f; + *pos++ = iv16 & 0xFF; + return pos; +} + +/* + * P1K := Phase1(TA, TK, TSC) + * TA = transmitter address (48 bits) + * TK = dot11DefaultKeyValue or dot11KeyMappingValue (128 bits) + * TSC = TKIP sequence counter (48 bits, only 32 msb bits used) + * P1K: 80 bits + */ +static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx, + const u8 *ta, u32 tsc_IV32) +{ + int i, j; + u16 *p1k = ctx->p1k; + + p1k[0] = tsc_IV32 & 0xFFFF; + p1k[1] = tsc_IV32 >> 16; + p1k[2] = get_unaligned_le16(ta + 0); + p1k[3] = get_unaligned_le16(ta + 2); + p1k[4] = get_unaligned_le16(ta + 4); + + for (i = 0; i < PHASE1_LOOP_COUNT; i++) { + j = 2 * (i & 1); + p1k[0] += tkipS(p1k[4] ^ get_unaligned_le16(tk + 0 + j)); + p1k[1] += tkipS(p1k[0] ^ get_unaligned_le16(tk + 4 + j)); + p1k[2] += tkipS(p1k[1] ^ get_unaligned_le16(tk + 8 + j)); + p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j)); + p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i; + } + ctx->state = TKIP_STATE_PHASE1_DONE; + ctx->p1k_iv32 = tsc_IV32; +} + +static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx, + u16 tsc_IV16, u8 *rc4key) +{ + u16 ppk[6]; + const u16 *p1k = ctx->p1k; + int i; + + ppk[0] = p1k[0]; + ppk[1] = p1k[1]; + ppk[2] = p1k[2]; + ppk[3] = p1k[3]; + ppk[4] = p1k[4]; + ppk[5] = p1k[4] + tsc_IV16; + + ppk[0] += tkipS(ppk[5] ^ get_unaligned_le16(tk + 0)); + ppk[1] += tkipS(ppk[0] ^ get_unaligned_le16(tk + 2)); + ppk[2] += tkipS(ppk[1] ^ get_unaligned_le16(tk + 4)); + ppk[3] += tkipS(ppk[2] ^ get_unaligned_le16(tk + 6)); + ppk[4] += tkipS(ppk[3] ^ get_unaligned_le16(tk + 8)); + ppk[5] += tkipS(ppk[4] ^ get_unaligned_le16(tk + 10)); + ppk[0] += ror16(ppk[5] ^ get_unaligned_le16(tk + 12), 1); + ppk[1] += ror16(ppk[0] ^ get_unaligned_le16(tk + 14), 1); + ppk[2] += ror16(ppk[1], 1); + ppk[3] += ror16(ppk[2], 1); + ppk[4] += ror16(ppk[3], 1); + ppk[5] += ror16(ppk[4], 1); + + rc4key = write_tkip_iv(rc4key, tsc_IV16); + *rc4key++ = ((ppk[5] ^ get_unaligned_le16(tk)) >> 1) & 0xFF; + + for (i = 0; i < 6; i++) + put_unaligned_le16(ppk[i], rc4key + 2 * i); +} + +/* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets + * of the IV. Returns pointer to the octet following IVs (i.e., beginning of + * the packet payload). 
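+ *
+ * The octets written are: TSC1, the WEP seed octet ((TSC1 | 0x20) & 0x7f),
+ * TSC0, the key ID octet with the Ext. IV bit set, and finally TSC2..TSC5
+ * of @pn as a little-endian 32-bit value (see write_tkip_iv() above).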
*/ +u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key_conf *keyconf, u64 pn) +{ + pos = write_tkip_iv(pos, TKIP_PN_TO_IV16(pn)); + *pos++ = (keyconf->keyidx << 6) | (1 << 5) /* Ext IV */; + put_unaligned_le32(TKIP_PN_TO_IV32(pn), pos); + return pos + 4; +} +EXPORT_SYMBOL_GPL(ieee80211_tkip_add_iv); + +static void ieee80211_compute_tkip_p1k(struct ieee80211_key *key, u32 iv32) +{ + struct ieee80211_sub_if_data *sdata = key->sdata; + struct tkip_ctx *ctx = &key->u.tkip.tx; + const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; + + lockdep_assert_held(&key->u.tkip.txlock); + + /* + * Update the P1K when the IV32 is different from the value it + * had when we last computed it (or when not initialised yet). + * This might flip-flop back and forth if packets are processed + * out-of-order due to the different ACs, but then we have to + * just compute the P1K more often. + */ + if (ctx->p1k_iv32 != iv32 || ctx->state == TKIP_STATE_NOT_INIT) + tkip_mixing_phase1(tk, ctx, sdata->vif.addr, iv32); +} + +void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf, + u32 iv32, u16 *p1k) +{ + struct ieee80211_key *key = (struct ieee80211_key *) + container_of(keyconf, struct ieee80211_key, conf); + struct tkip_ctx *ctx = &key->u.tkip.tx; + + spin_lock_bh(&key->u.tkip.txlock); + ieee80211_compute_tkip_p1k(key, iv32); + memcpy(p1k, ctx->p1k, sizeof(ctx->p1k)); + spin_unlock_bh(&key->u.tkip.txlock); +} +EXPORT_SYMBOL(ieee80211_get_tkip_p1k_iv); + +void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf, + const u8 *ta, u32 iv32, u16 *p1k) +{ + const u8 *tk = &keyconf->key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; + struct tkip_ctx ctx; + + tkip_mixing_phase1(tk, &ctx, ta, iv32); + memcpy(p1k, ctx.p1k, sizeof(ctx.p1k)); +} +EXPORT_SYMBOL(ieee80211_get_tkip_rx_p1k); + +void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf, + struct sk_buff *skb, u8 *p2k) +{ + struct ieee80211_key *key = (struct ieee80211_key *) + container_of(keyconf, struct ieee80211_key, conf); + const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; + struct tkip_ctx *ctx = &key->u.tkip.tx; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control); + u32 iv32 = get_unaligned_le32(&data[4]); + u16 iv16 = data[2] | (data[0] << 8); + + spin_lock(&key->u.tkip.txlock); + ieee80211_compute_tkip_p1k(key, iv32); + tkip_mixing_phase2(tk, ctx, iv16, p2k); + spin_unlock(&key->u.tkip.txlock); +} +EXPORT_SYMBOL(ieee80211_get_tkip_p2k); + +/* + * Encrypt packet payload with TKIP using @key. @pos is a pointer to the + * beginning of the buffer containing payload. This payload must include + * the IV/Ext.IV and space for (taildroom) four octets for ICV. + * @payload_len is the length of payload (_not_ including IV/ICV length). + * @ta is the transmitter addresses. + */ +int ieee80211_tkip_encrypt_data(struct arc4_ctx *ctx, + struct ieee80211_key *key, + struct sk_buff *skb, + u8 *payload, size_t payload_len) +{ + u8 rc4key[16]; + + ieee80211_get_tkip_p2k(&key->conf, skb, rc4key); + + return ieee80211_wep_encrypt_data(ctx, rc4key, 16, + payload, payload_len); +} + +/* Decrypt packet payload with TKIP using @key. @pos is a pointer to the + * beginning of the buffer containing IEEE 802.11 header payload, i.e., + * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the + * length of payload, including IV, Ext. IV, MIC, ICV. 
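+ *
+ * Returns TKIP_DECRYPT_OK on success, or one of the negative
+ * TKIP_DECRYPT_* codes declared in tkip.h when the Ext. IV bit is
+ * missing, the key index does not match, or the TSC is a replay.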
*/ +int ieee80211_tkip_decrypt_data(struct arc4_ctx *ctx, + struct ieee80211_key *key, + u8 *payload, size_t payload_len, u8 *ta, + u8 *ra, int only_iv, int queue, + u32 *out_iv32, u16 *out_iv16) +{ + u32 iv32; + u32 iv16; + u8 rc4key[16], keyid, *pos = payload; + int res; + const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; + struct tkip_ctx_rx *rx_ctx = &key->u.tkip.rx[queue]; + + if (payload_len < 12) + return -1; + + iv16 = (pos[0] << 8) | pos[2]; + keyid = pos[3]; + iv32 = get_unaligned_le32(pos + 4); + pos += 8; + + if (!(keyid & (1 << 5))) + return TKIP_DECRYPT_NO_EXT_IV; + + if ((keyid >> 6) != key->conf.keyidx) + return TKIP_DECRYPT_INVALID_KEYIDX; + + /* Reject replays if the received TSC is smaller than or equal to the + * last received value in a valid message, but with an exception for + * the case where a new key has been set and no valid frame using that + * key has yet received and the local RSC was initialized to 0. This + * exception allows the very first frame sent by the transmitter to be + * accepted even if that transmitter were to use TSC 0 (IEEE 802.11 + * described TSC to be initialized to 1 whenever a new key is taken into + * use). + */ + if (iv32 < rx_ctx->iv32 || + (iv32 == rx_ctx->iv32 && + (iv16 < rx_ctx->iv16 || + (iv16 == rx_ctx->iv16 && + (rx_ctx->iv32 || rx_ctx->iv16 || + rx_ctx->ctx.state != TKIP_STATE_NOT_INIT))))) + return TKIP_DECRYPT_REPLAY; + + if (only_iv) { + res = TKIP_DECRYPT_OK; + rx_ctx->ctx.state = TKIP_STATE_PHASE1_HW_UPLOADED; + goto done; + } + + if (rx_ctx->ctx.state == TKIP_STATE_NOT_INIT || + rx_ctx->iv32 != iv32) { + /* IV16 wrapped around - perform TKIP phase 1 */ + tkip_mixing_phase1(tk, &rx_ctx->ctx, ta, iv32); + } + if (key->local->ops->update_tkip_key && + key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && + rx_ctx->ctx.state != TKIP_STATE_PHASE1_HW_UPLOADED) { + struct ieee80211_sub_if_data *sdata = key->sdata; + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = container_of(key->sdata->bss, + struct ieee80211_sub_if_data, u.ap); + drv_update_tkip_key(key->local, sdata, &key->conf, key->sta, + iv32, rx_ctx->ctx.p1k); + rx_ctx->ctx.state = TKIP_STATE_PHASE1_HW_UPLOADED; + } + + tkip_mixing_phase2(tk, &rx_ctx->ctx, iv16, rc4key); + + res = ieee80211_wep_decrypt_data(ctx, rc4key, 16, pos, payload_len - 12); + done: + if (res == TKIP_DECRYPT_OK) { + /* + * Record previously received IV, will be copied into the + * key information after MIC verification. It is possible + * that we don't catch replays of fragments but that's ok + * because the Michael MIC verication will then fail. + */ + *out_iv32 = iv32; + *out_iv16 = iv16; + } + + return res; +} diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h new file mode 100644 index 0000000000..9d2f8bd36c --- /dev/null +++ b/net/mac80211/tkip.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2002-2004, Instant802 Networks, Inc. 
+ */ + +#ifndef TKIP_H +#define TKIP_H + +#include <linux/types.h> +#include <linux/crypto.h> +#include "key.h" + +int ieee80211_tkip_encrypt_data(struct arc4_ctx *ctx, + struct ieee80211_key *key, + struct sk_buff *skb, + u8 *payload, size_t payload_len); + +enum { + TKIP_DECRYPT_OK = 0, + TKIP_DECRYPT_NO_EXT_IV = -1, + TKIP_DECRYPT_INVALID_KEYIDX = -2, + TKIP_DECRYPT_REPLAY = -3, +}; +int ieee80211_tkip_decrypt_data(struct arc4_ctx *ctx, + struct ieee80211_key *key, + u8 *payload, size_t payload_len, u8 *ta, + u8 *ra, int only_iv, int queue, + u32 *out_iv32, u16 *out_iv16); + +#endif /* TKIP_H */ diff --git a/net/mac80211/trace.c b/net/mac80211/trace.c new file mode 100644 index 0000000000..837857261b --- /dev/null +++ b/net/mac80211/trace.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +/* bug in tracepoint.h, it should include this */ +#include <linux/module.h> + +/* sparse isn't too happy with all macros... */ +#ifndef __CHECKER__ +#include <net/cfg80211.h> +#include "driver-ops.h" +#include "debug.h" +#define CREATE_TRACE_POINTS +#include "trace.h" +#include "trace_msg.h" + +#ifdef CONFIG_MAC80211_MESSAGE_TRACING +void __sdata_info(const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + + pr_info("%pV", &vaf); + trace_mac80211_info(&vaf); + va_end(args); +} + +void __sdata_dbg(bool print, const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + + if (print) + pr_debug("%pV", &vaf); + trace_mac80211_dbg(&vaf); + va_end(args); +} + +void __sdata_err(const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + + pr_err("%pV", &vaf); + trace_mac80211_err(&vaf); + va_end(args); +} + +void __wiphy_dbg(struct wiphy *wiphy, bool print, const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + + if (print) + wiphy_dbg(wiphy, "%pV", &vaf); + trace_mac80211_dbg(&vaf); + va_end(args); +} +#endif +#endif diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h new file mode 100644 index 0000000000..b8c53b4a71 --- /dev/null +++ b/net/mac80211/trace.h @@ -0,0 +1,3069 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Portions of this file + * Copyright(c) 2016-2017 Intel Deutschland GmbH + * Copyright (C) 2018 - 2023 Intel Corporation + */ + +#if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ) +#define __MAC80211_DRIVER_TRACE + +#include <linux/tracepoint.h> +#include <net/mac80211.h> +#include "ieee80211_i.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mac80211 + +#define MAXNAME 32 +#define LOCAL_ENTRY __array(char, wiphy_name, 32) +#define LOCAL_ASSIGN strscpy(__entry->wiphy_name, wiphy_name(local->hw.wiphy), MAXNAME) +#define LOCAL_PR_FMT "%s" +#define LOCAL_PR_ARG __entry->wiphy_name + +#define STA_ENTRY __array(char, sta_addr, ETH_ALEN) +#define STA_ASSIGN (sta ? 
memcpy(__entry->sta_addr, sta->addr, ETH_ALEN) : \ + eth_zero_addr(__entry->sta_addr)) +#define STA_NAMED_ASSIGN(s) memcpy(__entry->sta_addr, (s)->addr, ETH_ALEN) +#define STA_PR_FMT " sta:%pM" +#define STA_PR_ARG __entry->sta_addr + +#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \ + __field(bool, p2p) \ + __string(vif_name, sdata->name) +#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \ + __entry->p2p = sdata->vif.p2p; \ + __assign_str(vif_name, sdata->name) +#define VIF_PR_FMT " vif:%s(%d%s)" +#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : "" + +#define CHANDEF_ENTRY __field(u32, control_freq) \ + __field(u32, freq_offset) \ + __field(u32, chan_width) \ + __field(u32, center_freq1) \ + __field(u32, freq1_offset) \ + __field(u32, center_freq2) +#define CHANDEF_ASSIGN(c) \ + __entry->control_freq = (c) ? ((c)->chan ? (c)->chan->center_freq : 0) : 0; \ + __entry->freq_offset = (c) ? ((c)->chan ? (c)->chan->freq_offset : 0) : 0; \ + __entry->chan_width = (c) ? (c)->width : 0; \ + __entry->center_freq1 = (c) ? (c)->center_freq1 : 0; \ + __entry->freq1_offset = (c) ? (c)->freq1_offset : 0; \ + __entry->center_freq2 = (c) ? (c)->center_freq2 : 0; +#define CHANDEF_PR_FMT " control:%d.%03d MHz width:%d center: %d.%03d/%d MHz" +#define CHANDEF_PR_ARG __entry->control_freq, __entry->freq_offset, __entry->chan_width, \ + __entry->center_freq1, __entry->freq1_offset, __entry->center_freq2 + +#define MIN_CHANDEF_ENTRY \ + __field(u32, min_control_freq) \ + __field(u32, min_freq_offset) \ + __field(u32, min_chan_width) \ + __field(u32, min_center_freq1) \ + __field(u32, min_freq1_offset) \ + __field(u32, min_center_freq2) + +#define MIN_CHANDEF_ASSIGN(c) \ + __entry->min_control_freq = (c)->chan ? (c)->chan->center_freq : 0; \ + __entry->min_freq_offset = (c)->chan ? 
(c)->chan->freq_offset : 0; \ + __entry->min_chan_width = (c)->width; \ + __entry->min_center_freq1 = (c)->center_freq1; \ + __entry->min_freq1_offset = (c)->freq1_offset; \ + __entry->min_center_freq2 = (c)->center_freq2; +#define MIN_CHANDEF_PR_FMT " min_control:%d.%03d MHz min_width:%d min_center: %d.%03d/%d MHz" +#define MIN_CHANDEF_PR_ARG __entry->min_control_freq, __entry->min_freq_offset, \ + __entry->min_chan_width, \ + __entry->min_center_freq1, __entry->min_freq1_offset, \ + __entry->min_center_freq2 + +#define CHANCTX_ENTRY CHANDEF_ENTRY \ + MIN_CHANDEF_ENTRY \ + __field(u8, rx_chains_static) \ + __field(u8, rx_chains_dynamic) +#define CHANCTX_ASSIGN CHANDEF_ASSIGN(&ctx->conf.def) \ + MIN_CHANDEF_ASSIGN(&ctx->conf.min_def) \ + __entry->rx_chains_static = ctx->conf.rx_chains_static; \ + __entry->rx_chains_dynamic = ctx->conf.rx_chains_dynamic +#define CHANCTX_PR_FMT CHANDEF_PR_FMT MIN_CHANDEF_PR_FMT " chains:%d/%d" +#define CHANCTX_PR_ARG CHANDEF_PR_ARG, MIN_CHANDEF_PR_ARG, \ + __entry->rx_chains_static, __entry->rx_chains_dynamic + +#define KEY_ENTRY __field(u32, cipher) \ + __field(u8, hw_key_idx) \ + __field(u8, flags) \ + __field(s8, keyidx) +#define KEY_ASSIGN(k) __entry->cipher = (k)->cipher; \ + __entry->flags = (k)->flags; \ + __entry->keyidx = (k)->keyidx; \ + __entry->hw_key_idx = (k)->hw_key_idx; +#define KEY_PR_FMT " cipher:0x%x, flags=%#x, keyidx=%d, hw_key_idx=%d" +#define KEY_PR_ARG __entry->cipher, __entry->flags, __entry->keyidx, __entry->hw_key_idx + +#define AMPDU_ACTION_ENTRY __field(enum ieee80211_ampdu_mlme_action, \ + ieee80211_ampdu_mlme_action) \ + STA_ENTRY \ + __field(u16, tid) \ + __field(u16, ssn) \ + __field(u16, buf_size) \ + __field(bool, amsdu) \ + __field(u16, timeout) \ + __field(u16, action) +#define AMPDU_ACTION_ASSIGN STA_NAMED_ASSIGN(params->sta); \ + __entry->tid = params->tid; \ + __entry->ssn = params->ssn; \ + __entry->buf_size = params->buf_size; \ + __entry->amsdu = params->amsdu; \ + __entry->timeout = params->timeout; \ + __entry->action = params->action; +#define AMPDU_ACTION_PR_FMT STA_PR_FMT " tid %d, ssn %d, buf_size %u, amsdu %d, timeout %d action %d" +#define AMPDU_ACTION_PR_ARG STA_PR_ARG, __entry->tid, __entry->ssn, \ + __entry->buf_size, __entry->amsdu, __entry->timeout, \ + __entry->action + +/* + * Tracing for driver callbacks. 
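+ *
+ * Most driver callbacks reuse one of the shared event classes below
+ * (local_only_evt, local_sdata_evt, sta_event, ...) via DEFINE_EVENT;
+ * callbacks that need to record additional arguments define their own
+ * TRACE_EVENT instead.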
+ */ + +DECLARE_EVENT_CLASS(local_only_evt, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local), + TP_STRUCT__entry( + LOCAL_ENTRY + ), + TP_fast_assign( + LOCAL_ASSIGN; + ), + TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG) +); + +DECLARE_EVENT_CLASS(local_sdata_addr_evt, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __array(char, addr, ETH_ALEN) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + memcpy(__entry->addr, sdata->vif.addr, ETH_ALEN); + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " addr:%pM", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr + ) +); + +DECLARE_EVENT_CLASS(local_u32_evt, + TP_PROTO(struct ieee80211_local *local, u32 value), + TP_ARGS(local, value), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u32, value) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->value = value; + ), + + TP_printk( + LOCAL_PR_FMT " value:%d", + LOCAL_PR_ARG, __entry->value + ) +); + +DECLARE_EVENT_CLASS(local_sdata_evt, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG + ) +); + +DEFINE_EVENT(local_only_evt, drv_return_void, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local) +); + +TRACE_EVENT(drv_return_int, + TP_PROTO(struct ieee80211_local *local, int ret), + TP_ARGS(local, ret), + TP_STRUCT__entry( + LOCAL_ENTRY + __field(int, ret) + ), + TP_fast_assign( + LOCAL_ASSIGN; + __entry->ret = ret; + ), + TP_printk(LOCAL_PR_FMT " - %d", LOCAL_PR_ARG, __entry->ret) +); + +TRACE_EVENT(drv_return_bool, + TP_PROTO(struct ieee80211_local *local, bool ret), + TP_ARGS(local, ret), + TP_STRUCT__entry( + LOCAL_ENTRY + __field(bool, ret) + ), + TP_fast_assign( + LOCAL_ASSIGN; + __entry->ret = ret; + ), + TP_printk(LOCAL_PR_FMT " - %s", LOCAL_PR_ARG, (__entry->ret) ? 
+ "true" : "false") +); + +TRACE_EVENT(drv_return_u32, + TP_PROTO(struct ieee80211_local *local, u32 ret), + TP_ARGS(local, ret), + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u32, ret) + ), + TP_fast_assign( + LOCAL_ASSIGN; + __entry->ret = ret; + ), + TP_printk(LOCAL_PR_FMT " - %u", LOCAL_PR_ARG, __entry->ret) +); + +TRACE_EVENT(drv_return_u64, + TP_PROTO(struct ieee80211_local *local, u64 ret), + TP_ARGS(local, ret), + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u64, ret) + ), + TP_fast_assign( + LOCAL_ASSIGN; + __entry->ret = ret; + ), + TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret) +); + +DEFINE_EVENT(local_only_evt, drv_start, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local) +); + +DEFINE_EVENT(local_u32_evt, drv_get_et_strings, + TP_PROTO(struct ieee80211_local *local, u32 sset), + TP_ARGS(local, sset) +); + +DEFINE_EVENT(local_u32_evt, drv_get_et_sset_count, + TP_PROTO(struct ieee80211_local *local, u32 sset), + TP_ARGS(local, sset) +); + +DEFINE_EVENT(local_only_evt, drv_get_et_stats, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local) +); + +DEFINE_EVENT(local_only_evt, drv_suspend, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local) +); + +DEFINE_EVENT(local_only_evt, drv_resume, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local) +); + +TRACE_EVENT(drv_set_wakeup, + TP_PROTO(struct ieee80211_local *local, bool enabled), + TP_ARGS(local, enabled), + TP_STRUCT__entry( + LOCAL_ENTRY + __field(bool, enabled) + ), + TP_fast_assign( + LOCAL_ASSIGN; + __entry->enabled = enabled; + ), + TP_printk(LOCAL_PR_FMT " enabled:%d", LOCAL_PR_ARG, __entry->enabled) +); + +DEFINE_EVENT(local_only_evt, drv_stop, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local) +); + +DEFINE_EVENT(local_sdata_addr_evt, drv_add_interface, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +TRACE_EVENT(drv_change_interface, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum nl80211_iftype type, bool p2p), + + TP_ARGS(local, sdata, type, p2p), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u32, new_type) + __field(bool, new_p2p) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->new_type = type; + __entry->new_p2p = p2p; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " new type:%d%s", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->new_type, + __entry->new_p2p ? 
"/p2p" : "" + ) +); + +DEFINE_EVENT(local_sdata_addr_evt, drv_remove_interface, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +TRACE_EVENT(drv_config, + TP_PROTO(struct ieee80211_local *local, + u32 changed), + + TP_ARGS(local, changed), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u32, changed) + __field(u32, flags) + __field(int, power_level) + __field(int, dynamic_ps_timeout) + __field(u16, listen_interval) + __field(u8, long_frame_max_tx_count) + __field(u8, short_frame_max_tx_count) + CHANDEF_ENTRY + __field(int, smps) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->changed = changed; + __entry->flags = local->hw.conf.flags; + __entry->power_level = local->hw.conf.power_level; + __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout; + __entry->listen_interval = local->hw.conf.listen_interval; + __entry->long_frame_max_tx_count = + local->hw.conf.long_frame_max_tx_count; + __entry->short_frame_max_tx_count = + local->hw.conf.short_frame_max_tx_count; + CHANDEF_ASSIGN(&local->hw.conf.chandef) + __entry->smps = local->hw.conf.smps_mode; + ), + + TP_printk( + LOCAL_PR_FMT " ch:%#x" CHANDEF_PR_FMT, + LOCAL_PR_ARG, __entry->changed, CHANDEF_PR_ARG + ) +); + +TRACE_EVENT(drv_vif_cfg_changed, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u64 changed), + + TP_ARGS(local, sdata, changed), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u64, changed) + __field(bool, assoc) + __field(bool, ibss_joined) + __field(bool, ibss_creator) + __field(u16, aid) + __dynamic_array(u32, arp_addr_list, + sdata->vif.cfg.arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ? + IEEE80211_BSS_ARP_ADDR_LIST_LEN : + sdata->vif.cfg.arp_addr_cnt) + __field(int, arp_addr_cnt) + __dynamic_array(u8, ssid, sdata->vif.cfg.ssid_len) + __field(int, s1g) + __field(bool, idle) + __field(bool, ps) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->changed = changed; + __entry->aid = sdata->vif.cfg.aid; + __entry->assoc = sdata->vif.cfg.assoc; + __entry->ibss_joined = sdata->vif.cfg.ibss_joined; + __entry->ibss_creator = sdata->vif.cfg.ibss_creator; + __entry->ps = sdata->vif.cfg.ps; + + __entry->arp_addr_cnt = sdata->vif.cfg.arp_addr_cnt; + memcpy(__get_dynamic_array(arp_addr_list), + sdata->vif.cfg.arp_addr_list, + sizeof(u32) * (sdata->vif.cfg.arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ? 
+ IEEE80211_BSS_ARP_ADDR_LIST_LEN : + sdata->vif.cfg.arp_addr_cnt)); + memcpy(__get_dynamic_array(ssid), + sdata->vif.cfg.ssid, + sdata->vif.cfg.ssid_len); + __entry->s1g = sdata->vif.cfg.s1g; + __entry->idle = sdata->vif.cfg.idle; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " changed:%#llx", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->changed + ) +); + +TRACE_EVENT(drv_link_info_changed, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *link_conf, + u64 changed), + + TP_ARGS(local, sdata, link_conf, changed), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u64, changed) + __field(int, link_id) + __field(bool, cts) + __field(bool, shortpre) + __field(bool, shortslot) + __field(bool, enable_beacon) + __field(u8, dtimper) + __field(u16, bcnint) + __field(u16, assoc_cap) + __field(u64, sync_tsf) + __field(u32, sync_device_ts) + __field(u8, sync_dtim_count) + __field(u32, basic_rates) + __array(int, mcast_rate, NUM_NL80211_BANDS) + __field(u16, ht_operation_mode) + __field(s32, cqm_rssi_thold) + __field(s32, cqm_rssi_hyst) + __field(u32, channel_width) + __field(u32, channel_cfreq1) + __field(u32, channel_cfreq1_offset) + __field(bool, qos) + __field(bool, hidden_ssid) + __field(int, txpower) + __field(u8, p2p_oppps_ctwindow) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->changed = changed; + __entry->link_id = link_conf->link_id; + __entry->shortpre = link_conf->use_short_preamble; + __entry->cts = link_conf->use_cts_prot; + __entry->shortslot = link_conf->use_short_slot; + __entry->enable_beacon = link_conf->enable_beacon; + __entry->dtimper = link_conf->dtim_period; + __entry->bcnint = link_conf->beacon_int; + __entry->assoc_cap = link_conf->assoc_capability; + __entry->sync_tsf = link_conf->sync_tsf; + __entry->sync_device_ts = link_conf->sync_device_ts; + __entry->sync_dtim_count = link_conf->sync_dtim_count; + __entry->basic_rates = link_conf->basic_rates; + memcpy(__entry->mcast_rate, link_conf->mcast_rate, + sizeof(__entry->mcast_rate)); + __entry->ht_operation_mode = link_conf->ht_operation_mode; + __entry->cqm_rssi_thold = link_conf->cqm_rssi_thold; + __entry->cqm_rssi_hyst = link_conf->cqm_rssi_hyst; + __entry->channel_width = link_conf->chandef.width; + __entry->channel_cfreq1 = link_conf->chandef.center_freq1; + __entry->channel_cfreq1_offset = link_conf->chandef.freq1_offset; + __entry->qos = link_conf->qos; + __entry->hidden_ssid = link_conf->hidden_ssid; + __entry->txpower = link_conf->txpower; + __entry->p2p_oppps_ctwindow = link_conf->p2p_noa_attr.oppps_ctwindow; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " link_id:%d, changed:%#llx", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id, + __entry->changed + ) +); + +TRACE_EVENT(drv_prepare_multicast, + TP_PROTO(struct ieee80211_local *local, int mc_count), + + TP_ARGS(local, mc_count), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(int, mc_count) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->mc_count = mc_count; + ), + + TP_printk( + LOCAL_PR_FMT " prepare mc (%d)", + LOCAL_PR_ARG, __entry->mc_count + ) +); + +TRACE_EVENT(drv_configure_filter, + TP_PROTO(struct ieee80211_local *local, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast), + + TP_ARGS(local, changed_flags, total_flags, multicast), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(unsigned int, changed) + __field(unsigned int, total) + __field(u64, multicast) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->changed = changed_flags; + __entry->total = 
*total_flags; + __entry->multicast = multicast; + ), + + TP_printk( + LOCAL_PR_FMT " changed:%#x total:%#x", + LOCAL_PR_ARG, __entry->changed, __entry->total + ) +); + +TRACE_EVENT(drv_config_iface_filter, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + unsigned int filter_flags, + unsigned int changed_flags), + + TP_ARGS(local, sdata, filter_flags, changed_flags), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(unsigned int, filter_flags) + __field(unsigned int, changed_flags) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->filter_flags = filter_flags; + __entry->changed_flags = changed_flags; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT + " filter_flags: %#x changed_flags: %#x", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->filter_flags, + __entry->changed_flags + ) +); + +TRACE_EVENT(drv_set_tim, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sta *sta, bool set), + + TP_ARGS(local, sta, set), + + TP_STRUCT__entry( + LOCAL_ENTRY + STA_ENTRY + __field(bool, set) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + STA_ASSIGN; + __entry->set = set; + ), + + TP_printk( + LOCAL_PR_FMT STA_PR_FMT " set:%d", + LOCAL_PR_ARG, STA_PR_ARG, __entry->set + ) +); + +TRACE_EVENT(drv_set_key, + TP_PROTO(struct ieee80211_local *local, + enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key), + + TP_ARGS(local, cmd, sdata, sta, key), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(u32, cmd) + KEY_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->cmd = cmd; + KEY_ASSIGN(key); + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " cmd: %d" KEY_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->cmd, KEY_PR_ARG + ) +); + +TRACE_EVENT(drv_update_tkip_key, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_key_conf *conf, + struct ieee80211_sta *sta, u32 iv32), + + TP_ARGS(local, sdata, conf, sta, iv32), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(u32, iv32) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->iv32 = iv32; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " iv32:%#x", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->iv32 + ) +); + +DEFINE_EVENT(local_sdata_evt, drv_hw_scan, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +DEFINE_EVENT(local_sdata_evt, drv_cancel_hw_scan, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +DEFINE_EVENT(local_sdata_evt, drv_sched_scan_start, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +DEFINE_EVENT(local_sdata_evt, drv_sched_scan_stop, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +TRACE_EVENT(drv_sw_scan_start, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + const u8 *mac_addr), + + TP_ARGS(local, sdata, mac_addr), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __array(char, mac_addr, ETH_ALEN) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + memcpy(__entry->mac_addr, mac_addr, ETH_ALEN); + ), + + TP_printk(LOCAL_PR_FMT ", " VIF_PR_FMT ", addr:%pM", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->mac_addr) +); + +DEFINE_EVENT(local_sdata_evt, drv_sw_scan_complete, + 
TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +TRACE_EVENT(drv_get_stats, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_low_level_stats *stats, + int ret), + + TP_ARGS(local, stats, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(int, ret) + __field(unsigned int, ackfail) + __field(unsigned int, rtsfail) + __field(unsigned int, fcserr) + __field(unsigned int, rtssucc) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->ret = ret; + __entry->ackfail = stats->dot11ACKFailureCount; + __entry->rtsfail = stats->dot11RTSFailureCount; + __entry->fcserr = stats->dot11FCSErrorCount; + __entry->rtssucc = stats->dot11RTSSuccessCount; + ), + + TP_printk( + LOCAL_PR_FMT " ret:%d", + LOCAL_PR_ARG, __entry->ret + ) +); + +TRACE_EVENT(drv_get_key_seq, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_key_conf *key), + + TP_ARGS(local, key), + + TP_STRUCT__entry( + LOCAL_ENTRY + KEY_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + KEY_ASSIGN(key); + ), + + TP_printk( + LOCAL_PR_FMT KEY_PR_FMT, + LOCAL_PR_ARG, KEY_PR_ARG + ) +); + +DEFINE_EVENT(local_u32_evt, drv_set_frag_threshold, + TP_PROTO(struct ieee80211_local *local, u32 value), + TP_ARGS(local, value) +); + +DEFINE_EVENT(local_u32_evt, drv_set_rts_threshold, + TP_PROTO(struct ieee80211_local *local, u32 value), + TP_ARGS(local, value) +); + +TRACE_EVENT(drv_set_coverage_class, + TP_PROTO(struct ieee80211_local *local, s16 value), + + TP_ARGS(local, value), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(s16, value) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->value = value; + ), + + TP_printk( + LOCAL_PR_FMT " value:%d", + LOCAL_PR_ARG, __entry->value + ) +); + +TRACE_EVENT(drv_sta_notify, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta), + + TP_ARGS(local, sdata, cmd, sta), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(u32, cmd) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->cmd = cmd; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " cmd:%d", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->cmd + ) +); + +TRACE_EVENT(drv_sta_state, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state), + + TP_ARGS(local, sdata, sta, old_state, new_state), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(u32, old_state) + __field(u32, new_state) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->old_state = old_state; + __entry->new_state = new_state; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " state: %d->%d", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, + __entry->old_state, __entry->new_state + ) +); + +TRACE_EVENT(drv_sta_set_txpwr, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + + TP_ARGS(local, sdata, sta), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(s16, txpwr) + __field(u8, type) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->txpwr = sta->deflink.txpwr.power; + __entry->type = sta->deflink.txpwr.type; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " txpwr: %d type %d", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, + __entry->txpwr, __entry->type + ) +); + +TRACE_EVENT(drv_sta_rc_update, + 
TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + u32 changed), + + TP_ARGS(local, sdata, sta, changed), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(u32, changed) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->changed = changed; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " changed: 0x%x", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->changed + ) +); + +DECLARE_EVENT_CLASS(sta_event, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + + TP_ARGS(local, sdata, sta), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG + ) +); + +DEFINE_EVENT(sta_event, drv_sta_statistics, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + TP_ARGS(local, sdata, sta) +); + +DEFINE_EVENT(sta_event, drv_sta_add, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + TP_ARGS(local, sdata, sta) +); + +DEFINE_EVENT(sta_event, drv_sta_remove, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + TP_ARGS(local, sdata, sta) +); + +DEFINE_EVENT(sta_event, drv_sta_pre_rcu_remove, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + TP_ARGS(local, sdata, sta) +); + +DEFINE_EVENT(sta_event, drv_sync_rx_queues, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + TP_ARGS(local, sdata, sta) +); + +DEFINE_EVENT(sta_event, drv_sta_rate_tbl_update, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + TP_ARGS(local, sdata, sta) +); + +TRACE_EVENT(drv_conf_tx, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + unsigned int link_id, + u16 ac, const struct ieee80211_tx_queue_params *params), + + TP_ARGS(local, sdata, link_id, ac, params), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(unsigned int, link_id) + __field(u16, ac) + __field(u16, txop) + __field(u16, cw_min) + __field(u16, cw_max) + __field(u8, aifs) + __field(bool, uapsd) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->link_id = link_id; + __entry->ac = ac; + __entry->txop = params->txop; + __entry->cw_max = params->cw_max; + __entry->cw_min = params->cw_min; + __entry->aifs = params->aifs; + __entry->uapsd = params->uapsd; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " link_id: %d, AC:%d", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id, __entry->ac + ) +); + +DEFINE_EVENT(local_sdata_evt, drv_get_tsf, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +TRACE_EVENT(drv_set_tsf, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u64 tsf), + + TP_ARGS(local, sdata, tsf), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u64, tsf) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->tsf = tsf; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " tsf:%llu", + LOCAL_PR_ARG, VIF_PR_ARG, (unsigned long long)__entry->tsf + ) +); + +TRACE_EVENT(drv_offset_tsf, + TP_PROTO(struct ieee80211_local 
*local, + struct ieee80211_sub_if_data *sdata, + s64 offset), + + TP_ARGS(local, sdata, offset), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(s64, tsf_offset) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->tsf_offset = offset; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " tsf offset:%lld", + LOCAL_PR_ARG, VIF_PR_ARG, + (unsigned long long)__entry->tsf_offset + ) +); + +DEFINE_EVENT(local_sdata_evt, drv_reset_tsf, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +DEFINE_EVENT(local_only_evt, drv_tx_last_beacon, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local) +); + +TRACE_EVENT(drv_ampdu_action, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_ampdu_params *params), + + TP_ARGS(local, sdata, params), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + AMPDU_ACTION_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + AMPDU_ACTION_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT AMPDU_ACTION_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, AMPDU_ACTION_PR_ARG + ) +); + +TRACE_EVENT(drv_get_survey, + TP_PROTO(struct ieee80211_local *local, int _idx, + struct survey_info *survey), + + TP_ARGS(local, _idx, survey), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(int, idx) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->idx = _idx; + ), + + TP_printk( + LOCAL_PR_FMT " idx:%d", + LOCAL_PR_ARG, __entry->idx + ) +); + +TRACE_EVENT(drv_flush, + TP_PROTO(struct ieee80211_local *local, + u32 queues, bool drop), + + TP_ARGS(local, queues, drop), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(bool, drop) + __field(u32, queues) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->drop = drop; + __entry->queues = queues; + ), + + TP_printk( + LOCAL_PR_FMT " queues:0x%x drop:%d", + LOCAL_PR_ARG, __entry->queues, __entry->drop + ) +); + +DEFINE_EVENT(sta_event, drv_flush_sta, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + TP_ARGS(local, sdata, sta) +); + +TRACE_EVENT(drv_channel_switch, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel_switch *ch_switch), + + TP_ARGS(local, sdata, ch_switch), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + CHANDEF_ENTRY + __field(u64, timestamp) + __field(u32, device_timestamp) + __field(bool, block_tx) + __field(u8, count) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + CHANDEF_ASSIGN(&ch_switch->chandef) + __entry->timestamp = ch_switch->timestamp; + __entry->device_timestamp = ch_switch->device_timestamp; + __entry->block_tx = ch_switch->block_tx; + __entry->count = ch_switch->count; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " new " CHANDEF_PR_FMT " count:%d", + LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count + ) +); + +TRACE_EVENT(drv_set_antenna, + TP_PROTO(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant, int ret), + + TP_ARGS(local, tx_ant, rx_ant, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u32, tx_ant) + __field(u32, rx_ant) + __field(int, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->tx_ant = tx_ant; + __entry->rx_ant = rx_ant; + __entry->ret = ret; + ), + + TP_printk( + LOCAL_PR_FMT " tx_ant:%d rx_ant:%d ret:%d", + LOCAL_PR_ARG, __entry->tx_ant, __entry->rx_ant, __entry->ret + ) +); + +TRACE_EVENT(drv_get_antenna, + TP_PROTO(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant, int ret), + + TP_ARGS(local, tx_ant, rx_ant, ret), + + 
TP_STRUCT__entry( + LOCAL_ENTRY + __field(u32, tx_ant) + __field(u32, rx_ant) + __field(int, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->tx_ant = tx_ant; + __entry->rx_ant = rx_ant; + __entry->ret = ret; + ), + + TP_printk( + LOCAL_PR_FMT " tx_ant:%d rx_ant:%d ret:%d", + LOCAL_PR_ARG, __entry->tx_ant, __entry->rx_ant, __entry->ret + ) +); + +TRACE_EVENT(drv_remain_on_channel, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel *chan, + unsigned int duration, + enum ieee80211_roc_type type), + + TP_ARGS(local, sdata, chan, duration, type), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(int, center_freq) + __field(int, freq_offset) + __field(unsigned int, duration) + __field(u32, type) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->center_freq = chan->center_freq; + __entry->freq_offset = chan->freq_offset; + __entry->duration = duration; + __entry->type = type; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " freq:%d.%03dMHz duration:%dms type=%d", + LOCAL_PR_ARG, VIF_PR_ARG, + __entry->center_freq, __entry->freq_offset, + __entry->duration, __entry->type + ) +); + +DEFINE_EVENT(local_sdata_evt, drv_cancel_remain_on_channel, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +TRACE_EVENT(drv_set_ringparam, + TP_PROTO(struct ieee80211_local *local, u32 tx, u32 rx), + + TP_ARGS(local, tx, rx), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u32, tx) + __field(u32, rx) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->tx = tx; + __entry->rx = rx; + ), + + TP_printk( + LOCAL_PR_FMT " tx:%d rx %d", + LOCAL_PR_ARG, __entry->tx, __entry->rx + ) +); + +TRACE_EVENT(drv_get_ringparam, + TP_PROTO(struct ieee80211_local *local, u32 *tx, u32 *tx_max, + u32 *rx, u32 *rx_max), + + TP_ARGS(local, tx, tx_max, rx, rx_max), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u32, tx) + __field(u32, tx_max) + __field(u32, rx) + __field(u32, rx_max) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->tx = *tx; + __entry->tx_max = *tx_max; + __entry->rx = *rx; + __entry->rx_max = *rx_max; + ), + + TP_printk( + LOCAL_PR_FMT " tx:%d tx_max %d rx %d rx_max %d", + LOCAL_PR_ARG, + __entry->tx, __entry->tx_max, __entry->rx, __entry->rx_max + ) +); + +DEFINE_EVENT(local_only_evt, drv_tx_frames_pending, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local) +); + +DEFINE_EVENT(local_only_evt, drv_offchannel_tx_cancel_wait, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local) +); + +TRACE_EVENT(drv_set_bitrate_mask, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + const struct cfg80211_bitrate_mask *mask), + + TP_ARGS(local, sdata, mask), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u32, legacy_2g) + __field(u32, legacy_5g) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->legacy_2g = mask->control[NL80211_BAND_2GHZ].legacy; + __entry->legacy_5g = mask->control[NL80211_BAND_5GHZ].legacy; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " 2G Mask:0x%x 5G Mask:0x%x", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->legacy_2g, __entry->legacy_5g + ) +); + +TRACE_EVENT(drv_set_rekey_data, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_gtk_rekey_data *data), + + TP_ARGS(local, sdata, data), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __array(u8, kek, NL80211_KEK_LEN) + __array(u8, kck, NL80211_KCK_LEN) + __array(u8, replay_ctr, NL80211_REPLAY_CTR_LEN) + ), + + 
TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + memcpy(__entry->kek, data->kek, NL80211_KEK_LEN); + memcpy(__entry->kck, data->kck, NL80211_KCK_LEN); + memcpy(__entry->replay_ctr, data->replay_ctr, + NL80211_REPLAY_CTR_LEN); + ), + + TP_printk(LOCAL_PR_FMT VIF_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG) +); + +TRACE_EVENT(drv_event_callback, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + const struct ieee80211_event *_event), + + TP_ARGS(local, sdata, _event), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u32, type) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->type = _event->type; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " event:%d", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->type + ) +); + +DECLARE_EVENT_CLASS(release_evt, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sta *sta, + u16 tids, int num_frames, + enum ieee80211_frame_release_type reason, + bool more_data), + + TP_ARGS(local, sta, tids, num_frames, reason, more_data), + + TP_STRUCT__entry( + LOCAL_ENTRY + STA_ENTRY + __field(u16, tids) + __field(int, num_frames) + __field(int, reason) + __field(bool, more_data) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + STA_ASSIGN; + __entry->tids = tids; + __entry->num_frames = num_frames; + __entry->reason = reason; + __entry->more_data = more_data; + ), + + TP_printk( + LOCAL_PR_FMT STA_PR_FMT + " TIDs:0x%.4x frames:%d reason:%d more:%d", + LOCAL_PR_ARG, STA_PR_ARG, __entry->tids, __entry->num_frames, + __entry->reason, __entry->more_data + ) +); + +DEFINE_EVENT(release_evt, drv_release_buffered_frames, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sta *sta, + u16 tids, int num_frames, + enum ieee80211_frame_release_type reason, + bool more_data), + + TP_ARGS(local, sta, tids, num_frames, reason, more_data) +); + +DEFINE_EVENT(release_evt, drv_allow_buffered_frames, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sta *sta, + u16 tids, int num_frames, + enum ieee80211_frame_release_type reason, + bool more_data), + + TP_ARGS(local, sta, tids, num_frames, reason, more_data) +); + +DECLARE_EVENT_CLASS(mgd_prepare_complete_tx_evt, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u16 duration, u16 subtype, bool success), + + TP_ARGS(local, sdata, duration, subtype, success), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u32, duration) + __field(u16, subtype) + __field(u8, success) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->duration = duration; + __entry->subtype = subtype; + __entry->success = success; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " duration: %u, subtype:0x%x, success:%d", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->duration, + __entry->subtype, __entry->success + ) +); + +DEFINE_EVENT(mgd_prepare_complete_tx_evt, drv_mgd_prepare_tx, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u16 duration, u16 subtype, bool success), + + TP_ARGS(local, sdata, duration, subtype, success) +); + +DEFINE_EVENT(mgd_prepare_complete_tx_evt, drv_mgd_complete_tx, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u16 duration, u16 subtype, bool success), + + TP_ARGS(local, sdata, duration, subtype, success) +); + +DEFINE_EVENT(local_sdata_evt, drv_mgd_protect_tdls_discover, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + + TP_ARGS(local, sdata) +); + +DECLARE_EVENT_CLASS(local_chanctx, + TP_PROTO(struct ieee80211_local *local, + struct 
ieee80211_chanctx *ctx), + + TP_ARGS(local, ctx), + + TP_STRUCT__entry( + LOCAL_ENTRY + CHANCTX_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + CHANCTX_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT CHANCTX_PR_FMT, + LOCAL_PR_ARG, CHANCTX_PR_ARG + ) +); + +DEFINE_EVENT(local_chanctx, drv_add_chanctx, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx), + TP_ARGS(local, ctx) +); + +DEFINE_EVENT(local_chanctx, drv_remove_chanctx, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx), + TP_ARGS(local, ctx) +); + +TRACE_EVENT(drv_change_chanctx, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + u32 changed), + + TP_ARGS(local, ctx, changed), + + TP_STRUCT__entry( + LOCAL_ENTRY + CHANCTX_ENTRY + __field(u32, changed) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + CHANCTX_ASSIGN; + __entry->changed = changed; + ), + + TP_printk( + LOCAL_PR_FMT CHANCTX_PR_FMT " changed:%#x", + LOCAL_PR_ARG, CHANCTX_PR_ARG, __entry->changed + ) +); + +#if !defined(__TRACE_VIF_ENTRY) +#define __TRACE_VIF_ENTRY +struct trace_vif_entry { + enum nl80211_iftype vif_type; + bool p2p; + char vif_name[IFNAMSIZ]; +} __packed; + +struct trace_chandef_entry { + u32 control_freq; + u32 freq_offset; + u32 chan_width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; +} __packed; + +struct trace_switch_entry { + struct trace_vif_entry vif; + unsigned int link_id; + struct trace_chandef_entry old_chandef; + struct trace_chandef_entry new_chandef; +} __packed; + +#define SWITCH_ENTRY_ASSIGN(to, from) local_vifs[i].to = vifs[i].from +#endif + +TRACE_EVENT(drv_switch_vif_chanctx, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, enum ieee80211_chanctx_switch_mode mode), + TP_ARGS(local, vifs, n_vifs, mode), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(int, n_vifs) + __field(u32, mode) + __dynamic_array(u8, vifs, + sizeof(struct trace_switch_entry) * n_vifs) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->n_vifs = n_vifs; + __entry->mode = mode; + { + struct trace_switch_entry *local_vifs = + __get_dynamic_array(vifs); + int i; + + for (i = 0; i < n_vifs; i++) { + struct ieee80211_sub_if_data *sdata; + + sdata = container_of(vifs[i].vif, + struct ieee80211_sub_if_data, + vif); + + SWITCH_ENTRY_ASSIGN(vif.vif_type, vif->type); + SWITCH_ENTRY_ASSIGN(vif.p2p, vif->p2p); + SWITCH_ENTRY_ASSIGN(link_id, link_conf->link_id); + strncpy(local_vifs[i].vif.vif_name, + sdata->name, + sizeof(local_vifs[i].vif.vif_name)); + SWITCH_ENTRY_ASSIGN(old_chandef.control_freq, + old_ctx->def.chan->center_freq); + SWITCH_ENTRY_ASSIGN(old_chandef.freq_offset, + old_ctx->def.chan->freq_offset); + SWITCH_ENTRY_ASSIGN(old_chandef.chan_width, + old_ctx->def.width); + SWITCH_ENTRY_ASSIGN(old_chandef.center_freq1, + old_ctx->def.center_freq1); + SWITCH_ENTRY_ASSIGN(old_chandef.freq1_offset, + old_ctx->def.freq1_offset); + SWITCH_ENTRY_ASSIGN(old_chandef.center_freq2, + old_ctx->def.center_freq2); + SWITCH_ENTRY_ASSIGN(new_chandef.control_freq, + new_ctx->def.chan->center_freq); + SWITCH_ENTRY_ASSIGN(new_chandef.freq_offset, + new_ctx->def.chan->freq_offset); + SWITCH_ENTRY_ASSIGN(new_chandef.chan_width, + new_ctx->def.width); + SWITCH_ENTRY_ASSIGN(new_chandef.center_freq1, + new_ctx->def.center_freq1); + SWITCH_ENTRY_ASSIGN(new_chandef.freq1_offset, + new_ctx->def.freq1_offset); + SWITCH_ENTRY_ASSIGN(new_chandef.center_freq2, + new_ctx->def.center_freq2); + } + } + ), + + TP_printk( + LOCAL_PR_FMT " n_vifs:%d mode:%d", + LOCAL_PR_ARG, 
__entry->n_vifs, __entry->mode + ) +); + +DECLARE_EVENT_CLASS(local_sdata_chanctx, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx *ctx), + + TP_ARGS(local, sdata, link_conf, ctx), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + CHANCTX_ENTRY + __field(unsigned int, link_id) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + CHANCTX_ASSIGN; + __entry->link_id = link_conf->link_id; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " link_id:%d" CHANCTX_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id, CHANCTX_PR_ARG + ) +); + +DEFINE_EVENT(local_sdata_chanctx, drv_assign_vif_chanctx, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx *ctx), + TP_ARGS(local, sdata, link_conf, ctx) +); + +DEFINE_EVENT(local_sdata_chanctx, drv_unassign_vif_chanctx, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx *ctx), + TP_ARGS(local, sdata, link_conf, ctx) +); + +TRACE_EVENT(drv_start_ap, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *link_conf), + + TP_ARGS(local, sdata, link_conf), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u32, link_id) + __field(u8, dtimper) + __field(u16, bcnint) + __dynamic_array(u8, ssid, sdata->vif.cfg.ssid_len) + __field(bool, hidden_ssid) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->link_id = link_conf->link_id; + __entry->dtimper = link_conf->dtim_period; + __entry->bcnint = link_conf->beacon_int; + __entry->hidden_ssid = link_conf->hidden_ssid; + memcpy(__get_dynamic_array(ssid), + sdata->vif.cfg.ssid, + sdata->vif.cfg.ssid_len); + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " link id %u", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id + ) +); + +TRACE_EVENT(drv_stop_ap, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *link_conf), + + TP_ARGS(local, sdata, link_conf), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u32, link_id) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->link_id = link_conf->link_id; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " link id %u", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id + ) +); + +TRACE_EVENT(drv_reconfig_complete, + TP_PROTO(struct ieee80211_local *local, + enum ieee80211_reconfig_type reconfig_type), + TP_ARGS(local, reconfig_type), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u8, reconfig_type) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->reconfig_type = reconfig_type; + ), + + TP_printk( + LOCAL_PR_FMT " reconfig_type:%d", + LOCAL_PR_ARG, __entry->reconfig_type + ) + +); + +#if IS_ENABLED(CONFIG_IPV6) +DEFINE_EVENT(local_sdata_evt, drv_ipv6_addr_change, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); +#endif + +TRACE_EVENT(drv_join_ibss, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *info), + + TP_ARGS(local, sdata, info), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u8, dtimper) + __field(u16, bcnint) + __dynamic_array(u8, ssid, sdata->vif.cfg.ssid_len) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->dtimper = info->dtim_period; + __entry->bcnint = info->beacon_int; + 
memcpy(__get_dynamic_array(ssid), + sdata->vif.cfg.ssid, + sdata->vif.cfg.ssid_len); + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG + ) +); + +DEFINE_EVENT(local_sdata_evt, drv_leave_ibss, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +TRACE_EVENT(drv_get_expected_throughput, + TP_PROTO(struct ieee80211_sta *sta), + + TP_ARGS(sta), + + TP_STRUCT__entry( + STA_ENTRY + ), + + TP_fast_assign( + STA_ASSIGN; + ), + + TP_printk( + STA_PR_FMT, STA_PR_ARG + ) +); + +TRACE_EVENT(drv_start_nan, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_nan_conf *conf), + + TP_ARGS(local, sdata, conf), + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u8, master_pref) + __field(u8, bands) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->master_pref = conf->master_pref; + __entry->bands = conf->bands; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT + ", master preference: %u, bands: 0x%0x", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->master_pref, + __entry->bands + ) +); + +TRACE_EVENT(drv_stop_nan, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + + TP_ARGS(local, sdata), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG + ) +); + +TRACE_EVENT(drv_nan_change_conf, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_nan_conf *conf, + u32 changes), + + TP_ARGS(local, sdata, conf, changes), + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u8, master_pref) + __field(u8, bands) + __field(u32, changes) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->master_pref = conf->master_pref; + __entry->bands = conf->bands; + __entry->changes = changes; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT + ", master preference: %u, bands: 0x%0x, changes: 0x%x", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->master_pref, + __entry->bands, __entry->changes + ) +); + +TRACE_EVENT(drv_add_nan_func, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + const struct cfg80211_nan_func *func), + + TP_ARGS(local, sdata, func), + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u8, type) + __field(u8, inst_id) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->type = func->type; + __entry->inst_id = func->instance_id; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT + ", type: %u, inst_id: %u", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->type, __entry->inst_id + ) +); + +TRACE_EVENT(drv_del_nan_func, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u8 instance_id), + + TP_ARGS(local, sdata, instance_id), + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u8, instance_id) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->instance_id = instance_id; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT + ", instance_id: %u", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->instance_id + ) +); + +DEFINE_EVENT(local_sdata_evt, drv_start_pmsr, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +DEFINE_EVENT(local_sdata_evt, drv_abort_pmsr, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +TRACE_EVENT(drv_set_default_unicast_key, + TP_PROTO(struct ieee80211_local *local, + struct 
ieee80211_sub_if_data *sdata, + int key_idx), + + TP_ARGS(local, sdata, key_idx), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(int, key_idx) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->key_idx = key_idx; + ), + + TP_printk(LOCAL_PR_FMT VIF_PR_FMT " key_idx:%d", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->key_idx) +); + +TRACE_EVENT(drv_channel_switch_beacon, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_chan_def *chandef), + + TP_ARGS(local, sdata, chandef), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + CHANDEF_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + CHANDEF_ASSIGN(chandef); + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " channel switch to " CHANDEF_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG + ) +); + +TRACE_EVENT(drv_pre_channel_switch, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel_switch *ch_switch), + + TP_ARGS(local, sdata, ch_switch), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + CHANDEF_ENTRY + __field(u64, timestamp) + __field(u32, device_timestamp) + __field(bool, block_tx) + __field(u8, count) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + CHANDEF_ASSIGN(&ch_switch->chandef) + __entry->timestamp = ch_switch->timestamp; + __entry->device_timestamp = ch_switch->device_timestamp; + __entry->block_tx = ch_switch->block_tx; + __entry->count = ch_switch->count; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " prepare channel switch to " + CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu", + LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count, + __entry->block_tx, __entry->timestamp + ) +); + +DEFINE_EVENT(local_sdata_evt, drv_post_channel_switch, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +DEFINE_EVENT(local_sdata_evt, drv_abort_channel_switch, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +TRACE_EVENT(drv_channel_switch_rx_beacon, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel_switch *ch_switch), + + TP_ARGS(local, sdata, ch_switch), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + CHANDEF_ENTRY + __field(u64, timestamp) + __field(u32, device_timestamp) + __field(bool, block_tx) + __field(u8, count) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + CHANDEF_ASSIGN(&ch_switch->chandef) + __entry->timestamp = ch_switch->timestamp; + __entry->device_timestamp = ch_switch->device_timestamp; + __entry->block_tx = ch_switch->block_tx; + __entry->count = ch_switch->count; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT + " received a channel switch beacon to " + CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu", + LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count, + __entry->block_tx, __entry->timestamp + ) +); + +TRACE_EVENT(drv_get_txpower, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + int dbm, int ret), + + TP_ARGS(local, sdata, dbm, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(int, dbm) + __field(int, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->dbm = dbm; + __entry->ret = ret; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " dbm:%d ret:%d", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->dbm, __entry->ret + ) +); + +TRACE_EVENT(drv_tdls_channel_switch, + TP_PROTO(struct ieee80211_local *local, + struct 
ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, u8 oper_class, + struct cfg80211_chan_def *chandef), + + TP_ARGS(local, sdata, sta, oper_class, chandef), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(u8, oper_class) + CHANDEF_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->oper_class = oper_class; + CHANDEF_ASSIGN(chandef) + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " tdls channel switch to" + CHANDEF_PR_FMT " oper_class:%d " STA_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->oper_class, + STA_PR_ARG + ) +); + +TRACE_EVENT(drv_tdls_cancel_channel_switch, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + + TP_ARGS(local, sdata, sta), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT + " tdls cancel channel switch with " STA_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG + ) +); + +TRACE_EVENT(drv_tdls_recv_channel_switch, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_tdls_ch_sw_params *params), + + TP_ARGS(local, sdata, params), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u8, action_code) + STA_ENTRY + CHANDEF_ENTRY + __field(u32, status) + __field(bool, peer_initiator) + __field(u32, timestamp) + __field(u16, switch_time) + __field(u16, switch_timeout) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_NAMED_ASSIGN(params->sta); + CHANDEF_ASSIGN(params->chandef) + __entry->peer_initiator = params->sta->tdls_initiator; + __entry->action_code = params->action_code; + __entry->status = params->status; + __entry->timestamp = params->timestamp; + __entry->switch_time = params->switch_time; + __entry->switch_timeout = params->switch_timeout; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " received tdls channel switch packet" + " action:%d status:%d time:%d switch time:%d switch" + " timeout:%d initiator: %d chan:" CHANDEF_PR_FMT STA_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, __entry->action_code, __entry->status, + __entry->timestamp, __entry->switch_time, + __entry->switch_timeout, __entry->peer_initiator, + CHANDEF_PR_ARG, STA_PR_ARG + ) +); + +TRACE_EVENT(drv_wake_tx_queue, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct txq_info *txq), + + TP_ARGS(local, sdata, txq), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(u8, ac) + __field(u8, tid) + ), + + TP_fast_assign( + struct ieee80211_sta *sta = txq->txq.sta; + + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->ac = txq->txq.ac; + __entry->tid = txq->txq.tid; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ac:%d tid:%d", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ac, __entry->tid + ) +); + +TRACE_EVENT(drv_get_ftm_responder_stats, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_ftm_responder_stats *ftm_stats), + + TP_ARGS(local, sdata, ftm_stats), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG + ) +); + +DEFINE_EVENT(local_sdata_addr_evt, drv_update_vif_offload, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +DECLARE_EVENT_CLASS(sta_flag_evt, + TP_PROTO(struct ieee80211_local 
*local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, bool enabled), + + TP_ARGS(local, sdata, sta, enabled), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(bool, enabled) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->enabled = enabled; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " enabled:%d", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->enabled + ) +); + +DEFINE_EVENT(sta_flag_evt, drv_sta_set_4addr, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, bool enabled), + + TP_ARGS(local, sdata, sta, enabled) +); + +DEFINE_EVENT(sta_flag_evt, drv_sta_set_decap_offload, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, bool enabled), + + TP_ARGS(local, sdata, sta, enabled) +); + +TRACE_EVENT(drv_add_twt_setup, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sta *sta, + struct ieee80211_twt_setup *twt, + struct ieee80211_twt_params *twt_agrt), + + TP_ARGS(local, sta, twt, twt_agrt), + + TP_STRUCT__entry( + LOCAL_ENTRY + STA_ENTRY + __field(u8, dialog_token) + __field(u8, control) + __field(__le16, req_type) + __field(__le64, twt) + __field(u8, duration) + __field(__le16, mantissa) + __field(u8, channel) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + STA_ASSIGN; + __entry->dialog_token = twt->dialog_token; + __entry->control = twt->control; + __entry->req_type = twt_agrt->req_type; + __entry->twt = twt_agrt->twt; + __entry->duration = twt_agrt->min_twt_dur; + __entry->mantissa = twt_agrt->mantissa; + __entry->channel = twt_agrt->channel; + ), + + TP_printk( + LOCAL_PR_FMT STA_PR_FMT + " token:%d control:0x%02x req_type:0x%04x" + " twt:%llu duration:%d mantissa:%d channel:%d", + LOCAL_PR_ARG, STA_PR_ARG, __entry->dialog_token, + __entry->control, le16_to_cpu(__entry->req_type), + le64_to_cpu(__entry->twt), __entry->duration, + le16_to_cpu(__entry->mantissa), __entry->channel + ) +); + +TRACE_EVENT(drv_twt_teardown_request, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sta *sta, u8 flowid), + + TP_ARGS(local, sta, flowid), + + TP_STRUCT__entry( + LOCAL_ENTRY + STA_ENTRY + __field(u8, flowid) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + STA_ASSIGN; + __entry->flowid = flowid; + ), + + TP_printk( + LOCAL_PR_FMT STA_PR_FMT " flowid:%d", + LOCAL_PR_ARG, STA_PR_ARG, __entry->flowid + ) +); + +DEFINE_EVENT(sta_event, drv_net_fill_forward_path, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + TP_ARGS(local, sdata, sta) +); + +TRACE_EVENT(drv_net_setup_tc, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u8 type), + + TP_ARGS(local, sdata, type), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u8, type) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->type = type; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " type:%d\n", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->type + ) +); + +TRACE_EVENT(drv_change_vif_links, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u16 old_links, u16 new_links), + + TP_ARGS(local, sdata, old_links, new_links), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u16, old_links) + __field(u16, new_links) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->old_links = old_links; + __entry->new_links = new_links; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " 
old_links:0x%04x, new_links:0x%04x\n", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->old_links, __entry->new_links + ) +); + +TRACE_EVENT(drv_change_sta_links, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + u16 old_links, u16 new_links), + + TP_ARGS(local, sdata, sta, old_links, new_links), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(u16, old_links) + __field(u16, new_links) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->old_links = old_links; + __entry->new_links = new_links; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " old_links:0x%04x, new_links:0x%04x\n", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, + __entry->old_links, __entry->new_links + ) +); + +/* + * Tracing for API calls that drivers call. + */ + +TRACE_EVENT(api_start_tx_ba_session, + TP_PROTO(struct ieee80211_sta *sta, u16 tid), + + TP_ARGS(sta, tid), + + TP_STRUCT__entry( + STA_ENTRY + __field(u16, tid) + ), + + TP_fast_assign( + STA_ASSIGN; + __entry->tid = tid; + ), + + TP_printk( + STA_PR_FMT " tid:%d", + STA_PR_ARG, __entry->tid + ) +); + +TRACE_EVENT(api_start_tx_ba_cb, + TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid), + + TP_ARGS(sdata, ra, tid), + + TP_STRUCT__entry( + VIF_ENTRY + __array(u8, ra, ETH_ALEN) + __field(u16, tid) + ), + + TP_fast_assign( + VIF_ASSIGN; + memcpy(__entry->ra, ra, ETH_ALEN); + __entry->tid = tid; + ), + + TP_printk( + VIF_PR_FMT " ra:%pM tid:%d", + VIF_PR_ARG, __entry->ra, __entry->tid + ) +); + +TRACE_EVENT(api_stop_tx_ba_session, + TP_PROTO(struct ieee80211_sta *sta, u16 tid), + + TP_ARGS(sta, tid), + + TP_STRUCT__entry( + STA_ENTRY + __field(u16, tid) + ), + + TP_fast_assign( + STA_ASSIGN; + __entry->tid = tid; + ), + + TP_printk( + STA_PR_FMT " tid:%d", + STA_PR_ARG, __entry->tid + ) +); + +TRACE_EVENT(api_stop_tx_ba_cb, + TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid), + + TP_ARGS(sdata, ra, tid), + + TP_STRUCT__entry( + VIF_ENTRY + __array(u8, ra, ETH_ALEN) + __field(u16, tid) + ), + + TP_fast_assign( + VIF_ASSIGN; + memcpy(__entry->ra, ra, ETH_ALEN); + __entry->tid = tid; + ), + + TP_printk( + VIF_PR_FMT " ra:%pM tid:%d", + VIF_PR_ARG, __entry->ra, __entry->tid + ) +); + +DEFINE_EVENT(local_only_evt, api_restart_hw, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local) +); + +TRACE_EVENT(api_beacon_loss, + TP_PROTO(struct ieee80211_sub_if_data *sdata), + + TP_ARGS(sdata), + + TP_STRUCT__entry( + VIF_ENTRY + ), + + TP_fast_assign( + VIF_ASSIGN; + ), + + TP_printk( + VIF_PR_FMT, + VIF_PR_ARG + ) +); + +TRACE_EVENT(api_connection_loss, + TP_PROTO(struct ieee80211_sub_if_data *sdata), + + TP_ARGS(sdata), + + TP_STRUCT__entry( + VIF_ENTRY + ), + + TP_fast_assign( + VIF_ASSIGN; + ), + + TP_printk( + VIF_PR_FMT, + VIF_PR_ARG + ) +); + +TRACE_EVENT(api_disconnect, + TP_PROTO(struct ieee80211_sub_if_data *sdata, bool reconnect), + + TP_ARGS(sdata, reconnect), + + TP_STRUCT__entry( + VIF_ENTRY + __field(int, reconnect) + ), + + TP_fast_assign( + VIF_ASSIGN; + __entry->reconnect = reconnect; + ), + + TP_printk( + VIF_PR_FMT " reconnect:%d", + VIF_PR_ARG, __entry->reconnect + ) +); + +TRACE_EVENT(api_cqm_rssi_notify, + TP_PROTO(struct ieee80211_sub_if_data *sdata, + enum nl80211_cqm_rssi_threshold_event rssi_event, + s32 rssi_level), + + TP_ARGS(sdata, rssi_event, rssi_level), + + TP_STRUCT__entry( + VIF_ENTRY + __field(u32, rssi_event) + __field(s32, rssi_level) + ), + + TP_fast_assign( + VIF_ASSIGN; + __entry->rssi_event = 
rssi_event; + __entry->rssi_level = rssi_level; + ), + + TP_printk( + VIF_PR_FMT " event:%d rssi:%d", + VIF_PR_ARG, __entry->rssi_event, __entry->rssi_level + ) +); + +DEFINE_EVENT(local_sdata_evt, api_cqm_beacon_loss_notify, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +TRACE_EVENT(api_scan_completed, + TP_PROTO(struct ieee80211_local *local, bool aborted), + + TP_ARGS(local, aborted), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(bool, aborted) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->aborted = aborted; + ), + + TP_printk( + LOCAL_PR_FMT " aborted:%d", + LOCAL_PR_ARG, __entry->aborted + ) +); + +TRACE_EVENT(api_sched_scan_results, + TP_PROTO(struct ieee80211_local *local), + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) +); + +TRACE_EVENT(api_sched_scan_stopped, + TP_PROTO(struct ieee80211_local *local), + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) +); + +TRACE_EVENT(api_sta_block_awake, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sta *sta, bool block), + + TP_ARGS(local, sta, block), + + TP_STRUCT__entry( + LOCAL_ENTRY + STA_ENTRY + __field(bool, block) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + STA_ASSIGN; + __entry->block = block; + ), + + TP_printk( + LOCAL_PR_FMT STA_PR_FMT " block:%d", + LOCAL_PR_ARG, STA_PR_ARG, __entry->block + ) +); + +TRACE_EVENT(api_chswitch_done, + TP_PROTO(struct ieee80211_sub_if_data *sdata, bool success), + + TP_ARGS(sdata, success), + + TP_STRUCT__entry( + VIF_ENTRY + __field(bool, success) + ), + + TP_fast_assign( + VIF_ASSIGN; + __entry->success = success; + ), + + TP_printk( + VIF_PR_FMT " success=%d", + VIF_PR_ARG, __entry->success + ) +); + +DEFINE_EVENT(local_only_evt, api_ready_on_channel, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local) +); + +DEFINE_EVENT(local_only_evt, api_remain_on_channel_expired, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local) +); + +TRACE_EVENT(api_gtk_rekey_notify, + TP_PROTO(struct ieee80211_sub_if_data *sdata, + const u8 *bssid, const u8 *replay_ctr), + + TP_ARGS(sdata, bssid, replay_ctr), + + TP_STRUCT__entry( + VIF_ENTRY + __array(u8, bssid, ETH_ALEN) + __array(u8, replay_ctr, NL80211_REPLAY_CTR_LEN) + ), + + TP_fast_assign( + VIF_ASSIGN; + memcpy(__entry->bssid, bssid, ETH_ALEN); + memcpy(__entry->replay_ctr, replay_ctr, NL80211_REPLAY_CTR_LEN); + ), + + TP_printk(VIF_PR_FMT, VIF_PR_ARG) +); + +TRACE_EVENT(api_enable_rssi_reports, + TP_PROTO(struct ieee80211_sub_if_data *sdata, + int rssi_min_thold, int rssi_max_thold), + + TP_ARGS(sdata, rssi_min_thold, rssi_max_thold), + + TP_STRUCT__entry( + VIF_ENTRY + __field(int, rssi_min_thold) + __field(int, rssi_max_thold) + ), + + TP_fast_assign( + VIF_ASSIGN; + __entry->rssi_min_thold = rssi_min_thold; + __entry->rssi_max_thold = rssi_max_thold; + ), + + TP_printk( + VIF_PR_FMT " rssi_min_thold =%d, rssi_max_thold = %d", + VIF_PR_ARG, __entry->rssi_min_thold, __entry->rssi_max_thold + ) +); + +TRACE_EVENT(api_eosp, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sta *sta), + + TP_ARGS(local, sta), + + TP_STRUCT__entry( + LOCAL_ENTRY + STA_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + STA_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT STA_PR_FMT, + LOCAL_PR_ARG, STA_PR_ARG + ) +); + +TRACE_EVENT(api_send_eosp_nullfunc, + TP_PROTO(struct 
ieee80211_local *local, + struct ieee80211_sta *sta, + u8 tid), + + TP_ARGS(local, sta, tid), + + TP_STRUCT__entry( + LOCAL_ENTRY + STA_ENTRY + __field(u8, tid) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + STA_ASSIGN; + __entry->tid = tid; + ), + + TP_printk( + LOCAL_PR_FMT STA_PR_FMT " tid:%d", + LOCAL_PR_ARG, STA_PR_ARG, __entry->tid + ) +); + +TRACE_EVENT(api_sta_set_buffered, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sta *sta, + u8 tid, bool buffered), + + TP_ARGS(local, sta, tid, buffered), + + TP_STRUCT__entry( + LOCAL_ENTRY + STA_ENTRY + __field(u8, tid) + __field(bool, buffered) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + STA_ASSIGN; + __entry->tid = tid; + __entry->buffered = buffered; + ), + + TP_printk( + LOCAL_PR_FMT STA_PR_FMT " tid:%d buffered:%d", + LOCAL_PR_ARG, STA_PR_ARG, __entry->tid, __entry->buffered + ) +); + +TRACE_EVENT(api_radar_detected, + TP_PROTO(struct ieee80211_local *local), + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT " radar detected", + LOCAL_PR_ARG + ) +); + +/* + * Tracing for internal functions + * (which may also be called in response to driver calls) + */ + +TRACE_EVENT(wake_queue, + TP_PROTO(struct ieee80211_local *local, u16 queue, + enum queue_stop_reason reason), + + TP_ARGS(local, queue, reason), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u16, queue) + __field(u32, reason) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->queue = queue; + __entry->reason = reason; + ), + + TP_printk( + LOCAL_PR_FMT " queue:%d, reason:%d", + LOCAL_PR_ARG, __entry->queue, __entry->reason + ) +); + +TRACE_EVENT(stop_queue, + TP_PROTO(struct ieee80211_local *local, u16 queue, + enum queue_stop_reason reason), + + TP_ARGS(local, queue, reason), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u16, queue) + __field(u32, reason) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->queue = queue; + __entry->reason = reason; + ), + + TP_printk( + LOCAL_PR_FMT " queue:%d, reason:%d", + LOCAL_PR_ARG, __entry->queue, __entry->reason + ) +); + +#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
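The two directives just above and the three that follow are the standard kernel tracepoint trailer: TRACE_INCLUDE_PATH and TRACE_INCLUDE_FILE tell <trace/define_trace.h> where to find this header so it can re-include it and expand every TRACE_EVENT()/DEFINE_EVENT() above into real tracepoint code. As a minimal sketch of the usual pattern (the file name is the conventional one and is shown for illustration only), exactly one translation unit instantiates the events:

/* trace.c - the single place the tracepoints above are actually defined */
#define CREATE_TRACE_POINTS
#include "trace.h"

Any other file that wants to emit an event then just includes "trace.h" and calls the generated hook, e.g. trace_drv_stop_nan(local, sdata) for the drv_stop_nan event declared above.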
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace +#include <trace/define_trace.h> diff --git a/net/mac80211/trace_msg.h b/net/mac80211/trace_msg.h new file mode 100644 index 0000000000..c9dbe9aab7 --- /dev/null +++ b/net/mac80211/trace_msg.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Portions of this file + * Copyright (C) 2019 Intel Corporation + */ + +#ifdef CONFIG_MAC80211_MESSAGE_TRACING + +#if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ) +#define __MAC80211_MSG_DRIVER_TRACE + +#include <linux/tracepoint.h> +#include <net/mac80211.h> +#include "ieee80211_i.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mac80211_msg + +#define MAX_MSG_LEN 120 + +DECLARE_EVENT_CLASS(mac80211_msg_event, + TP_PROTO(struct va_format *vaf), + + TP_ARGS(vaf), + + TP_STRUCT__entry( + __vstring(msg, vaf->fmt, vaf->va) + ), + + TP_fast_assign( + __assign_vstr(msg, vaf->fmt, vaf->va); + ), + + TP_printk("%s", __get_str(msg)) +); + +DEFINE_EVENT(mac80211_msg_event, mac80211_info, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); +DEFINE_EVENT(mac80211_msg_event, mac80211_dbg, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); +DEFINE_EVENT(mac80211_msg_event, mac80211_err, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); +#endif /* !__MAC80211_MSG_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace_msg +#include <trace/define_trace.h> + +#endif diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c new file mode 100644 index 0000000000..d45d4be63d --- /dev/null +++ b/net/mac80211/tx.c @@ -0,0 +1,6236 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2018-2022 Intel Corporation + * + * Transmit and frame generation functions. 
+ */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/skbuff.h> +#include <linux/if_vlan.h> +#include <linux/etherdevice.h> +#include <linux/bitmap.h> +#include <linux/rcupdate.h> +#include <linux/export.h> +#include <net/net_namespace.h> +#include <net/ieee80211_radiotap.h> +#include <net/cfg80211.h> +#include <net/mac80211.h> +#include <net/codel.h> +#include <net/codel_impl.h> +#include <asm/unaligned.h> +#include <net/fq_impl.h> +#include <net/gso.h> + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "led.h" +#include "mesh.h" +#include "wep.h" +#include "wpa.h" +#include "wme.h" +#include "rate.h" + +/* misc utils */ + +static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, + struct sk_buff *skb, int group_addr, + int next_frag_len) +{ + int rate, mrate, erp, dur, i, shift = 0; + struct ieee80211_rate *txrate; + struct ieee80211_local *local = tx->local; + struct ieee80211_supported_band *sband; + struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_chanctx_conf *chanctx_conf; + u32 rate_flags = 0; + + /* assume HW handles this */ + if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS)) + return 0; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(tx->sdata->vif.bss_conf.chanctx_conf); + if (chanctx_conf) { + shift = ieee80211_chandef_get_shift(&chanctx_conf->def); + rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); + } + rcu_read_unlock(); + + /* uh huh? */ + if (WARN_ON_ONCE(tx->rate.idx < 0)) + return 0; + + sband = local->hw.wiphy->bands[info->band]; + txrate = &sband->bitrates[tx->rate.idx]; + + erp = txrate->flags & IEEE80211_RATE_ERP_G; + + /* device is expected to do this */ + if (sband->band == NL80211_BAND_S1GHZ) + return 0; + + /* + * data and mgmt (except PS Poll): + * - during CFP: 32768 + * - during contention period: + * if addr1 is group address: 0 + * if more fragments = 0 and addr1 is individual address: time to + * transmit one ACK plus SIFS + * if more fragments = 1 and addr1 is individual address: time to + * transmit next fragment plus 2 x ACK plus 3 x SIFS + * + * IEEE 802.11, 9.6: + * - control response frame (CTS or ACK) shall be transmitted using the + * same rate as the immediately previous frame in the frame exchange + * sequence, if this rate belongs to the PHY mandatory rates, or else + * at the highest possible rate belonging to the PHY rates in the + * BSSBasicRateSet + */ + hdr = (struct ieee80211_hdr *)skb->data; + if (ieee80211_is_ctl(hdr->frame_control)) { + /* TODO: These control frames are not currently sent by + * mac80211, but should they be implemented, this function + * needs to be updated to support duration field calculation. + * + * RTS: time needed to transmit pending data/mgmt frame plus + * one CTS frame plus one ACK frame plus 3 x SIFS + * CTS: duration of immediately previous RTS minus time + * required to transmit CTS and its SIFS + * ACK: 0 if immediately previous directed data/mgmt had + * more=0, with more=1 duration in ACK frame is duration + * from previous frame minus time needed to transmit ACK + * and its SIFS + * PS Poll: BIT(15) | BIT(14) | aid + */ + return 0; + } + + /* data/mgmt */ + if (0 /* FIX: data/mgmt during CFP */) + return cpu_to_le16(32768); + + if (group_addr) /* Group address as the destination - no ACK */ + return 0; + + /* Individual destination address: + * IEEE 802.11, Ch. 
9.6 (after IEEE 802.11g changes) + * CTS and ACK frames shall be transmitted using the highest rate in + * basic rate set that is less than or equal to the rate of the + * immediately previous frame and that is using the same modulation + * (CCK or OFDM). If no basic rate set matches with these requirements, + * the highest mandatory rate of the PHY that is less than or equal to + * the rate of the previous frame is used. + * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps + */ + rate = -1; + /* use lowest available if everything fails */ + mrate = sband->bitrates[0].bitrate; + for (i = 0; i < sband->n_bitrates; i++) { + struct ieee80211_rate *r = &sband->bitrates[i]; + + if (r->bitrate > txrate->bitrate) + break; + + if ((rate_flags & r->flags) != rate_flags) + continue; + + if (tx->sdata->vif.bss_conf.basic_rates & BIT(i)) + rate = DIV_ROUND_UP(r->bitrate, 1 << shift); + + switch (sband->band) { + case NL80211_BAND_2GHZ: + case NL80211_BAND_LC: { + u32 flag; + if (tx->sdata->deflink.operating_11g_mode) + flag = IEEE80211_RATE_MANDATORY_G; + else + flag = IEEE80211_RATE_MANDATORY_B; + if (r->flags & flag) + mrate = r->bitrate; + break; + } + case NL80211_BAND_5GHZ: + case NL80211_BAND_6GHZ: + if (r->flags & IEEE80211_RATE_MANDATORY_A) + mrate = r->bitrate; + break; + case NL80211_BAND_S1GHZ: + case NL80211_BAND_60GHZ: + /* TODO, for now fall through */ + case NUM_NL80211_BANDS: + WARN_ON(1); + break; + } + } + if (rate == -1) { + /* No matching basic rate found; use highest suitable mandatory + * PHY rate */ + rate = DIV_ROUND_UP(mrate, 1 << shift); + } + + /* Don't calculate ACKs for QoS Frames with NoAck Policy set */ + if (ieee80211_is_data_qos(hdr->frame_control) && + *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK) + dur = 0; + else + /* Time needed to transmit ACK + * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up + * to closest integer */ + dur = ieee80211_frame_duration(sband->band, 10, rate, erp, + tx->sdata->vif.bss_conf.use_short_preamble, + shift); + + if (next_frag_len) { + /* Frame is fragmented: duration increases with time needed to + * transmit next fragment plus ACK and 2 x SIFS. 
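 * Since ieee80211_frame_duration() already accounts for one SIFS in its
 * result (see the ACK computation above), doubling the ACK term and then
 * adding the next fragment's duration below gives exactly the
 * 'next fragment plus 2 x ACK plus 3 x SIFS' total described in the
 * comment at the top of this function.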
*/ + dur *= 2; /* ACK + SIFS */ + /* next fragment */ + dur += ieee80211_frame_duration(sband->band, next_frag_len, + txrate->bitrate, erp, + tx->sdata->vif.bss_conf.use_short_preamble, + shift); + } + + return cpu_to_le16(dur); +} + +/* tx handlers */ +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx) +{ + struct ieee80211_local *local = tx->local; + struct ieee80211_if_managed *ifmgd; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + + /* driver doesn't support power save */ + if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS)) + return TX_CONTINUE; + + /* hardware does dynamic power save */ + if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) + return TX_CONTINUE; + + /* dynamic power save disabled */ + if (local->hw.conf.dynamic_ps_timeout <= 0) + return TX_CONTINUE; + + /* we are scanning, don't enable power save */ + if (local->scanning) + return TX_CONTINUE; + + if (!local->ps_sdata) + return TX_CONTINUE; + + /* No point if we're going to suspend */ + if (local->quiescing) + return TX_CONTINUE; + + /* dynamic ps is supported only in managed mode */ + if (tx->sdata->vif.type != NL80211_IFTYPE_STATION) + return TX_CONTINUE; + + if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) + return TX_CONTINUE; + + ifmgd = &tx->sdata->u.mgd; + + /* + * Don't wakeup from power save if u-apsd is enabled, voip ac has + * u-apsd enabled and the frame is in voip class. This effectively + * means that even if all access categories have u-apsd enabled, in + * practise u-apsd is only used with the voip ac. This is a + * workaround for the case when received voip class packets do not + * have correct qos tag for some reason, due the network or the + * peer application. + * + * Note: ifmgd->uapsd_queues access is racy here. If the value is + * changed via debugfs, user needs to reassociate manually to have + * everything in sync. + */ + if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) && + (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) && + skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO) + return TX_CONTINUE; + + if (local->hw.conf.flags & IEEE80211_CONF_PS) { + ieee80211_stop_queues_by_reason(&local->hw, + IEEE80211_MAX_QUEUE_MAP, + IEEE80211_QUEUE_STOP_REASON_PS, + false); + ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; + ieee80211_queue_work(&local->hw, + &local->dynamic_ps_disable_work); + } + + /* Don't restart the timer if we're not disassociated */ + if (!ifmgd->associated) + return TX_CONTINUE; + + mod_timer(&local->dynamic_ps_timer, jiffies + + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); + + return TX_CONTINUE; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) +{ + + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + bool assoc = false; + + if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) + return TX_CONTINUE; + + if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) && + test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) && + !ieee80211_is_probe_req(hdr->frame_control) && + !ieee80211_is_any_nullfunc(hdr->frame_control)) + /* + * When software scanning only nullfunc frames (to notify + * the sleep state to the AP) and probe requests (for the + * active scan) are allowed, all other frames should not be + * sent and we should not get here, but if we do + * nonetheless, drop them to avoid sending them + * off-channel. 
See the link below and + * ieee80211_start_scan() for more. + * + * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089 + */ + return TX_DROP; + + if (tx->sdata->vif.type == NL80211_IFTYPE_OCB) + return TX_CONTINUE; + + if (tx->flags & IEEE80211_TX_PS_BUFFERED) + return TX_CONTINUE; + + if (tx->sta) + assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC); + + if (likely(tx->flags & IEEE80211_TX_UNICAST)) { + if (unlikely(!assoc && + ieee80211_is_data(hdr->frame_control))) { +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + sdata_info(tx->sdata, + "dropped data frame to not associated station %pM\n", + hdr->addr1); +#endif + I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); + return TX_DROP; + } + } else if (unlikely(ieee80211_is_data(hdr->frame_control) && + ieee80211_vif_get_num_mcast_if(tx->sdata) == 0)) { + /* + * No associated STAs - no need to send multicast + * frames. + */ + return TX_DROP; + } + + return TX_CONTINUE; +} + +/* This function is called whenever the AP is about to exceed the maximum limit + * of buffered frames for power saving STAs. This situation should not really + * happen often during normal operation, so dropping the oldest buffered packet + * from each queue should be OK to make some room for new frames. */ +static void purge_old_ps_buffers(struct ieee80211_local *local) +{ + int total = 0, purged = 0; + struct sk_buff *skb; + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + struct ps_data *ps; + + if (sdata->vif.type == NL80211_IFTYPE_AP) + ps = &sdata->u.ap.ps; + else if (ieee80211_vif_is_mesh(&sdata->vif)) + ps = &sdata->u.mesh.ps; + else + continue; + + skb = skb_dequeue(&ps->bc_buf); + if (skb) { + purged++; + ieee80211_free_txskb(&local->hw, skb); + } + total += skb_queue_len(&ps->bc_buf); + } + + /* + * Drop one frame from each station from the lowest-priority + * AC that has frames at all. + */ + list_for_each_entry_rcu(sta, &local->sta_list, list) { + int ac; + + for (ac = IEEE80211_AC_BK; ac >= IEEE80211_AC_VO; ac--) { + skb = skb_dequeue(&sta->ps_tx_buf[ac]); + total += skb_queue_len(&sta->ps_tx_buf[ac]); + if (skb) { + purged++; + ieee80211_free_txskb(&local->hw, skb); + break; + } + } + } + + local->total_ps_buffered = total; + ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged); +} + +static ieee80211_tx_result +ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; + struct ps_data *ps; + + /* + * broadcast/multicast frame + * + * If any of the associated/peer stations is in power save mode, + * the frame is buffered to be sent after DTIM beacon frame. + * This is done either by the hardware or us. 
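 * Concretely, in the code below: once at least one station is asleep (or
 * ps->bc_buf is non-empty), the frame is marked with
 * IEEE80211_TX_CTL_SEND_AFTER_DTIM; if the hardware buffers broadcast
 * frames itself (HOST_BROADCAST_PS_BUFFERING not set) it is handed to
 * the driver right away, otherwise mac80211 queues it on ps->bc_buf
 * (bounded by AP_MAX_BC_BUFFER and TOTAL_MAX_TX_BUFFER) and returns
 * TX_QUEUED.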
+ */ + + /* powersaving STAs currently only in AP/VLAN/mesh mode */ + if (tx->sdata->vif.type == NL80211_IFTYPE_AP || + tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + if (!tx->sdata->bss) + return TX_CONTINUE; + + ps = &tx->sdata->bss->ps; + } else if (ieee80211_vif_is_mesh(&tx->sdata->vif)) { + ps = &tx->sdata->u.mesh.ps; + } else { + return TX_CONTINUE; + } + + + /* no buffering for ordered frames */ + if (ieee80211_has_order(hdr->frame_control)) + return TX_CONTINUE; + + if (ieee80211_is_probe_req(hdr->frame_control)) + return TX_CONTINUE; + + if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL)) + info->hw_queue = tx->sdata->vif.cab_queue; + + /* no stations in PS mode and no buffered packets */ + if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf)) + return TX_CONTINUE; + + info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; + + /* device releases frame after DTIM beacon */ + if (!ieee80211_hw_check(&tx->local->hw, HOST_BROADCAST_PS_BUFFERING)) + return TX_CONTINUE; + + /* buffered in mac80211 */ + if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) + purge_old_ps_buffers(tx->local); + + if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) { + ps_dbg(tx->sdata, + "BC TX buffer full - dropping the oldest frame\n"); + ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf)); + } else + tx->local->total_ps_buffered++; + + skb_queue_tail(&ps->bc_buf, tx->skb); + + return TX_QUEUED; +} + +static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta, + struct sk_buff *skb) +{ + if (!ieee80211_is_mgmt(fc)) + return 0; + + if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP)) + return 0; + + if (!ieee80211_is_robust_mgmt_frame(skb)) + return 0; + + return 1; +} + +static ieee80211_tx_result +ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) +{ + struct sta_info *sta = tx->sta; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; + struct ieee80211_local *local = tx->local; + + if (unlikely(!sta)) + return TX_CONTINUE; + + if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) || + test_sta_flag(sta, WLAN_STA_PS_DRIVER) || + test_sta_flag(sta, WLAN_STA_PS_DELIVER)) && + !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) { + int ac = skb_get_queue_mapping(tx->skb); + + if (ieee80211_is_mgmt(hdr->frame_control) && + !ieee80211_is_bufferable_mmpdu(tx->skb)) { + info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; + return TX_CONTINUE; + } + + ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n", + sta->sta.addr, sta->sta.aid, ac); + if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) + purge_old_ps_buffers(tx->local); + + /* sync with ieee80211_sta_ps_deliver_wakeup */ + spin_lock(&sta->ps_lock); + /* + * STA woke up the meantime and all the frames on ps_tx_buf have + * been queued to pending queue. No reordering can happen, go + * ahead and Tx the packet. 
+ */ + if (!test_sta_flag(sta, WLAN_STA_PS_STA) && + !test_sta_flag(sta, WLAN_STA_PS_DRIVER) && + !test_sta_flag(sta, WLAN_STA_PS_DELIVER)) { + spin_unlock(&sta->ps_lock); + return TX_CONTINUE; + } + + if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) { + struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]); + ps_dbg(tx->sdata, + "STA %pM TX buffer for AC %d full - dropping oldest frame\n", + sta->sta.addr, ac); + ieee80211_free_txskb(&local->hw, old); + } else + tx->local->total_ps_buffered++; + + info->control.jiffies = jiffies; + info->control.vif = &tx->sdata->vif; + info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING; + info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; + skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb); + spin_unlock(&sta->ps_lock); + + if (!timer_pending(&local->sta_cleanup)) + mod_timer(&local->sta_cleanup, + round_jiffies(jiffies + + STA_INFO_CLEANUP_INTERVAL)); + + /* + * We queued up some frames, so the TIM bit might + * need to be set, recalculate it. + */ + sta_info_recalc_tim(sta); + + return TX_QUEUED; + } else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) { + ps_dbg(tx->sdata, + "STA %pM in PS mode, but polling/in SP -> send frame\n", + sta->sta.addr); + } + + return TX_CONTINUE; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) +{ + if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) + return TX_CONTINUE; + + if (tx->flags & IEEE80211_TX_UNICAST) + return ieee80211_tx_h_unicast_ps_buf(tx); + else + return ieee80211_tx_h_multicast_ps_buf(tx); +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + + if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) { + if (tx->sdata->control_port_no_encrypt) + info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; + info->flags |= IEEE80211_TX_CTL_USE_MINRATE; + } + + return TX_CONTINUE; +} + +static struct ieee80211_key * +ieee80211_select_link_key(struct ieee80211_tx_data *tx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + struct ieee80211_link_data *link; + unsigned int link_id; + + link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK); + if (link_id == IEEE80211_LINK_UNSPECIFIED) { + link = &tx->sdata->deflink; + } else { + link = rcu_dereference(tx->sdata->link[link_id]); + if (!link) + return NULL; + } + + if (ieee80211_is_group_privacy_action(tx->skb)) + return rcu_dereference(link->default_multicast_key); + else if (ieee80211_is_mgmt(hdr->frame_control) && + is_multicast_ether_addr(hdr->addr1) && + ieee80211_is_robust_mgmt_frame(tx->skb)) + return rcu_dereference(link->default_mgmt_key); + else if (is_multicast_ether_addr(hdr->addr1)) + return rcu_dereference(link->default_multicast_key); + + return NULL; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) +{ + struct ieee80211_key *key; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; + + if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) { + tx->key = NULL; + return TX_CONTINUE; + } + + if (tx->sta && + (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx]))) + tx->key = key; + else if ((key = ieee80211_select_link_key(tx))) + tx->key = key; + else 
if (!is_multicast_ether_addr(hdr->addr1) && + (key = rcu_dereference(tx->sdata->default_unicast_key))) + tx->key = key; + else + tx->key = NULL; + + if (tx->key) { + bool skip_hw = false; + + /* TODO: add threshold stuff again */ + + switch (tx->key->conf.cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + if (!ieee80211_is_data_present(hdr->frame_control)) + tx->key = NULL; + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + if (!ieee80211_is_data_present(hdr->frame_control) && + !ieee80211_use_mfp(hdr->frame_control, tx->sta, + tx->skb) && + !ieee80211_is_group_privacy_action(tx->skb)) + tx->key = NULL; + else + skip_hw = (tx->key->conf.flags & + IEEE80211_KEY_FLAG_SW_MGMT_TX) && + ieee80211_is_mgmt(hdr->frame_control); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + if (!ieee80211_is_mgmt(hdr->frame_control)) + tx->key = NULL; + break; + } + + if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED && + !ieee80211_is_deauth(hdr->frame_control)) && + tx->skb->protocol != tx->sdata->control_port_protocol) + return TX_DROP; + + if (!skip_hw && tx->key && + tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) + info->control.hw_key = &tx->key->conf; + } else if (ieee80211_is_data_present(hdr->frame_control) && tx->sta && + test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) { + return TX_DROP; + } + + return TX_CONTINUE; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + struct ieee80211_hdr *hdr = (void *)tx->skb->data; + struct ieee80211_supported_band *sband; + u32 len; + struct ieee80211_tx_rate_control txrc; + struct ieee80211_sta_rates *ratetbl = NULL; + bool encap = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; + bool assoc = false; + + memset(&txrc, 0, sizeof(txrc)); + + sband = tx->local->hw.wiphy->bands[info->band]; + + len = min_t(u32, tx->skb->len + FCS_LEN, + tx->local->hw.wiphy->frag_threshold); + + /* set up the tx rate control struct we give the RC algo */ + txrc.hw = &tx->local->hw; + txrc.sband = sband; + txrc.bss_conf = &tx->sdata->vif.bss_conf; + txrc.skb = tx->skb; + txrc.reported_rate.idx = -1; + txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band]; + + if (tx->sdata->rc_has_mcs_mask[info->band]) + txrc.rate_idx_mcs_mask = + tx->sdata->rc_rateidx_mcs_mask[info->band]; + + txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP || + tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT || + tx->sdata->vif.type == NL80211_IFTYPE_ADHOC || + tx->sdata->vif.type == NL80211_IFTYPE_OCB); + + /* set up RTS protection if desired */ + if (len > tx->local->hw.wiphy->rts_threshold) { + txrc.rts = true; + } + + info->control.use_rts = txrc.rts; + info->control.use_cts_prot = tx->sdata->vif.bss_conf.use_cts_prot; + + /* + * Use short preamble if the BSS can handle it, but not for + * management frames unless we know the receiver can handle + * that -- the management frame might be to a station that + * just wants a probe response. 
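 * (In the check below this means: data frames always qualify, while
 * other frames only use short preamble when the destination station is
 * known to accept it, i.e. has WLAN_STA_SHORT_PREAMBLE set.)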
+ */ + if (tx->sdata->vif.bss_conf.use_short_preamble && + (ieee80211_is_tx_data(tx->skb) || + (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE)))) + txrc.short_preamble = true; + + info->control.short_preamble = txrc.short_preamble; + + /* don't ask rate control when rate already injected via radiotap */ + if (info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT) + return TX_CONTINUE; + + if (tx->sta) + assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC); + + /* + * Lets not bother rate control if we're associated and cannot + * talk to the sta. This should not happen. + */ + if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc && + !rate_usable_index_exists(sband, &tx->sta->sta), + "%s: Dropped data frame as no usable bitrate found while " + "scanning and associated. Target station: " + "%pM on %d GHz band\n", + tx->sdata->name, + encap ? ((struct ethhdr *)hdr)->h_dest : hdr->addr1, + info->band ? 5 : 2)) + return TX_DROP; + + /* + * If we're associated with the sta at this point we know we can at + * least send the frame at the lowest bit rate. + */ + rate_control_get_rate(tx->sdata, tx->sta, &txrc); + + if (tx->sta && !info->control.skip_table) + ratetbl = rcu_dereference(tx->sta->sta.rates); + + if (unlikely(info->control.rates[0].idx < 0)) { + if (ratetbl) { + struct ieee80211_tx_rate rate = { + .idx = ratetbl->rate[0].idx, + .flags = ratetbl->rate[0].flags, + .count = ratetbl->rate[0].count + }; + + if (ratetbl->rate[0].idx < 0) + return TX_DROP; + + tx->rate = rate; + } else { + return TX_DROP; + } + } else { + tx->rate = info->control.rates[0]; + } + + if (txrc.reported_rate.idx < 0) { + txrc.reported_rate = tx->rate; + if (tx->sta && ieee80211_is_tx_data(tx->skb)) + tx->sta->deflink.tx_stats.last_rate = txrc.reported_rate; + } else if (tx->sta) + tx->sta->deflink.tx_stats.last_rate = txrc.reported_rate; + + if (ratetbl) + return TX_CONTINUE; + + if (unlikely(!info->control.rates[0].count)) + info->control.rates[0].count = 1; + + if (WARN_ON_ONCE((info->control.rates[0].count > 1) && + (info->flags & IEEE80211_TX_CTL_NO_ACK))) + info->control.rates[0].count = 1; + + return TX_CONTINUE; +} + +static __le16 ieee80211_tx_next_seq(struct sta_info *sta, int tid) +{ + u16 *seq = &sta->tid_seq[tid]; + __le16 ret = cpu_to_le16(*seq); + + /* Increase the sequence number. */ + *seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ; + + return ret; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; + int tid; + + /* + * Packet injection may want to control the sequence + * number, if we have no matching interface then we + * neither assign one ourselves nor ask the driver to. 
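 * A note on the arithmetic used below: the sequence control field keeps
 * the fragment number in its low four bits, which is why every counter
 * here advances in steps of 0x10 (the per-STA/TID counter in
 * ieee80211_tx_next_seq() additionally wraps via the IEEE80211_SCTL_SEQ
 * mask) and why the fragmentation handler later ORs the fragment number
 * in with IEEE80211_SCTL_FRAG.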
+ */ + if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR)) + return TX_CONTINUE; + + if (unlikely(ieee80211_is_ctl(hdr->frame_control))) + return TX_CONTINUE; + + if (ieee80211_hdrlen(hdr->frame_control) < 24) + return TX_CONTINUE; + + if (ieee80211_is_qos_nullfunc(hdr->frame_control)) + return TX_CONTINUE; + + if (info->control.flags & IEEE80211_TX_CTRL_NO_SEQNO) + return TX_CONTINUE; + + /* SNS11 from 802.11be 10.3.2.14 */ + if (unlikely(is_multicast_ether_addr(hdr->addr1) && + ieee80211_vif_is_mld(info->control.vif) && + info->control.vif->type == NL80211_IFTYPE_AP)) { + if (info->control.flags & IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX) + tx->sdata->mld_mcast_seq += 0x10; + hdr->seq_ctrl = cpu_to_le16(tx->sdata->mld_mcast_seq); + return TX_CONTINUE; + } + + /* + * Anything but QoS data that has a sequence number field + * (is long enough) gets a sequence number from the global + * counter. QoS data frames with a multicast destination + * also use the global counter (802.11-2012 9.3.2.10). + */ + if (!ieee80211_is_data_qos(hdr->frame_control) || + is_multicast_ether_addr(hdr->addr1)) { + /* driver should assign sequence number */ + info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; + /* for pure STA mode without beacons, we can do it */ + hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number); + tx->sdata->sequence_number += 0x10; + if (tx->sta) + tx->sta->deflink.tx_stats.msdu[IEEE80211_NUM_TIDS]++; + return TX_CONTINUE; + } + + /* + * This should be true for injected/management frames only, for + * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ + * above since they are not QoS-data frames. + */ + if (!tx->sta) + return TX_CONTINUE; + + /* include per-STA, per-TID sequence counter */ + tid = ieee80211_get_tid(hdr); + tx->sta->deflink.tx_stats.msdu[tid]++; + + hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid); + + return TX_CONTINUE; +} + +static int ieee80211_fragment(struct ieee80211_tx_data *tx, + struct sk_buff *skb, int hdrlen, + int frag_threshold) +{ + struct ieee80211_local *local = tx->local; + struct ieee80211_tx_info *info; + struct sk_buff *tmp; + int per_fragm = frag_threshold - hdrlen - FCS_LEN; + int pos = hdrlen + per_fragm; + int rem = skb->len - hdrlen - per_fragm; + + if (WARN_ON(rem < 0)) + return -EINVAL; + + /* first fragment was already added to queue by caller */ + + while (rem) { + int fraglen = per_fragm; + + if (fraglen > rem) + fraglen = rem; + rem -= fraglen; + tmp = dev_alloc_skb(local->tx_headroom + + frag_threshold + + IEEE80211_ENCRYPT_HEADROOM + + IEEE80211_ENCRYPT_TAILROOM); + if (!tmp) + return -ENOMEM; + + __skb_queue_tail(&tx->skbs, tmp); + + skb_reserve(tmp, + local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM); + + /* copy control information */ + memcpy(tmp->cb, skb->cb, sizeof(tmp->cb)); + + info = IEEE80211_SKB_CB(tmp); + info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | + IEEE80211_TX_CTL_FIRST_FRAGMENT); + + if (rem) + info->flags |= IEEE80211_TX_CTL_MORE_FRAMES; + + skb_copy_queue_mapping(tmp, skb); + tmp->priority = skb->priority; + tmp->dev = skb->dev; + + /* copy header and data */ + skb_put_data(tmp, skb->data, hdrlen); + skb_put_data(tmp, skb->data + pos, fraglen); + + pos += fraglen; + } + + /* adjust first fragment's length */ + skb_trim(skb, hdrlen + per_fragm); + return 0; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb = tx->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)skb->data; + 
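	/* Illustrative sizing (example numbers, not defaults): each fragment
	 * carries the full 802.11 header plus at most
	 * frag_threshold - hdrlen - FCS_LEN bytes of payload, so with a
	 * frag_threshold of 256 and a 24-byte header that is 228 bytes per
	 * fragment, and 976 bytes of payload end up split into 5 fragments
	 * (4 x 228 + 64).
	 */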
int frag_threshold = tx->local->hw.wiphy->frag_threshold; + int hdrlen; + int fragnum; + + /* no matter what happens, tx->skb moves to tx->skbs */ + __skb_queue_tail(&tx->skbs, skb); + tx->skb = NULL; + + if (info->flags & IEEE80211_TX_CTL_DONTFRAG) + return TX_CONTINUE; + + if (ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG)) + return TX_CONTINUE; + + /* + * Warn when submitting a fragmented A-MPDU frame and drop it. + * This scenario is handled in ieee80211_tx_prepare but extra + * caution taken here as fragmented ampdu may cause Tx stop. + */ + if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) + return TX_DROP; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + + /* internal error, why isn't DONTFRAG set? */ + if (WARN_ON(skb->len + FCS_LEN <= frag_threshold)) + return TX_DROP; + + /* + * Now fragment the frame. This will allocate all the fragments and + * chain them (using skb as the first fragment) to skb->next. + * During transmission, we will remove the successfully transmitted + * fragments from this list. When the low-level driver rejects one + * of the fragments then we will simply pretend to accept the skb + * but store it away as pending. + */ + if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold)) + return TX_DROP; + + /* update duration/seq/flags of fragments */ + fragnum = 0; + + skb_queue_walk(&tx->skbs, skb) { + const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); + + hdr = (void *)skb->data; + info = IEEE80211_SKB_CB(skb); + + if (!skb_queue_is_last(&tx->skbs, skb)) { + hdr->frame_control |= morefrags; + /* + * No multi-rate retries for fragmented frames, that + * would completely throw off the NAV at other STAs. + */ + info->control.rates[1].idx = -1; + info->control.rates[2].idx = -1; + info->control.rates[3].idx = -1; + BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 4); + info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; + } else { + hdr->frame_control &= ~morefrags; + } + hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG); + fragnum++; + } + + return TX_CONTINUE; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb; + int ac = -1; + + if (!tx->sta) + return TX_CONTINUE; + + skb_queue_walk(&tx->skbs, skb) { + ac = skb_get_queue_mapping(skb); + tx->sta->deflink.tx_stats.bytes[ac] += skb->len; + } + if (ac >= 0) + tx->sta->deflink.tx_stats.packets[ac]++; + + return TX_CONTINUE; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) +{ + if (!tx->key) + return TX_CONTINUE; + + switch (tx->key->conf.cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + return ieee80211_crypto_wep_encrypt(tx); + case WLAN_CIPHER_SUITE_TKIP: + return ieee80211_crypto_tkip_encrypt(tx); + case WLAN_CIPHER_SUITE_CCMP: + return ieee80211_crypto_ccmp_encrypt( + tx, IEEE80211_CCMP_MIC_LEN); + case WLAN_CIPHER_SUITE_CCMP_256: + return ieee80211_crypto_ccmp_encrypt( + tx, IEEE80211_CCMP_256_MIC_LEN); + case WLAN_CIPHER_SUITE_AES_CMAC: + return ieee80211_crypto_aes_cmac_encrypt(tx); + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + return ieee80211_crypto_aes_cmac_256_encrypt(tx); + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + return ieee80211_crypto_aes_gmac_encrypt(tx); + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + return ieee80211_crypto_gcmp_encrypt(tx); + } + + return TX_DROP; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx) 
+{ + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + int next_len; + bool group_addr; + + skb_queue_walk(&tx->skbs, skb) { + hdr = (void *) skb->data; + if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) + break; /* must not overwrite AID */ + if (!skb_queue_is_last(&tx->skbs, skb)) { + struct sk_buff *next = skb_queue_next(&tx->skbs, skb); + next_len = next->len; + } else + next_len = 0; + group_addr = is_multicast_ether_addr(hdr->addr1); + + hdr->duration_id = + ieee80211_duration(tx, skb, group_addr, next_len); + } + + return TX_CONTINUE; +} + +/* actual transmit path */ + +static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, + struct sk_buff *skb, + struct ieee80211_tx_info *info, + struct tid_ampdu_tx *tid_tx, + int tid) +{ + bool queued = false; + bool reset_agg_timer = false; + struct sk_buff *purge_skb = NULL; + + if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { + reset_agg_timer = true; + } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { + /* + * nothing -- this aggregation session is being started + * but that might still fail with the driver + */ + } else if (!tx->sta->sta.txq[tid]) { + spin_lock(&tx->sta->lock); + /* + * Need to re-check now, because we may get here + * + * 1) in the window during which the setup is actually + * already done, but not marked yet because not all + * packets are spliced over to the driver pending + * queue yet -- if this happened we acquire the lock + * either before or after the splice happens, but + * need to recheck which of these cases happened. + * + * 2) during session teardown, if the OPERATIONAL bit + * was cleared due to the teardown but the pointer + * hasn't been assigned NULL yet (or we loaded it + * before it was assigned) -- in this case it may + * now be NULL which means we should just let the + * packet pass through because splicing the frames + * back is already done. 
+ */ + tid_tx = rcu_dereference_protected_tid_tx(tx->sta, tid); + + if (!tid_tx) { + /* do nothing, let packet pass through */ + } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { + reset_agg_timer = true; + } else { + queued = true; + if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) { + clear_sta_flag(tx->sta, WLAN_STA_SP); + ps_dbg(tx->sta->sdata, + "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n", + tx->sta->sta.addr, tx->sta->sta.aid); + } + info->control.vif = &tx->sdata->vif; + info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING; + info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; + __skb_queue_tail(&tid_tx->pending, skb); + if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER) + purge_skb = __skb_dequeue(&tid_tx->pending); + } + spin_unlock(&tx->sta->lock); + + if (purge_skb) + ieee80211_free_txskb(&tx->local->hw, purge_skb); + } + + /* reset session timer */ + if (reset_agg_timer) + tid_tx->last_tx = jiffies; + + return queued; +} + +void ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, struct sk_buff *skb) +{ + struct rate_control_ref *ref = sdata->local->rate_ctrl; + u16 tid; + + if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER)) + return; + + if (!sta || !sta->sta.deflink.ht_cap.ht_supported || + !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO || + skb->protocol == sdata->control_port_protocol) + return; + + tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + if (likely(sta->ampdu_mlme.tid_tx[tid])) + return; + + ieee80211_start_tx_ba_session(&sta->sta, tid, 0); +} + +/* + * initialises @tx + * pass %NULL for the station if unknown, a valid pointer if known + * or an ERR_PTR() if the station is known not to exist + */ +static ieee80211_tx_result +ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, + struct ieee80211_tx_data *tx, + struct sta_info *sta, struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + bool aggr_check = false; + int tid; + + memset(tx, 0, sizeof(*tx)); + tx->skb = skb; + tx->local = local; + tx->sdata = sdata; + __skb_queue_head_init(&tx->skbs); + + /* + * If this flag is set to true anywhere, and we get here, + * we are doing the needed processing, so remove the flag + * now. 
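+	 * (That flag is IEEE80211_TX_INTCFL_NEED_TXPROCESSING, cleared
+	 * just below.)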
+ */ + info->control.flags &= ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING; + + hdr = (struct ieee80211_hdr *) skb->data; + + if (likely(sta)) { + if (!IS_ERR(sta)) + tx->sta = sta; + } else { + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + tx->sta = rcu_dereference(sdata->u.vlan.sta); + if (!tx->sta && sdata->wdev.use_4addr) + return TX_DROP; + } else if (tx->sdata->control_port_protocol == tx->skb->protocol) { + tx->sta = sta_info_get_bss(sdata, hdr->addr1); + } + if (!tx->sta && !is_multicast_ether_addr(hdr->addr1)) { + tx->sta = sta_info_get(sdata, hdr->addr1); + aggr_check = true; + } + } + + if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && + !ieee80211_is_qos_nullfunc(hdr->frame_control) && + ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) && + !ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) { + struct tid_ampdu_tx *tid_tx; + + tid = ieee80211_get_tid(hdr); + tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]); + if (!tid_tx && aggr_check) { + ieee80211_aggr_check(sdata, tx->sta, skb); + tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]); + } + + if (tid_tx) { + bool queued; + + queued = ieee80211_tx_prep_agg(tx, skb, info, + tid_tx, tid); + + if (unlikely(queued)) + return TX_QUEUED; + } + } + + if (is_multicast_ether_addr(hdr->addr1)) { + tx->flags &= ~IEEE80211_TX_UNICAST; + info->flags |= IEEE80211_TX_CTL_NO_ACK; + } else + tx->flags |= IEEE80211_TX_UNICAST; + + if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) { + if (!(tx->flags & IEEE80211_TX_UNICAST) || + skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold || + info->flags & IEEE80211_TX_CTL_AMPDU) + info->flags |= IEEE80211_TX_CTL_DONTFRAG; + } + + if (!tx->sta) + info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; + else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) { + info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; + ieee80211_check_fast_xmit(tx->sta); + } + + info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT; + + return TX_CONTINUE; +} + +static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local, + struct ieee80211_vif *vif, + struct sta_info *sta, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_txq *txq = NULL; + + if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) || + (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)) + return NULL; + + if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && + unlikely(!ieee80211_is_data_present(hdr->frame_control))) { + if ((!ieee80211_is_mgmt(hdr->frame_control) || + ieee80211_is_bufferable_mmpdu(skb) || + vif->type == NL80211_IFTYPE_STATION) && + sta && sta->uploaded) { + /* + * This will be NULL if the driver didn't set the + * opt-in hardware flag. 
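+				 * (That is, the STA_MMPDU_TXQ / BUFF_MMPDU_TXQ
+				 * opt-ins checked by ieee80211_txq_init() before
+				 * it assigns this TXQ.)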
+ */ + txq = sta->sta.txq[IEEE80211_NUM_TIDS]; + } + } else if (sta) { + u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + + if (!sta->uploaded) + return NULL; + + txq = sta->sta.txq[tid]; + } else { + txq = vif->txq; + } + + if (!txq) + return NULL; + + return to_txq_info(txq); +} + +static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb) +{ + struct sk_buff *next; + codel_time_t now = codel_get_time(); + + skb_list_walk_safe(skb, skb, next) + IEEE80211_SKB_CB(skb)->control.enqueue_time = now; +} + +static u32 codel_skb_len_func(const struct sk_buff *skb) +{ + return skb->len; +} + +static codel_time_t codel_skb_time_func(const struct sk_buff *skb) +{ + const struct ieee80211_tx_info *info; + + info = (const struct ieee80211_tx_info *)skb->cb; + return info->control.enqueue_time; +} + +static struct sk_buff *codel_dequeue_func(struct codel_vars *cvars, + void *ctx) +{ + struct ieee80211_local *local; + struct txq_info *txqi; + struct fq *fq; + struct fq_flow *flow; + + txqi = ctx; + local = vif_to_sdata(txqi->txq.vif)->local; + fq = &local->fq; + + if (cvars == &txqi->def_cvars) + flow = &txqi->tin.default_flow; + else + flow = &fq->flows[cvars - local->cvars]; + + return fq_flow_dequeue(fq, flow); +} + +static void codel_drop_func(struct sk_buff *skb, + void *ctx) +{ + struct ieee80211_local *local; + struct ieee80211_hw *hw; + struct txq_info *txqi; + + txqi = ctx; + local = vif_to_sdata(txqi->txq.vif)->local; + hw = &local->hw; + + ieee80211_free_txskb(hw, skb); +} + +static struct sk_buff *fq_tin_dequeue_func(struct fq *fq, + struct fq_tin *tin, + struct fq_flow *flow) +{ + struct ieee80211_local *local; + struct txq_info *txqi; + struct codel_vars *cvars; + struct codel_params *cparams; + struct codel_stats *cstats; + + local = container_of(fq, struct ieee80211_local, fq); + txqi = container_of(tin, struct txq_info, tin); + cstats = &txqi->cstats; + + if (txqi->txq.sta) { + struct sta_info *sta = container_of(txqi->txq.sta, + struct sta_info, sta); + cparams = &sta->cparams; + } else { + cparams = &local->cparams; + } + + if (flow == &tin->default_flow) + cvars = &txqi->def_cvars; + else + cvars = &local->cvars[flow - fq->flows]; + + return codel_dequeue(txqi, + &flow->backlog, + cparams, + cvars, + cstats, + codel_skb_len_func, + codel_skb_time_func, + codel_drop_func, + codel_dequeue_func); +} + +static void fq_skb_free_func(struct fq *fq, + struct fq_tin *tin, + struct fq_flow *flow, + struct sk_buff *skb) +{ + struct ieee80211_local *local; + + local = container_of(fq, struct ieee80211_local, fq); + ieee80211_free_txskb(&local->hw, skb); +} + +static void ieee80211_txq_enqueue(struct ieee80211_local *local, + struct txq_info *txqi, + struct sk_buff *skb) +{ + struct fq *fq = &local->fq; + struct fq_tin *tin = &txqi->tin; + u32 flow_idx = fq_flow_idx(fq, skb); + + ieee80211_set_skb_enqueue_time(skb); + + spin_lock_bh(&fq->lock); + /* + * For management frames, don't really apply codel etc., + * we don't want to apply any shaping or anything we just + * want to simplify the driver API by having them on the + * txqi. 
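+	 * Such frames go onto txqi->frags, flagged with
+	 * IEEE80211_TX_INTCFL_NEED_TXPROCESSING so the remaining TX
+	 * handlers still run when they are dequeued.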
+ */ + if (unlikely(txqi->txq.tid == IEEE80211_NUM_TIDS)) { + IEEE80211_SKB_CB(skb)->control.flags |= + IEEE80211_TX_INTCFL_NEED_TXPROCESSING; + __skb_queue_tail(&txqi->frags, skb); + } else { + fq_tin_enqueue(fq, tin, flow_idx, skb, + fq_skb_free_func); + } + spin_unlock_bh(&fq->lock); +} + +static bool fq_vlan_filter_func(struct fq *fq, struct fq_tin *tin, + struct fq_flow *flow, struct sk_buff *skb, + void *data) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + return info->control.vif == data; +} + +void ieee80211_txq_remove_vlan(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + struct fq *fq = &local->fq; + struct txq_info *txqi; + struct fq_tin *tin; + struct ieee80211_sub_if_data *ap; + + if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN)) + return; + + ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); + + if (!ap->vif.txq) + return; + + txqi = to_txq_info(ap->vif.txq); + tin = &txqi->tin; + + spin_lock_bh(&fq->lock); + fq_tin_filter(fq, tin, fq_vlan_filter_func, &sdata->vif, + fq_skb_free_func); + spin_unlock_bh(&fq->lock); +} + +void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct txq_info *txqi, int tid) +{ + fq_tin_init(&txqi->tin); + codel_vars_init(&txqi->def_cvars); + codel_stats_init(&txqi->cstats); + __skb_queue_head_init(&txqi->frags); + INIT_LIST_HEAD(&txqi->schedule_order); + + txqi->txq.vif = &sdata->vif; + + if (!sta) { + sdata->vif.txq = &txqi->txq; + txqi->txq.tid = 0; + txqi->txq.ac = IEEE80211_AC_BE; + + return; + } + + if (tid == IEEE80211_NUM_TIDS) { + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + /* Drivers need to opt in to the management MPDU TXQ */ + if (!ieee80211_hw_check(&sdata->local->hw, + STA_MMPDU_TXQ)) + return; + } else if (!ieee80211_hw_check(&sdata->local->hw, + BUFF_MMPDU_TXQ)) { + /* Drivers need to opt in to the bufferable MMPDU TXQ */ + return; + } + txqi->txq.ac = IEEE80211_AC_VO; + } else { + txqi->txq.ac = ieee80211_ac_from_tid(tid); + } + + txqi->txq.sta = &sta->sta; + txqi->txq.tid = tid; + sta->sta.txq[tid] = &txqi->txq; +} + +void ieee80211_txq_purge(struct ieee80211_local *local, + struct txq_info *txqi) +{ + struct fq *fq = &local->fq; + struct fq_tin *tin = &txqi->tin; + + spin_lock_bh(&fq->lock); + fq_tin_reset(fq, tin, fq_skb_free_func); + ieee80211_purge_tx_queue(&local->hw, &txqi->frags); + spin_unlock_bh(&fq->lock); + + spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]); + list_del_init(&txqi->schedule_order); + spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]); +} + +void ieee80211_txq_set_params(struct ieee80211_local *local) +{ + if (local->hw.wiphy->txq_limit) + local->fq.limit = local->hw.wiphy->txq_limit; + else + local->hw.wiphy->txq_limit = local->fq.limit; + + if (local->hw.wiphy->txq_memory_limit) + local->fq.memory_limit = local->hw.wiphy->txq_memory_limit; + else + local->hw.wiphy->txq_memory_limit = local->fq.memory_limit; + + if (local->hw.wiphy->txq_quantum) + local->fq.quantum = local->hw.wiphy->txq_quantum; + else + local->hw.wiphy->txq_quantum = local->fq.quantum; +} + +int ieee80211_txq_setup_flows(struct ieee80211_local *local) +{ + struct fq *fq = &local->fq; + int ret; + int i; + bool supp_vht = false; + enum nl80211_band band; + + ret = fq_init(fq, 4096); + if (ret) + return ret; + + /* + * If the hardware doesn't support VHT, it is safe to limit the maximum + * queue size. 4 Mbytes is 64 max-size aggregates in 802.11n. 
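+	 * (An 802.11n A-MPDU is at most 64 KiB, so 64 max-size aggregates
+	 * come to 64 * 64 KiB = 4 MiB, the 4 << 20 limit applied below.)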
+ */ + for (band = 0; band < NUM_NL80211_BANDS; band++) { + struct ieee80211_supported_band *sband; + + sband = local->hw.wiphy->bands[band]; + if (!sband) + continue; + + supp_vht = supp_vht || sband->vht_cap.vht_supported; + } + + if (!supp_vht) + fq->memory_limit = 4 << 20; /* 4 Mbytes */ + + codel_params_init(&local->cparams); + local->cparams.interval = MS2TIME(100); + local->cparams.target = MS2TIME(20); + local->cparams.ecn = true; + + local->cvars = kcalloc(fq->flows_cnt, sizeof(local->cvars[0]), + GFP_KERNEL); + if (!local->cvars) { + spin_lock_bh(&fq->lock); + fq_reset(fq, fq_skb_free_func); + spin_unlock_bh(&fq->lock); + return -ENOMEM; + } + + for (i = 0; i < fq->flows_cnt; i++) + codel_vars_init(&local->cvars[i]); + + ieee80211_txq_set_params(local); + + return 0; +} + +void ieee80211_txq_teardown_flows(struct ieee80211_local *local) +{ + struct fq *fq = &local->fq; + + kfree(local->cvars); + local->cvars = NULL; + + spin_lock_bh(&fq->lock); + fq_reset(fq, fq_skb_free_func); + spin_unlock_bh(&fq->lock); +} + +static bool ieee80211_queue_skb(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct sk_buff *skb) +{ + struct ieee80211_vif *vif; + struct txq_info *txqi; + + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) + return false; + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = container_of(sdata->bss, + struct ieee80211_sub_if_data, u.ap); + + vif = &sdata->vif; + txqi = ieee80211_get_txq(local, vif, sta, skb); + + if (!txqi) + return false; + + ieee80211_txq_enqueue(local, txqi, skb); + + schedule_and_wake_txq(local, txqi); + + return true; +} + +static bool ieee80211_tx_frags(struct ieee80211_local *local, + struct ieee80211_vif *vif, + struct sta_info *sta, + struct sk_buff_head *skbs, + bool txpending) +{ + struct ieee80211_tx_control control = {}; + struct sk_buff *skb, *tmp; + unsigned long flags; + + skb_queue_walk_safe(skbs, skb, tmp) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int q = info->hw_queue; + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + if (WARN_ON_ONCE(q >= local->hw.queues)) { + __skb_unlink(skb, skbs); + ieee80211_free_txskb(&local->hw, skb); + continue; + } +#endif + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + if (local->queue_stop_reasons[q] || + (!txpending && !skb_queue_empty(&local->pending[q]))) { + if (unlikely(info->flags & + IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) { + if (local->queue_stop_reasons[q] & + ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) { + /* + * Drop off-channel frames if queues + * are stopped for any reason other + * than off-channel operation. Never + * queue them. + */ + spin_unlock_irqrestore( + &local->queue_stop_reason_lock, + flags); + ieee80211_purge_tx_queue(&local->hw, + skbs); + return true; + } + } else { + + /* + * Since queue is stopped, queue up frames for + * later transmission from the tx-pending + * tasklet when the queue is woken again. + */ + if (txpending) + skb_queue_splice_init(skbs, + &local->pending[q]); + else + skb_queue_splice_tail_init(skbs, + &local->pending[q]); + + spin_unlock_irqrestore(&local->queue_stop_reason_lock, + flags); + return false; + } + } + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + + info->control.vif = vif; + control.sta = sta ? &sta->sta : NULL; + + __skb_unlink(skb, skbs); + drv_tx(local, &control, skb); + } + + return true; +} + +/* + * Returns false if the frame couldn't be transmitted but was queued instead. 
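+ * In that case the frames have been spliced onto local->pending[q] and
+ * will be retried from the tx-pending tasklet once the queue is woken
+ * again.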
+ */ +static bool __ieee80211_tx(struct ieee80211_local *local, + struct sk_buff_head *skbs, struct sta_info *sta, + bool txpending) +{ + struct ieee80211_tx_info *info; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_vif *vif; + struct sk_buff *skb; + bool result; + + if (WARN_ON(skb_queue_empty(skbs))) + return true; + + skb = skb_peek(skbs); + info = IEEE80211_SKB_CB(skb); + sdata = vif_to_sdata(info->control.vif); + if (sta && !sta->uploaded) + sta = NULL; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_MONITOR: + if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) { + vif = &sdata->vif; + break; + } + sdata = rcu_dereference(local->monitor_sdata); + if (sdata) { + vif = &sdata->vif; + info->hw_queue = + vif->hw_queue[skb_get_queue_mapping(skb)]; + } else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) { + ieee80211_purge_tx_queue(&local->hw, skbs); + return true; + } else + vif = NULL; + break; + case NL80211_IFTYPE_AP_VLAN: + sdata = container_of(sdata->bss, + struct ieee80211_sub_if_data, u.ap); + fallthrough; + default: + vif = &sdata->vif; + break; + } + + result = ieee80211_tx_frags(local, vif, sta, skbs, txpending); + + WARN_ON_ONCE(!skb_queue_empty(skbs)); + + return result; +} + +/* + * Invoke TX handlers, return 0 on success and non-zero if the + * frame was dropped or queued. + * + * The handlers are split into an early and late part. The latter is everything + * that can be sensitive to reordering, and will be deferred to after packets + * are dequeued from the intermediate queues (when they are enabled). + */ +static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx) +{ + ieee80211_tx_result res = TX_DROP; + +#define CALL_TXH(txh) \ + do { \ + res = txh(tx); \ + if (res != TX_CONTINUE) \ + goto txh_done; \ + } while (0) + + CALL_TXH(ieee80211_tx_h_dynamic_ps); + CALL_TXH(ieee80211_tx_h_check_assoc); + CALL_TXH(ieee80211_tx_h_ps_buf); + CALL_TXH(ieee80211_tx_h_check_control_port_protocol); + CALL_TXH(ieee80211_tx_h_select_key); + + txh_done: + if (unlikely(res == TX_DROP)) { + I802_DEBUG_INC(tx->local->tx_handlers_drop); + if (tx->skb) + ieee80211_free_txskb(&tx->local->hw, tx->skb); + else + ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs); + return -1; + } else if (unlikely(res == TX_QUEUED)) { + I802_DEBUG_INC(tx->local->tx_handlers_queued); + return -1; + } + + return 0; +} + +/* + * Late handlers can be called while the sta lock is held. Handlers that can + * cause packets to be generated will cause deadlock! + */ +static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + ieee80211_tx_result res = TX_CONTINUE; + + if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL)) + CALL_TXH(ieee80211_tx_h_rate_ctrl); + + if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) { + __skb_queue_tail(&tx->skbs, tx->skb); + tx->skb = NULL; + goto txh_done; + } + + CALL_TXH(ieee80211_tx_h_michael_mic_add); + CALL_TXH(ieee80211_tx_h_sequence); + CALL_TXH(ieee80211_tx_h_fragment); + /* handlers after fragment must be aware of tx info fragmentation! 
*/ + CALL_TXH(ieee80211_tx_h_stats); + CALL_TXH(ieee80211_tx_h_encrypt); + if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL)) + CALL_TXH(ieee80211_tx_h_calculate_duration); +#undef CALL_TXH + + txh_done: + if (unlikely(res == TX_DROP)) { + I802_DEBUG_INC(tx->local->tx_handlers_drop); + if (tx->skb) + ieee80211_free_txskb(&tx->local->hw, tx->skb); + else + ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs); + return -1; + } else if (unlikely(res == TX_QUEUED)) { + I802_DEBUG_INC(tx->local->tx_handlers_queued); + return -1; + } + + return 0; +} + +static int invoke_tx_handlers(struct ieee80211_tx_data *tx) +{ + int r = invoke_tx_handlers_early(tx); + + if (r) + return r; + return invoke_tx_handlers_late(tx); +} + +bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, struct sk_buff *skb, + int band, struct ieee80211_sta **sta) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_data tx; + struct sk_buff *skb2; + + if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP) + return false; + + info->band = band; + info->control.vif = vif; + info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)]; + + if (invoke_tx_handlers(&tx)) + return false; + + if (sta) { + if (tx.sta) + *sta = &tx.sta->sta; + else + *sta = NULL; + } + + /* this function isn't suitable for fragmented data frames */ + skb2 = __skb_dequeue(&tx.skbs); + if (WARN_ON(skb2 != skb || !skb_queue_empty(&tx.skbs))) { + ieee80211_free_txskb(hw, skb2); + ieee80211_purge_tx_queue(hw, &tx.skbs); + return false; + } + + return true; +} +EXPORT_SYMBOL(ieee80211_tx_prepare_skb); + +/* + * Returns false if the frame couldn't be transmitted but was queued instead. + */ +static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, struct sk_buff *skb, + bool txpending) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_tx_data tx; + ieee80211_tx_result res_prepare; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + bool result = true; + + if (unlikely(skb->len < 10)) { + dev_kfree_skb(skb); + return true; + } + + /* initialises tx */ + res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb); + + if (unlikely(res_prepare == TX_DROP)) { + ieee80211_free_txskb(&local->hw, skb); + return true; + } else if (unlikely(res_prepare == TX_QUEUED)) { + return true; + } + + /* set up hw_queue value early */ + if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) || + !ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) + info->hw_queue = + sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; + + if (invoke_tx_handlers_early(&tx)) + return true; + + if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb)) + return true; + + if (!invoke_tx_handlers_late(&tx)) + result = __ieee80211_tx(local, &tx.skbs, tx.sta, txpending); + + return result; +} + +/* device xmit handlers */ + +enum ieee80211_encrypt { + ENCRYPT_NO, + ENCRYPT_MGMT, + ENCRYPT_DATA, +}; + +static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, + int head_need, + enum ieee80211_encrypt encrypt) +{ + struct ieee80211_local *local = sdata->local; + bool enc_tailroom; + int tail_need = 0; + + enc_tailroom = encrypt == ENCRYPT_MGMT || + (encrypt == ENCRYPT_DATA && + sdata->crypto_tx_tailroom_needed_cnt); + + if (enc_tailroom) { + tail_need = IEEE80211_ENCRYPT_TAILROOM; + tail_need -= skb_tailroom(skb); + tail_need = max_t(int, tail_need, 0); + } + + if (skb_cloned(skb) && + 
(!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) || + !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom)) + I802_DEBUG_INC(local->tx_expand_skb_head_cloned); + else if (head_need || tail_need) + I802_DEBUG_INC(local->tx_expand_skb_head); + else + return 0; + + if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) { + wiphy_debug(local->hw.wiphy, + "failed to reallocate TX buffer\n"); + return -ENOMEM; + } + + return 0; +} + +void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + int headroom; + enum ieee80211_encrypt encrypt; + + if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT) + encrypt = ENCRYPT_NO; + else if (ieee80211_is_mgmt(hdr->frame_control)) + encrypt = ENCRYPT_MGMT; + else + encrypt = ENCRYPT_DATA; + + headroom = local->tx_headroom; + if (encrypt != ENCRYPT_NO) + headroom += IEEE80211_ENCRYPT_HEADROOM; + headroom -= skb_headroom(skb); + headroom = max_t(int, 0, headroom); + + if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) { + ieee80211_free_txskb(&local->hw, skb); + return; + } + + /* reload after potential resize */ + hdr = (struct ieee80211_hdr *) skb->data; + info->control.vif = &sdata->vif; + + if (ieee80211_vif_is_mesh(&sdata->vif)) { + if (ieee80211_is_data(hdr->frame_control) && + is_unicast_ether_addr(hdr->addr1)) { + if (mesh_nexthop_resolve(sdata, skb)) + return; /* skb queued: don't free */ + } else { + ieee80211_mps_set_frame_flags(sdata, NULL, hdr); + } + } + + ieee80211_set_qos_hdr(sdata, skb); + ieee80211_tx(sdata, sta, skb, false); +} + +static bool ieee80211_validate_radiotap_len(struct sk_buff *skb) +{ + struct ieee80211_radiotap_header *rthdr = + (struct ieee80211_radiotap_header *)skb->data; + + /* check for not even having the fixed radiotap header part */ + if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) + return false; /* too short to be possibly valid */ + + /* is it a header version we can trust to find length from? */ + if (unlikely(rthdr->it_version)) + return false; /* only version 0 is supported */ + + /* does the skb contain enough to deliver on the alleged length? 
*/ + if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data))) + return false; /* skb too short for claimed rt header extent */ + + return true; +} + +bool ieee80211_parse_tx_radiotap(struct sk_buff *skb, + struct net_device *dev) +{ + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_radiotap_iterator iterator; + struct ieee80211_radiotap_header *rthdr = + (struct ieee80211_radiotap_header *) skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len, + NULL); + u16 txflags; + u16 rate = 0; + bool rate_found = false; + u8 rate_retries = 0; + u16 rate_flags = 0; + u8 mcs_known, mcs_flags, mcs_bw; + u16 vht_known; + u8 vht_mcs = 0, vht_nss = 0; + int i; + + if (!ieee80211_validate_radiotap_len(skb)) + return false; + + info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | + IEEE80211_TX_CTL_DONTFRAG; + + /* + * for every radiotap entry that is present + * (ieee80211_radiotap_iterator_next returns -ENOENT when no more + * entries present, or -EINVAL on error) + */ + + while (!ret) { + ret = ieee80211_radiotap_iterator_next(&iterator); + + if (ret) + continue; + + /* see if this argument is something we can use */ + switch (iterator.this_arg_index) { + /* + * You must take care when dereferencing iterator.this_arg + * for multibyte types... the pointer is not aligned. Use + * get_unaligned((type *)iterator.this_arg) to dereference + * iterator.this_arg for type "type" safely on all arches. + */ + case IEEE80211_RADIOTAP_FLAGS: + if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) { + /* + * this indicates that the skb we have been + * handed has the 32-bit FCS CRC at the end... + * we should react to that by snipping it off + * because it will be recomputed and added + * on transmission + */ + if (skb->len < (iterator._max_length + FCS_LEN)) + return false; + + skb_trim(skb, skb->len - FCS_LEN); + } + if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) + info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT; + if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) + info->flags &= ~IEEE80211_TX_CTL_DONTFRAG; + break; + + case IEEE80211_RADIOTAP_TX_FLAGS: + txflags = get_unaligned_le16(iterator.this_arg); + if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK) + info->flags |= IEEE80211_TX_CTL_NO_ACK; + if (txflags & IEEE80211_RADIOTAP_F_TX_NOSEQNO) + info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO; + if (txflags & IEEE80211_RADIOTAP_F_TX_ORDER) + info->control.flags |= + IEEE80211_TX_CTRL_DONT_REORDER; + break; + + case IEEE80211_RADIOTAP_RATE: + rate = *iterator.this_arg; + rate_flags = 0; + rate_found = true; + break; + + case IEEE80211_RADIOTAP_DATA_RETRIES: + rate_retries = *iterator.this_arg; + break; + + case IEEE80211_RADIOTAP_MCS: + mcs_known = iterator.this_arg[0]; + mcs_flags = iterator.this_arg[1]; + if (!(mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_MCS)) + break; + + rate_found = true; + rate = iterator.this_arg[2]; + rate_flags = IEEE80211_TX_RC_MCS; + + if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_GI && + mcs_flags & IEEE80211_RADIOTAP_MCS_SGI) + rate_flags |= IEEE80211_TX_RC_SHORT_GI; + + mcs_bw = mcs_flags & IEEE80211_RADIOTAP_MCS_BW_MASK; + if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_BW && + mcs_bw == IEEE80211_RADIOTAP_MCS_BW_40) + rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + + if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_FEC && + mcs_flags & IEEE80211_RADIOTAP_MCS_FEC_LDPC) + info->flags |= IEEE80211_TX_CTL_LDPC; + + if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_STBC) { + u8 stbc = 
u8_get_bits(mcs_flags, + IEEE80211_RADIOTAP_MCS_STBC_MASK); + + info->flags |= + u32_encode_bits(stbc, + IEEE80211_TX_CTL_STBC); + } + break; + + case IEEE80211_RADIOTAP_VHT: + vht_known = get_unaligned_le16(iterator.this_arg); + rate_found = true; + + rate_flags = IEEE80211_TX_RC_VHT_MCS; + if ((vht_known & IEEE80211_RADIOTAP_VHT_KNOWN_GI) && + (iterator.this_arg[2] & + IEEE80211_RADIOTAP_VHT_FLAG_SGI)) + rate_flags |= IEEE80211_TX_RC_SHORT_GI; + if (vht_known & + IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) { + if (iterator.this_arg[3] == 1) + rate_flags |= + IEEE80211_TX_RC_40_MHZ_WIDTH; + else if (iterator.this_arg[3] == 4) + rate_flags |= + IEEE80211_TX_RC_80_MHZ_WIDTH; + else if (iterator.this_arg[3] == 11) + rate_flags |= + IEEE80211_TX_RC_160_MHZ_WIDTH; + } + + vht_mcs = iterator.this_arg[4] >> 4; + if (vht_mcs > 11) + vht_mcs = 0; + vht_nss = iterator.this_arg[4] & 0xF; + if (!vht_nss || vht_nss > 8) + vht_nss = 1; + break; + + /* + * Please update the file + * Documentation/networking/mac80211-injection.rst + * when parsing new fields here. + */ + + default: + break; + } + } + + if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */ + return false; + + if (rate_found) { + struct ieee80211_supported_band *sband = + local->hw.wiphy->bands[info->band]; + + info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT; + + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + info->control.rates[i].idx = -1; + info->control.rates[i].flags = 0; + info->control.rates[i].count = 0; + } + + if (rate_flags & IEEE80211_TX_RC_MCS) { + info->control.rates[0].idx = rate; + } else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) { + ieee80211_rate_set_vht(info->control.rates, vht_mcs, + vht_nss); + } else if (sband) { + for (i = 0; i < sband->n_bitrates; i++) { + if (rate * 5 != sband->bitrates[i].bitrate) + continue; + + info->control.rates[0].idx = i; + break; + } + } + + if (info->control.rates[0].idx < 0) + info->control.flags &= ~IEEE80211_TX_CTRL_RATE_INJECT; + + info->control.rates[0].flags = rate_flags; + info->control.rates[0].count = min_t(u8, rate_retries + 1, + local->hw.max_rate_tries); + } + + return true; +} + +netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr; + struct ieee80211_sub_if_data *tmp_sdata, *sdata; + struct cfg80211_chan_def *chandef; + u16 len_rthdr; + int hdrlen; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + if (unlikely(!ieee80211_sdata_running(sdata))) + goto fail; + + memset(info, 0, sizeof(*info)); + info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS | + IEEE80211_TX_CTL_INJECTED; + + /* Sanity-check the length of the radiotap header */ + if (!ieee80211_validate_radiotap_len(skb)) + goto fail; + + /* we now know there is a radiotap header with a length we can use */ + len_rthdr = ieee80211_get_radiotap_len(skb->data); + + /* + * fix up the pointers accounting for the radiotap + * header still being in there. 
We are being given + * a precooked IEEE80211 header so no need for + * normal processing + */ + skb_set_mac_header(skb, len_rthdr); + /* + * these are just fixed to the end of the rt area since we + * don't have any better information and at this point, nobody cares + */ + skb_set_network_header(skb, len_rthdr); + skb_set_transport_header(skb, len_rthdr); + + if (skb->len < len_rthdr + 2) + goto fail; + + hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); + hdrlen = ieee80211_hdrlen(hdr->frame_control); + + if (skb->len < len_rthdr + hdrlen) + goto fail; + + /* + * Initialize skb->protocol if the injected frame is a data frame + * carrying a rfc1042 header + */ + if (ieee80211_is_data(hdr->frame_control) && + skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) { + u8 *payload = (u8 *)hdr + hdrlen; + + if (ether_addr_equal(payload, rfc1042_header)) + skb->protocol = cpu_to_be16((payload[6] << 8) | + payload[7]); + } + + rcu_read_lock(); + + /* + * We process outgoing injected frames that have a local address + * we handle as though they are non-injected frames. + * This code here isn't entirely correct, the local MAC address + * isn't always enough to find the interface to use; for proper + * VLAN support we have an nl80211-based mechanism. + * + * This is necessary, for example, for old hostapd versions that + * don't use nl80211-based management TX/RX. + */ + list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(tmp_sdata)) + continue; + if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR || + tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + continue; + if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) { + sdata = tmp_sdata; + break; + } + } + + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + if (!chanctx_conf) { + tmp_sdata = rcu_dereference(local->monitor_sdata); + if (tmp_sdata) + chanctx_conf = + rcu_dereference(tmp_sdata->vif.bss_conf.chanctx_conf); + } + + if (chanctx_conf) + chandef = &chanctx_conf->def; + else if (!local->use_chanctx) + chandef = &local->_oper_chandef; + else + goto fail_rcu; + + /* + * Frame injection is not allowed if beaconing is not allowed + * or if we need radar detection. Beaconing is usually not allowed when + * the mode or operation (Adhoc, AP, Mesh) does not support DFS. + * Passive scan is also used in world regulatory domains where + * your country is not known and as such it should be treated as + * NO TX unless the channel is explicitly allowed in which case + * your current regulatory domain would not have the passive scan + * flag. + * + * Since AP mode uses monitor interfaces to inject/TX management + * frames we can make AP mode the exception to this rule once it + * supports radar detection as its implementation can deal with + * radar detection by itself. We can do that later by adding a + * monitor flag interfaces used for AP support. + */ + if (!cfg80211_reg_can_beacon(local->hw.wiphy, chandef, + sdata->vif.type)) + goto fail_rcu; + + info->band = chandef->chan->band; + + /* Initialize skb->priority according to frame type and TID class, + * with respect to the sub interface that the frame will actually + * be transmitted on. If the DONT_REORDER flag is set, the original + * skb-priority is preserved to assure frames injected with this + * flag are not reordered relative to each other. + */ + ieee80211_select_queue_80211(sdata, skb, hdr); + skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority)); + + /* + * Process the radiotap header. 
This will now take into account the + * selected chandef above to accurately set injection rates and + * retransmissions. + */ + if (!ieee80211_parse_tx_radiotap(skb, dev)) + goto fail_rcu; + + /* remove the injection radiotap header */ + skb_pull(skb, len_rthdr); + + ieee80211_xmit(sdata, NULL, skb); + rcu_read_unlock(); + + return NETDEV_TX_OK; + +fail_rcu: + rcu_read_unlock(); +fail: + dev_kfree_skb(skb); + return NETDEV_TX_OK; /* meaning, we dealt with the skb */ +} + +static inline bool ieee80211_is_tdls_setup(struct sk_buff *skb) +{ + u16 ethertype = (skb->data[12] << 8) | skb->data[13]; + + return ethertype == ETH_P_TDLS && + skb->len > 14 && + skb->data[14] == WLAN_TDLS_SNAP_RFTYPE; +} + +int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, + struct sta_info **sta_out) +{ + struct sta_info *sta; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + sta = rcu_dereference(sdata->u.vlan.sta); + if (sta) { + *sta_out = sta; + return 0; + } else if (sdata->wdev.use_4addr) { + return -ENOLINK; + } + fallthrough; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_OCB: + case NL80211_IFTYPE_ADHOC: + if (is_multicast_ether_addr(skb->data)) { + *sta_out = ERR_PTR(-ENOENT); + return 0; + } + sta = sta_info_get_bss(sdata, skb->data); + break; +#ifdef CONFIG_MAC80211_MESH + case NL80211_IFTYPE_MESH_POINT: + /* determined much later */ + *sta_out = NULL; + return 0; +#endif + case NL80211_IFTYPE_STATION: + if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) { + sta = sta_info_get(sdata, skb->data); + if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { + if (test_sta_flag(sta, + WLAN_STA_TDLS_PEER_AUTH)) { + *sta_out = sta; + return 0; + } + + /* + * TDLS link during setup - throw out frames to + * peer. Allow TDLS-setup frames to unauthorized + * peers for the special case of a link teardown + * after a TDLS sta is removed due to being + * unreachable. + */ + if (!ieee80211_is_tdls_setup(skb)) + return -EINVAL; + } + + } + + sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr); + if (!sta) + return -ENOLINK; + break; + default: + return -EINVAL; + } + + *sta_out = sta ?: ERR_PTR(-ENOENT); + return 0; +} + +static u16 ieee80211_store_ack_skb(struct ieee80211_local *local, + struct sk_buff *skb, + u32 *info_flags, + u64 *cookie) +{ + struct sk_buff *ack_skb; + u16 info_id = 0; + + if (skb->sk) + ack_skb = skb_clone_sk(skb); + else + ack_skb = skb_clone(skb, GFP_ATOMIC); + + if (ack_skb) { + unsigned long flags; + int id; + + spin_lock_irqsave(&local->ack_status_lock, flags); + id = idr_alloc(&local->ack_status_frames, ack_skb, + 1, 0x2000, GFP_ATOMIC); + spin_unlock_irqrestore(&local->ack_status_lock, flags); + + if (id >= 0) { + info_id = id; + *info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + if (cookie) { + *cookie = ieee80211_mgmt_tx_cookie(local); + IEEE80211_SKB_CB(ack_skb)->ack.cookie = *cookie; + } + } else { + kfree_skb(ack_skb); + } + } + + return info_id; +} + +/** + * ieee80211_build_hdr - build 802.11 header in the given frame + * @sdata: virtual interface to build the header for + * @skb: the skb to build the header in + * @info_flags: skb flags to set + * @sta: the station pointer + * @ctrl_flags: info control flags to set + * @cookie: cookie pointer to fill (if not %NULL) + * + * This function takes the skb with 802.3 header and reformats the header to + * the appropriate IEEE 802.11 header based on which interface the packet is + * being transmitted on. 
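+ *
+ * For reference, the address layouts built below are, by interface type:
+ *	AP (FromDS):		addr1 = DA, addr2 = BSSID, addr3 = SA
+ *	STA (ToDS):		addr1 = BSSID, addr2 = SA, addr3 = DA
+ *	4-address (STA/VLAN):	addr1 = RA, addr2 = TA, addr3 = DA, addr4 = SA
+ *	IBSS/OCB:		addr1 = DA, addr2 = SA, addr3 = BSSID
+ *	Mesh:			uses the 802.11s extended addressing (see below)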
+ * + * Note that this function also takes care of the TX status request and + * potential unsharing of the SKB - this needs to be interleaved with the + * header building. + * + * The function requires the read-side RCU lock held + * + * Returns: the (possibly reallocated) skb or an ERR_PTR() code + */ +static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u32 info_flags, + struct sta_info *sta, u32 ctrl_flags, + u64 *cookie) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_tx_info *info; + int head_need; + u16 ethertype, hdrlen, meshhdrlen = 0; + __le16 fc; + struct ieee80211_hdr hdr; + struct ieee80211s_hdr mesh_hdr __maybe_unused; + struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL; + const u8 *encaps_data; + int encaps_len, skip_header_bytes; + bool wme_sta = false, authorized = false; + bool tdls_peer; + bool multicast; + u16 info_id = 0; + struct ieee80211_chanctx_conf *chanctx_conf = NULL; + enum nl80211_band band; + int ret; + u8 link_id = u32_get_bits(ctrl_flags, IEEE80211_TX_CTRL_MLO_LINK); + + if (IS_ERR(sta)) + sta = NULL; + +#ifdef CONFIG_MAC80211_DEBUGFS + if (local->force_tx_status) + info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; +#endif + + /* convert Ethernet header to proper 802.11 header (based on + * operation mode) */ + ethertype = (skb->data[12] << 8) | skb->data[13]; + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); + + if (!ieee80211_vif_is_mld(&sdata->vif)) + chanctx_conf = + rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + if (sdata->wdev.use_4addr) { + fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); + /* RA TA DA SA */ + memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN); + memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); + memcpy(hdr.addr3, skb->data, ETH_ALEN); + memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); + hdrlen = 30; + authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); + wme_sta = sta->sta.wme; + } + if (!ieee80211_vif_is_mld(&sdata->vif)) { + struct ieee80211_sub_if_data *ap_sdata; + + /* override chanctx_conf from AP (we don't have one) */ + ap_sdata = container_of(sdata->bss, + struct ieee80211_sub_if_data, + u.ap); + chanctx_conf = + rcu_dereference(ap_sdata->vif.bss_conf.chanctx_conf); + } + if (sdata->wdev.use_4addr) + break; + fallthrough; + case NL80211_IFTYPE_AP: + fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); + /* DA BSSID SA */ + memcpy(hdr.addr1, skb->data, ETH_ALEN); + + if (ieee80211_vif_is_mld(&sdata->vif) && sta && !sta->sta.mlo) { + struct ieee80211_link_data *link; + + link_id = sta->deflink.link_id; + link = rcu_dereference(sdata->link[link_id]); + if (WARN_ON(!link)) { + ret = -ENOLINK; + goto free; + } + memcpy(hdr.addr2, link->conf->addr, ETH_ALEN); + } else if (link_id == IEEE80211_LINK_UNSPECIFIED || + (sta && sta->sta.mlo)) { + memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); + } else { + struct ieee80211_bss_conf *conf; + + conf = rcu_dereference(sdata->vif.link_conf[link_id]); + if (unlikely(!conf)) { + ret = -ENOLINK; + goto free; + } + + memcpy(hdr.addr2, conf->addr, ETH_ALEN); + } + + memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); + hdrlen = 24; + break; +#ifdef CONFIG_MAC80211_MESH + case NL80211_IFTYPE_MESH_POINT: + if (!is_multicast_ether_addr(skb->data)) { + struct sta_info *next_hop; + bool mpp_lookup = true; + + mpath = mesh_path_lookup(sdata, skb->data); + if (mpath) { + mpp_lookup = false; + next_hop = rcu_dereference(mpath->next_hop); + if 
(!next_hop || + !(mpath->flags & (MESH_PATH_ACTIVE | + MESH_PATH_RESOLVING))) + mpp_lookup = true; + } + + if (mpp_lookup) { + mppath = mpp_path_lookup(sdata, skb->data); + if (mppath) + mppath->exp_time = jiffies; + } + + if (mppath && mpath) + mesh_path_del(sdata, mpath->dst); + } + + /* + * Use address extension if it is a packet from + * another interface or if we know the destination + * is being proxied by a portal (i.e. portal address + * differs from proxied address) + */ + if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) && + !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) { + hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, + skb->data, skb->data + ETH_ALEN); + meshhdrlen = ieee80211_new_mesh_header(sdata, &mesh_hdr, + NULL, NULL); + } else { + /* DS -> MBSS (802.11-2012 13.11.3.3). + * For unicast with unknown forwarding information, + * destination might be in the MBSS or if that fails + * forwarded to another mesh gate. In either case + * resolution will be handled in ieee80211_xmit(), so + * leave the original DA. This also works for mcast */ + const u8 *mesh_da = skb->data; + + if (mppath) + mesh_da = mppath->mpp; + else if (mpath) + mesh_da = mpath->dst; + + hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, + mesh_da, sdata->vif.addr); + if (is_multicast_ether_addr(mesh_da)) + /* DA TA mSA AE:SA */ + meshhdrlen = ieee80211_new_mesh_header( + sdata, &mesh_hdr, + skb->data + ETH_ALEN, NULL); + else + /* RA TA mDA mSA AE:DA SA */ + meshhdrlen = ieee80211_new_mesh_header( + sdata, &mesh_hdr, skb->data, + skb->data + ETH_ALEN); + + } + + /* For injected frames, fill RA right away as nexthop lookup + * will be skipped. + */ + if ((ctrl_flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP) && + is_zero_ether_addr(hdr.addr1)) + memcpy(hdr.addr1, skb->data, ETH_ALEN); + break; +#endif + case NL80211_IFTYPE_STATION: + /* we already did checks when looking up the RA STA */ + tdls_peer = test_sta_flag(sta, WLAN_STA_TDLS_PEER); + + if (tdls_peer) { + /* For TDLS only one link can be valid with peer STA */ + int tdls_link_id = sta->sta.valid_links ? 
+ __ffs(sta->sta.valid_links) : 0; + struct ieee80211_link_data *link; + + /* DA SA BSSID */ + memcpy(hdr.addr1, skb->data, ETH_ALEN); + memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); + link = rcu_dereference(sdata->link[tdls_link_id]); + if (WARN_ON_ONCE(!link)) { + ret = -EINVAL; + goto free; + } + memcpy(hdr.addr3, link->u.mgd.bssid, ETH_ALEN); + hdrlen = 24; + } else if (sdata->u.mgd.use_4addr && + cpu_to_be16(ethertype) != sdata->control_port_protocol) { + fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | + IEEE80211_FCTL_TODS); + /* RA TA DA SA */ + memcpy(hdr.addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN); + memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); + memcpy(hdr.addr3, skb->data, ETH_ALEN); + memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); + hdrlen = 30; + } else { + fc |= cpu_to_le16(IEEE80211_FCTL_TODS); + /* BSSID SA DA */ + memcpy(hdr.addr1, sdata->vif.cfg.ap_addr, ETH_ALEN); + memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); + memcpy(hdr.addr3, skb->data, ETH_ALEN); + hdrlen = 24; + } + break; + case NL80211_IFTYPE_OCB: + /* DA SA BSSID */ + memcpy(hdr.addr1, skb->data, ETH_ALEN); + memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); + eth_broadcast_addr(hdr.addr3); + hdrlen = 24; + break; + case NL80211_IFTYPE_ADHOC: + /* DA SA BSSID */ + memcpy(hdr.addr1, skb->data, ETH_ALEN); + memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); + memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN); + hdrlen = 24; + break; + default: + ret = -EINVAL; + goto free; + } + + if (!chanctx_conf) { + if (!ieee80211_vif_is_mld(&sdata->vif)) { + ret = -ENOTCONN; + goto free; + } + /* MLD transmissions must not rely on the band */ + band = 0; + } else { + band = chanctx_conf->def.chan->band; + } + + multicast = is_multicast_ether_addr(hdr.addr1); + + /* sta is always NULL for mesh */ + if (sta) { + authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); + wme_sta = sta->sta.wme; + } else if (ieee80211_vif_is_mesh(&sdata->vif)) { + /* For mesh, the use of the QoS header is mandatory */ + wme_sta = true; + } + + /* receiver does QoS (which also means we do) use it */ + if (wme_sta) { + fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); + hdrlen += 2; + } + + /* + * Drop unicast frames to unauthorised stations unless they are + * EAPOL frames from the local station. + */ + if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) && + (sdata->vif.type != NL80211_IFTYPE_OCB) && + !multicast && !authorized && + (cpu_to_be16(ethertype) != sdata->control_port_protocol || + !ieee80211_is_our_addr(sdata, skb->data + ETH_ALEN, NULL)))) { +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n", + sdata->name, hdr.addr1); +#endif + + I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); + + ret = -EPERM; + goto free; + } + + if (unlikely(!multicast && ((skb->sk && + skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) || + ctrl_flags & IEEE80211_TX_CTL_REQ_TX_STATUS))) + info_id = ieee80211_store_ack_skb(local, skb, &info_flags, + cookie); + + /* + * If the skb is shared we need to obtain our own copy. 
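+	 * skb_share_check() below hands us a private clone in that case,
+	 * or NULL on allocation failure (turned into -ENOMEM).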
+ */ + skb = skb_share_check(skb, GFP_ATOMIC); + if (unlikely(!skb)) { + ret = -ENOMEM; + goto free; + } + + hdr.frame_control = fc; + hdr.duration_id = 0; + hdr.seq_ctrl = 0; + + skip_header_bytes = ETH_HLEN; + if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) { + encaps_data = bridge_tunnel_header; + encaps_len = sizeof(bridge_tunnel_header); + skip_header_bytes -= 2; + } else if (ethertype >= ETH_P_802_3_MIN) { + encaps_data = rfc1042_header; + encaps_len = sizeof(rfc1042_header); + skip_header_bytes -= 2; + } else { + encaps_data = NULL; + encaps_len = 0; + } + + skb_pull(skb, skip_header_bytes); + head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb); + + /* + * So we need to modify the skb header and hence need a copy of + * that. The head_need variable above doesn't, so far, include + * the needed header space that we don't need right away. If we + * can, then we don't reallocate right now but only after the + * frame arrives at the master device (if it does...) + * + * If we cannot, however, then we will reallocate to include all + * the ever needed space. Also, if we need to reallocate it anyway, + * make it big enough for everything we may ever need. + */ + + if (head_need > 0 || skb_cloned(skb)) { + head_need += IEEE80211_ENCRYPT_HEADROOM; + head_need += local->tx_headroom; + head_need = max_t(int, 0, head_need); + if (ieee80211_skb_resize(sdata, skb, head_need, ENCRYPT_DATA)) { + ieee80211_free_txskb(&local->hw, skb); + skb = NULL; + return ERR_PTR(-ENOMEM); + } + } + + if (encaps_data) + memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len); + +#ifdef CONFIG_MAC80211_MESH + if (meshhdrlen > 0) + memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen); +#endif + + if (ieee80211_is_data_qos(fc)) { + __le16 *qos_control; + + qos_control = skb_push(skb, 2); + memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2); + /* + * Maybe we could actually set some fields here, for now just + * initialise to zero to indicate no special operation. + */ + *qos_control = 0; + } else + memcpy(skb_push(skb, hdrlen), &hdr, hdrlen); + + skb_reset_mac_header(skb); + + info = IEEE80211_SKB_CB(skb); + memset(info, 0, sizeof(*info)); + + info->flags = info_flags; + info->ack_frame_id = info_id; + info->band = band; + + if (likely(!cookie)) { + ctrl_flags |= u32_encode_bits(link_id, + IEEE80211_TX_CTRL_MLO_LINK); + } else { + unsigned int pre_conf_link_id; + + /* + * ctrl_flags already have been set by + * ieee80211_tx_control_port(), here + * we just sanity check that + */ + + pre_conf_link_id = u32_get_bits(ctrl_flags, + IEEE80211_TX_CTRL_MLO_LINK); + + if (pre_conf_link_id != link_id && + link_id != IEEE80211_LINK_UNSPECIFIED) { +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + net_info_ratelimited("%s: dropped frame to %pM with bad link ID request (%d vs. %d)\n", + sdata->name, hdr.addr1, + pre_conf_link_id, link_id); +#endif + ret = -EINVAL; + goto free; + } + } + + info->control.flags = ctrl_flags; + + return skb; + free: + kfree_skb(skb); + return ERR_PTR(ret); +} + +/* + * fast-xmit overview + * + * The core idea of this fast-xmit is to remove per-packet checks by checking + * them out of band. ieee80211_check_fast_xmit() implements the out-of-band + * checks that are needed to get the sta->fast_tx pointer assigned, after which + * much less work can be done per packet. For example, fragmentation must be + * disabled or the fast_tx pointer will not be set. All the conditions are seen + * in the code here. 
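+ * For example, the station must be authorized and not in powersave,
+ * fragmentation must be handled by the hardware or disabled, and any
+ * key in use must be uploaded to the hardware.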
+ * + * Once assigned, the fast_tx data structure also caches the per-packet 802.11 + * header and other data to aid packet processing in ieee80211_xmit_fast(). + * + * The most difficult part of this is that when any of these assumptions + * change, an external trigger (i.e. a call to ieee80211_clear_fast_xmit(), + * ieee80211_check_fast_xmit() or friends) is required to reset the data, + * since the per-packet code no longer checks the conditions. This is reflected + * by the calls to these functions throughout the rest of the code, and must be + * maintained if any of the TX path checks change. + */ + +void ieee80211_check_fast_xmit(struct sta_info *sta) +{ + struct ieee80211_fast_tx build = {}, *fast_tx = NULL, *old; + struct ieee80211_local *local = sta->local; + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_hdr *hdr = (void *)build.hdr; + struct ieee80211_chanctx_conf *chanctx_conf; + __le16 fc; + + if (!ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT)) + return; + + if (ieee80211_vif_is_mesh(&sdata->vif)) + mesh_fast_tx_flush_sta(sdata, sta); + + /* Locking here protects both the pointer itself, and against concurrent + * invocations winning data access races to, e.g., the key pointer that + * is used. + * Without it, the invocation of this function right after the key + * pointer changes wouldn't be sufficient, as another CPU could access + * the pointer, then stall, and then do the cache update after the CPU + * that invalidated the key. + * With the locking, such scenarios cannot happen as the check for the + * key and the fast-tx assignment are done atomically, so the CPU that + * modifies the key will either wait or other one will see the key + * cleared/changed already. + */ + spin_lock_bh(&sta->lock); + if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && + !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) && + sdata->vif.type == NL80211_IFTYPE_STATION) + goto out; + + if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + goto out; + + if (test_sta_flag(sta, WLAN_STA_PS_STA) || + test_sta_flag(sta, WLAN_STA_PS_DRIVER) || + test_sta_flag(sta, WLAN_STA_PS_DELIVER) || + test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT)) + goto out; + + if (sdata->noack_map) + goto out; + + /* fast-xmit doesn't handle fragmentation at all */ + if (local->hw.wiphy->frag_threshold != (u32)-1 && + !ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG)) + goto out; + + if (!ieee80211_vif_is_mld(&sdata->vif)) { + rcu_read_lock(); + chanctx_conf = + rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + if (!chanctx_conf) { + rcu_read_unlock(); + goto out; + } + build.band = chanctx_conf->def.chan->band; + rcu_read_unlock(); + } else { + /* MLD transmissions must not rely on the band */ + build.band = 0; + } + + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); + + switch (sdata->vif.type) { + case NL80211_IFTYPE_ADHOC: + /* DA SA BSSID */ + build.da_offs = offsetof(struct ieee80211_hdr, addr1); + build.sa_offs = offsetof(struct ieee80211_hdr, addr2); + memcpy(hdr->addr3, sdata->u.ibss.bssid, ETH_ALEN); + build.hdr_len = 24; + break; + case NL80211_IFTYPE_STATION: + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { + /* For TDLS only one link can be valid with peer STA */ + int tdls_link_id = sta->sta.valid_links ? 
+ __ffs(sta->sta.valid_links) : 0; + struct ieee80211_link_data *link; + + /* DA SA BSSID */ + build.da_offs = offsetof(struct ieee80211_hdr, addr1); + build.sa_offs = offsetof(struct ieee80211_hdr, addr2); + link = rcu_dereference(sdata->link[tdls_link_id]); + if (WARN_ON_ONCE(!link)) + break; + memcpy(hdr->addr3, link->u.mgd.bssid, ETH_ALEN); + build.hdr_len = 24; + break; + } + + if (sdata->u.mgd.use_4addr) { + /* non-regular ethertype cannot use the fastpath */ + fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | + IEEE80211_FCTL_TODS); + /* RA TA DA SA */ + memcpy(hdr->addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN); + memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); + build.da_offs = offsetof(struct ieee80211_hdr, addr3); + build.sa_offs = offsetof(struct ieee80211_hdr, addr4); + build.hdr_len = 30; + break; + } + fc |= cpu_to_le16(IEEE80211_FCTL_TODS); + /* BSSID SA DA */ + memcpy(hdr->addr1, sdata->vif.cfg.ap_addr, ETH_ALEN); + build.da_offs = offsetof(struct ieee80211_hdr, addr3); + build.sa_offs = offsetof(struct ieee80211_hdr, addr2); + build.hdr_len = 24; + break; + case NL80211_IFTYPE_AP_VLAN: + if (sdata->wdev.use_4addr) { + fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | + IEEE80211_FCTL_TODS); + /* RA TA DA SA */ + memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); + memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); + build.da_offs = offsetof(struct ieee80211_hdr, addr3); + build.sa_offs = offsetof(struct ieee80211_hdr, addr4); + build.hdr_len = 30; + break; + } + fallthrough; + case NL80211_IFTYPE_AP: + fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); + /* DA BSSID SA */ + build.da_offs = offsetof(struct ieee80211_hdr, addr1); + if (sta->sta.mlo || !ieee80211_vif_is_mld(&sdata->vif)) { + memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); + } else { + unsigned int link_id = sta->deflink.link_id; + struct ieee80211_link_data *link; + + rcu_read_lock(); + link = rcu_dereference(sdata->link[link_id]); + if (WARN_ON(!link)) { + rcu_read_unlock(); + goto out; + } + memcpy(hdr->addr2, link->conf->addr, ETH_ALEN); + rcu_read_unlock(); + } + build.sa_offs = offsetof(struct ieee80211_hdr, addr3); + build.hdr_len = 24; + break; + default: + /* not handled on fast-xmit */ + goto out; + } + + if (sta->sta.wme) { + build.hdr_len += 2; + fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); + } + + /* We store the key here so there's no point in using rcu_dereference() + * but that's fine because the code that changes the pointers will call + * this function after doing so. For a single CPU that would be enough, + * for multiple see the comment above. 
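+ *
+ * In other words, rcu_access_pointer() below (instead of
+ * rcu_dereference()) is sufficient: the key pointer is serialised by
+ * sta->lock and by the rule that whoever changes it calls this
+ * function again afterwards.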
+ */ + build.key = rcu_access_pointer(sta->ptk[sta->ptk_idx]); + if (!build.key) + build.key = rcu_access_pointer(sdata->default_unicast_key); + if (build.key) { + bool gen_iv, iv_spc, mmic; + + gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV; + iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE; + mmic = build.key->conf.flags & + (IEEE80211_KEY_FLAG_GENERATE_MMIC | + IEEE80211_KEY_FLAG_PUT_MIC_SPACE); + + /* don't handle software crypto */ + if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) + goto out; + + /* Key is being removed */ + if (build.key->flags & KEY_FLAG_TAINTED) + goto out; + + switch (build.key->conf.cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + if (gen_iv) + build.pn_offs = build.hdr_len; + if (gen_iv || iv_spc) + build.hdr_len += IEEE80211_CCMP_HDR_LEN; + break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + if (gen_iv) + build.pn_offs = build.hdr_len; + if (gen_iv || iv_spc) + build.hdr_len += IEEE80211_GCMP_HDR_LEN; + break; + case WLAN_CIPHER_SUITE_TKIP: + /* cannot handle MMIC or IV generation in xmit-fast */ + if (mmic || gen_iv) + goto out; + if (iv_spc) + build.hdr_len += IEEE80211_TKIP_IV_LEN; + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + /* cannot handle IV generation in fast-xmit */ + if (gen_iv) + goto out; + if (iv_spc) + build.hdr_len += IEEE80211_WEP_IV_LEN; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + WARN(1, + "management cipher suite 0x%x enabled for data\n", + build.key->conf.cipher); + goto out; + default: + /* we don't know how to generate IVs for this at all */ + if (WARN_ON(gen_iv)) + goto out; + } + + fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); + } + + hdr->frame_control = fc; + + memcpy(build.hdr + build.hdr_len, + rfc1042_header, sizeof(rfc1042_header)); + build.hdr_len += sizeof(rfc1042_header); + + fast_tx = kmemdup(&build, sizeof(build), GFP_ATOMIC); + /* if the kmemdup fails, continue w/o fast_tx */ + + out: + /* we might have raced against another call to this function */ + old = rcu_dereference_protected(sta->fast_tx, + lockdep_is_held(&sta->lock)); + rcu_assign_pointer(sta->fast_tx, fast_tx); + if (old) + kfree_rcu(old, rcu_head); + spin_unlock_bh(&sta->lock); +} + +void ieee80211_check_fast_xmit_all(struct ieee80211_local *local) +{ + struct sta_info *sta; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &local->sta_list, list) + ieee80211_check_fast_xmit(sta); + rcu_read_unlock(); +} + +void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + rcu_read_lock(); + + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (sdata != sta->sdata && + (!sta->sdata->bss || sta->sdata->bss != sdata->bss)) + continue; + ieee80211_check_fast_xmit(sta); + } + + rcu_read_unlock(); +} + +void ieee80211_clear_fast_xmit(struct sta_info *sta) +{ + struct ieee80211_fast_tx *fast_tx; + + spin_lock_bh(&sta->lock); + fast_tx = rcu_dereference_protected(sta->fast_tx, + lockdep_is_held(&sta->lock)); + RCU_INIT_POINTER(sta->fast_tx, NULL); + spin_unlock_bh(&sta->lock); + + if (fast_tx) + kfree_rcu(fast_tx, rcu_head); +} + +static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local, + struct sk_buff *skb, int headroom) +{ + if (skb_headroom(skb) < headroom) { + I802_DEBUG_INC(local->tx_expand_skb_head); + + if 
(pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { + wiphy_debug(local->hw.wiphy, + "failed to reallocate TX buffer\n"); + return false; + } + } + + return true; +} + +static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata, + struct ieee80211_fast_tx *fast_tx, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr; + struct ethhdr *amsdu_hdr; + int hdr_len = fast_tx->hdr_len - sizeof(rfc1042_header); + int subframe_len = skb->len - hdr_len; + void *data; + u8 *qc, *h_80211_src, *h_80211_dst; + const u8 *bssid; + + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + return false; + + if (info->control.flags & IEEE80211_TX_CTRL_AMSDU) + return true; + + if (!ieee80211_amsdu_realloc_pad(local, skb, + sizeof(*amsdu_hdr) + + local->hw.extra_tx_headroom)) + return false; + + data = skb_push(skb, sizeof(*amsdu_hdr)); + memmove(data, data + sizeof(*amsdu_hdr), hdr_len); + hdr = data; + amsdu_hdr = data + hdr_len; + /* h_80211_src/dst is addr* field within hdr */ + h_80211_src = data + fast_tx->sa_offs; + h_80211_dst = data + fast_tx->da_offs; + + amsdu_hdr->h_proto = cpu_to_be16(subframe_len); + ether_addr_copy(amsdu_hdr->h_source, h_80211_src); + ether_addr_copy(amsdu_hdr->h_dest, h_80211_dst); + + /* according to IEEE 802.11-2012 8.3.2 table 8-19, the outer SA/DA + * fields needs to be changed to BSSID for A-MSDU frames depending + * on FromDS/ToDS values. + */ + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + bssid = sdata->vif.cfg.ap_addr; + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + bssid = sdata->vif.addr; + break; + default: + bssid = NULL; + } + + if (bssid && ieee80211_has_fromds(hdr->frame_control)) + ether_addr_copy(h_80211_src, bssid); + + if (bssid && ieee80211_has_tods(hdr->frame_control)) + ether_addr_copy(h_80211_dst, bssid); + + qc = ieee80211_get_qos_ctl(hdr); + *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; + + info->control.flags |= IEEE80211_TX_CTRL_AMSDU; + + return true; +} + +static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee80211_fast_tx *fast_tx, + struct sk_buff *skb, + const u8 *da, const u8 *sa) +{ + struct ieee80211_local *local = sdata->local; + struct fq *fq = &local->fq; + struct fq_tin *tin; + struct fq_flow *flow; + u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; + struct ieee80211_txq *txq = sta->sta.txq[tid]; + struct txq_info *txqi; + struct sk_buff **frag_tail, *head; + int subframe_len = skb->len - ETH_ALEN; + u8 max_subframes = sta->sta.max_amsdu_subframes; + int max_frags = local->hw.max_tx_fragments; + int max_amsdu_len = sta->sta.cur->max_amsdu_len; + int orig_truesize; + u32 flow_idx; + __be16 len; + void *data; + bool ret = false; + unsigned int orig_len; + int n = 2, nfrags, pad = 0; + u16 hdrlen; + + if (!ieee80211_hw_check(&local->hw, TX_AMSDU)) + return false; + + if (sdata->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED) + return false; + + if (ieee80211_vif_is_mesh(&sdata->vif)) + return false; + + if (skb_is_gso(skb)) + return false; + + if (!txq) + return false; + + txqi = to_txq_info(txq); + if (test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags)) + return false; + + if (sta->sta.cur->max_rc_amsdu_len) + max_amsdu_len = min_t(int, max_amsdu_len, + sta->sta.cur->max_rc_amsdu_len); + + if (sta->sta.cur->max_tid_amsdu_len[tid]) + max_amsdu_len = min_t(int, max_amsdu_len, + sta->sta.cur->max_tid_amsdu_len[tid]); + + flow_idx 
= fq_flow_idx(fq, skb); + + spin_lock_bh(&fq->lock); + + /* TODO: Ideally aggregation should be done on dequeue to remain + * responsive to environment changes. + */ + + tin = &txqi->tin; + flow = fq_flow_classify(fq, tin, flow_idx, skb); + head = skb_peek_tail(&flow->queue); + if (!head || skb_is_gso(head)) + goto out; + + orig_truesize = head->truesize; + orig_len = head->len; + + if (skb->len + head->len > max_amsdu_len) + goto out; + + nfrags = 1 + skb_shinfo(skb)->nr_frags; + nfrags += 1 + skb_shinfo(head)->nr_frags; + frag_tail = &skb_shinfo(head)->frag_list; + while (*frag_tail) { + nfrags += 1 + skb_shinfo(*frag_tail)->nr_frags; + frag_tail = &(*frag_tail)->next; + n++; + } + + if (max_subframes && n > max_subframes) + goto out; + + if (max_frags && nfrags > max_frags) + goto out; + + if (!drv_can_aggregate_in_amsdu(local, head, skb)) + goto out; + + if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) + goto out; + + /* If n == 2, the "while (*frag_tail)" loop above didn't execute + * and frag_tail should be &skb_shinfo(head)->frag_list. + * However, ieee80211_amsdu_prepare_head() can reallocate it. + * Reload frag_tail to have it pointing to the correct place. + */ + if (n == 2) + frag_tail = &skb_shinfo(head)->frag_list; + + /* + * Pad out the previous subframe to a multiple of 4 by adding the + * padding to the next one, that's being added. Note that head->len + * is the length of the full A-MSDU, but that works since each time + * we add a new subframe we pad out the previous one to a multiple + * of 4 and thus it no longer matters in the next round. + */ + hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header); + if ((head->len - hdrlen) & 3) + pad = 4 - ((head->len - hdrlen) & 3); + + if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + + 2 + pad)) + goto out_recalc; + + ret = true; + data = skb_push(skb, ETH_ALEN + 2); + ether_addr_copy(data, da); + ether_addr_copy(data + ETH_ALEN, sa); + + data += 2 * ETH_ALEN; + len = cpu_to_be16(subframe_len); + memcpy(data, &len, 2); + memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header)); + + memset(skb_push(skb, pad), 0, pad); + + head->len += skb->len; + head->data_len += skb->len; + *frag_tail = skb; + +out_recalc: + fq->memory_usage += head->truesize - orig_truesize; + if (head->len != orig_len) { + flow->backlog += head->len - orig_len; + tin->backlog_bytes += head->len - orig_len; + } +out: + spin_unlock_bh(&fq->lock); + + return ret; +} + +/* + * Can be called while the sta lock is held. Anything that can cause packets to + * be generated will cause deadlock! 
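+ *
+ * Only per-packet fields that fast-xmit deferred are filled in here:
+ * rate control information, the sequence number, statistics and, when
+ * pn_offs is nonzero, the PN bytes of the CCMP/GCMP header. As an
+ * illustrative example, for PN 0x0102030405 and key index 0 the bytes
+ * written at pn_offs are 05 04 -- 20 03 02 01 00 (the third byte is
+ * left untouched/reserved).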
+ */ +static ieee80211_tx_result +ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, u8 pn_offs, + struct ieee80211_key *key, + struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb = tx->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)skb->data; + u8 tid = IEEE80211_NUM_TIDS; + + if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL) && + ieee80211_tx_h_rate_ctrl(tx) != TX_CONTINUE) + return TX_DROP; + + if (key) + info->control.hw_key = &key->conf; + + dev_sw_netstats_tx_add(skb->dev, 1, skb->len); + + if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { + tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; + hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); + } else { + info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; + hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number); + sdata->sequence_number += 0x10; + } + + if (skb_shinfo(skb)->gso_size) + sta->deflink.tx_stats.msdu[tid] += + DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size); + else + sta->deflink.tx_stats.msdu[tid]++; + + info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; + + /* statistics normally done by ieee80211_tx_h_stats (but that + * has to consider fragmentation, so is more complex) + */ + sta->deflink.tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len; + sta->deflink.tx_stats.packets[skb_get_queue_mapping(skb)]++; + + if (pn_offs) { + u64 pn; + u8 *crypto_hdr = skb->data + pn_offs; + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + pn = atomic64_inc_return(&key->conf.tx_pn); + crypto_hdr[0] = pn; + crypto_hdr[1] = pn >> 8; + crypto_hdr[3] = 0x20 | (key->conf.keyidx << 6); + crypto_hdr[4] = pn >> 16; + crypto_hdr[5] = pn >> 24; + crypto_hdr[6] = pn >> 32; + crypto_hdr[7] = pn >> 40; + break; + } + } + + return TX_CONTINUE; +} + +static netdev_features_t +ieee80211_sdata_netdev_features(struct ieee80211_sub_if_data *sdata) +{ + if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN) + return sdata->vif.netdev_features; + + if (!sdata->bss) + return 0; + + sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); + return sdata->vif.netdev_features; +} + +static struct sk_buff * +ieee80211_tx_skb_fixup(struct sk_buff *skb, netdev_features_t features) +{ + if (skb_is_gso(skb)) { + struct sk_buff *segs; + + segs = skb_gso_segment(skb, features); + if (!segs) + return skb; + if (IS_ERR(segs)) + goto free; + + consume_skb(skb); + return segs; + } + + if (skb_needs_linearize(skb, features) && __skb_linearize(skb)) + goto free; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + int ofs = skb_checksum_start_offset(skb); + + if (skb->encapsulation) + skb_set_inner_transport_header(skb, ofs); + else + skb_set_transport_header(skb, ofs); + + if (skb_csum_hwoffload_help(skb, features)) + goto free; + } + + skb_mark_not_on_list(skb); + return skb; + +free: + kfree_skb(skb); + return NULL; +} + +void __ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee80211_fast_tx *fast_tx, + struct sk_buff *skb, bool ampdu, + const u8 *da, const u8 *sa) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_hdr *hdr = (void *)fast_tx->hdr; + struct ieee80211_tx_info *info; + struct ieee80211_tx_data tx; + ieee80211_tx_result r; + int hw_headroom = sdata->local->hw.extra_tx_headroom; + int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2); + + skb = 
skb_share_check(skb, GFP_ATOMIC); + if (unlikely(!skb)) + return; + + if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) && + ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb, da, sa)) + return; + + /* will not be crypto-handled beyond what we do here, so use false + * as the may-encrypt argument for the resize to not account for + * more room than we already have in 'extra_head' + */ + if (unlikely(ieee80211_skb_resize(sdata, skb, + max_t(int, extra_head + hw_headroom - + skb_headroom(skb), 0), + ENCRYPT_NO))) + goto free; + + hdr = skb_push(skb, extra_head); + memcpy(skb->data, fast_tx->hdr, fast_tx->hdr_len); + memcpy(skb->data + fast_tx->da_offs, da, ETH_ALEN); + memcpy(skb->data + fast_tx->sa_offs, sa, ETH_ALEN); + + info = IEEE80211_SKB_CB(skb); + memset(info, 0, sizeof(*info)); + info->band = fast_tx->band; + info->control.vif = &sdata->vif; + info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT | + IEEE80211_TX_CTL_DONTFRAG; + info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT | + u32_encode_bits(IEEE80211_LINK_UNSPECIFIED, + IEEE80211_TX_CTRL_MLO_LINK); + +#ifdef CONFIG_MAC80211_DEBUGFS + if (local->force_tx_status) + info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; +#endif + + if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { + u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; + + *ieee80211_get_qos_ctl(hdr) = tid; + } + + __skb_queue_head_init(&tx.skbs); + + tx.flags = IEEE80211_TX_UNICAST; + tx.local = local; + tx.sdata = sdata; + tx.sta = sta; + tx.key = fast_tx->key; + + if (ieee80211_queue_skb(local, sdata, sta, skb)) + return; + + tx.skb = skb; + r = ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs, + fast_tx->key, &tx); + tx.skb = NULL; + if (r == TX_DROP) + goto free; + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = container_of(sdata->bss, + struct ieee80211_sub_if_data, u.ap); + + __skb_queue_tail(&tx.skbs, skb); + ieee80211_tx_frags(local, &sdata->vif, sta, &tx.skbs, false); + return; + +free: + kfree_skb(skb); +} + +static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee80211_fast_tx *fast_tx, + struct sk_buff *skb) +{ + u16 ethertype = (skb->data[12] << 8) | skb->data[13]; + struct ieee80211_hdr *hdr = (void *)fast_tx->hdr; + struct tid_ampdu_tx *tid_tx = NULL; + struct sk_buff *next; + struct ethhdr eth; + u8 tid = IEEE80211_NUM_TIDS; + + /* control port protocol needs a lot of special handling */ + if (cpu_to_be16(ethertype) == sdata->control_port_protocol) + return false; + + /* only RFC 1042 SNAP */ + if (ethertype < ETH_P_802_3_MIN) + return false; + + /* don't handle TX status request here either */ + if (skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) + return false; + + if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { + tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; + tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); + if (tid_tx) { + if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) + return false; + if (tid_tx->timeout) + tid_tx->last_tx = jiffies; + } + } + + memcpy(ð, skb->data, ETH_HLEN - 2); + + /* after this point (skb is modified) we cannot return false */ + skb = ieee80211_tx_skb_fixup(skb, ieee80211_sdata_netdev_features(sdata)); + if (!skb) + return true; + + skb_list_walk_safe(skb, skb, next) { + skb_mark_not_on_list(skb); + __ieee80211_xmit_fast(sdata, sta, fast_tx, skb, tid_tx, + eth.h_dest, eth.h_source); + } + + return true; +} + +struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, + struct 
ieee80211_txq *txq) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct txq_info *txqi = container_of(txq, struct txq_info, txq); + struct ieee80211_hdr *hdr; + struct sk_buff *skb = NULL; + struct fq *fq = &local->fq; + struct fq_tin *tin = &txqi->tin; + struct ieee80211_tx_info *info; + struct ieee80211_tx_data tx; + ieee80211_tx_result r; + struct ieee80211_vif *vif = txq->vif; + int q = vif->hw_queue[txq->ac]; + unsigned long flags; + bool q_stopped; + + WARN_ON_ONCE(softirq_count() == 0); + + if (!ieee80211_txq_airtime_check(hw, txq)) + return NULL; + +begin: + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + q_stopped = local->queue_stop_reasons[q]; + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + + if (unlikely(q_stopped)) { + /* mark for waking later */ + set_bit(IEEE80211_TXQ_DIRTY, &txqi->flags); + return NULL; + } + + spin_lock_bh(&fq->lock); + + /* Make sure fragments stay together. */ + skb = __skb_dequeue(&txqi->frags); + if (unlikely(skb)) { + if (!(IEEE80211_SKB_CB(skb)->control.flags & + IEEE80211_TX_INTCFL_NEED_TXPROCESSING)) + goto out; + IEEE80211_SKB_CB(skb)->control.flags &= + ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING; + } else { + if (unlikely(test_bit(IEEE80211_TXQ_STOP, &txqi->flags))) + goto out; + + skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func); + } + + if (!skb) + goto out; + + spin_unlock_bh(&fq->lock); + + hdr = (struct ieee80211_hdr *)skb->data; + info = IEEE80211_SKB_CB(skb); + + memset(&tx, 0, sizeof(tx)); + __skb_queue_head_init(&tx.skbs); + tx.local = local; + tx.skb = skb; + tx.sdata = vif_to_sdata(info->control.vif); + + if (txq->sta) { + tx.sta = container_of(txq->sta, struct sta_info, sta); + /* + * Drop unicast frames to unauthorised stations unless they are + * injected frames or EAPOL frames from the local station. + */ + if (unlikely(!(info->flags & IEEE80211_TX_CTL_INJECTED) && + ieee80211_is_data(hdr->frame_control) && + !ieee80211_vif_is_mesh(&tx.sdata->vif) && + tx.sdata->vif.type != NL80211_IFTYPE_OCB && + !is_multicast_ether_addr(hdr->addr1) && + !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) && + (!(info->control.flags & + IEEE80211_TX_CTRL_PORT_CTRL_PROTO) || + !ieee80211_is_our_addr(tx.sdata, hdr->addr2, + NULL)))) { + I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); + ieee80211_free_txskb(&local->hw, skb); + goto begin; + } + } + + /* + * The key can be removed while the packet was queued, so need to call + * this here to get the current key. 
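+ * If key selection fails at this point (for example because the key that
+ * was current when the frame was queued has since been removed), the
+ * frame is dropped and the next one is dequeued instead.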
+ */ + r = ieee80211_tx_h_select_key(&tx); + if (r != TX_CONTINUE) { + ieee80211_free_txskb(&local->hw, skb); + goto begin; + } + + if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags)) + info->flags |= (IEEE80211_TX_CTL_AMPDU | + IEEE80211_TX_CTL_DONTFRAG); + + if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { + if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) { + r = ieee80211_tx_h_rate_ctrl(&tx); + if (r != TX_CONTINUE) { + ieee80211_free_txskb(&local->hw, skb); + goto begin; + } + } + goto encap_out; + } + + if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) { + struct sta_info *sta = container_of(txq->sta, struct sta_info, + sta); + u8 pn_offs = 0; + + if (tx.key && + (tx.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) + pn_offs = ieee80211_hdrlen(hdr->frame_control); + + r = ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs, + tx.key, &tx); + if (r != TX_CONTINUE) { + ieee80211_free_txskb(&local->hw, skb); + goto begin; + } + } else { + if (invoke_tx_handlers_late(&tx)) + goto begin; + + skb = __skb_dequeue(&tx.skbs); + + if (!skb_queue_empty(&tx.skbs)) { + spin_lock_bh(&fq->lock); + skb_queue_splice_tail(&tx.skbs, &txqi->frags); + spin_unlock_bh(&fq->lock); + } + } + + if (skb_has_frag_list(skb) && + !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) { + if (skb_linearize(skb)) { + ieee80211_free_txskb(&local->hw, skb); + goto begin; + } + } + + switch (tx.sdata->vif.type) { + case NL80211_IFTYPE_MONITOR: + if (tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) { + vif = &tx.sdata->vif; + break; + } + tx.sdata = rcu_dereference(local->monitor_sdata); + if (tx.sdata) { + vif = &tx.sdata->vif; + info->hw_queue = + vif->hw_queue[skb_get_queue_mapping(skb)]; + } else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) { + ieee80211_free_txskb(&local->hw, skb); + goto begin; + } else { + vif = NULL; + } + break; + case NL80211_IFTYPE_AP_VLAN: + tx.sdata = container_of(tx.sdata->bss, + struct ieee80211_sub_if_data, u.ap); + fallthrough; + default: + vif = &tx.sdata->vif; + break; + } + +encap_out: + IEEE80211_SKB_CB(skb)->control.vif = vif; + + if (tx.sta && + wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) { + bool ampdu = txq->ac != IEEE80211_AC_VO; + u32 airtime; + + airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta, + skb->len, ampdu); + if (airtime) { + airtime = ieee80211_info_set_tx_time_est(info, airtime); + ieee80211_sta_update_pending_airtime(local, tx.sta, + txq->ac, + airtime, + false); + } + } + + return skb; + +out: + spin_unlock_bh(&fq->lock); + + return skb; +} +EXPORT_SYMBOL(ieee80211_tx_dequeue); + +static inline s32 ieee80211_sta_deficit(struct sta_info *sta, u8 ac) +{ + struct airtime_info *air_info = &sta->airtime[ac]; + + return air_info->deficit - atomic_read(&air_info->aql_tx_pending); +} + +static void +ieee80211_txq_set_active(struct txq_info *txqi) +{ + struct sta_info *sta; + + if (!txqi->txq.sta) + return; + + sta = container_of(txqi->txq.sta, struct sta_info, sta); + sta->airtime[txqi->txq.ac].last_active = (u32)jiffies; +} + +static bool +ieee80211_txq_keep_active(struct txq_info *txqi) +{ + struct sta_info *sta; + u32 diff; + + if (!txqi->txq.sta) + return false; + + sta = container_of(txqi->txq.sta, struct sta_info, sta); + if (ieee80211_sta_deficit(sta, txqi->txq.ac) >= 0) + return false; + + diff = (u32)jiffies - sta->airtime[txqi->txq.ac].last_active; + + return diff <= AIRTIME_ACTIVE_DURATION; +} + +struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac) +{ + struct ieee80211_local *local = 
hw_to_local(hw); + struct ieee80211_txq *ret = NULL; + struct txq_info *txqi = NULL, *head = NULL; + bool found_eligible_txq = false; + + spin_lock_bh(&local->active_txq_lock[ac]); + + if (!local->schedule_round[ac]) + goto out; + + begin: + txqi = list_first_entry_or_null(&local->active_txqs[ac], + struct txq_info, + schedule_order); + if (!txqi) + goto out; + + if (txqi == head) { + if (!found_eligible_txq) + goto out; + else + found_eligible_txq = false; + } + + if (!head) + head = txqi; + + if (txqi->txq.sta) { + struct sta_info *sta = container_of(txqi->txq.sta, + struct sta_info, sta); + bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq); + s32 deficit = ieee80211_sta_deficit(sta, txqi->txq.ac); + + if (aql_check) + found_eligible_txq = true; + + if (deficit < 0) + sta->airtime[txqi->txq.ac].deficit += + sta->airtime_weight; + + if (deficit < 0 || !aql_check) { + list_move_tail(&txqi->schedule_order, + &local->active_txqs[txqi->txq.ac]); + goto begin; + } + } + + if (txqi->schedule_round == local->schedule_round[ac]) + goto out; + + list_del_init(&txqi->schedule_order); + txqi->schedule_round = local->schedule_round[ac]; + ret = &txqi->txq; + +out: + spin_unlock_bh(&local->active_txq_lock[ac]); + return ret; +} +EXPORT_SYMBOL(ieee80211_next_txq); + +void __ieee80211_schedule_txq(struct ieee80211_hw *hw, + struct ieee80211_txq *txq, + bool force) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct txq_info *txqi = to_txq_info(txq); + bool has_queue; + + spin_lock_bh(&local->active_txq_lock[txq->ac]); + + has_queue = force || txq_has_queue(txq); + if (list_empty(&txqi->schedule_order) && + (has_queue || ieee80211_txq_keep_active(txqi))) { + /* If airtime accounting is active, always enqueue STAs at the + * head of the list to ensure that they only get moved to the + * back by the airtime DRR scheduler once they have a negative + * deficit. A station that already has a negative deficit will + * get immediately moved to the back of the list on the next + * call to ieee80211_next_txq(). 
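+ *
+ * As an illustration of the resulting DRR behaviour: each time a txq is
+ * skipped in ieee80211_next_txq() because its deficit is negative, the
+ * deficit is increased by airtime_weight, so a deficit of -N is worked
+ * off after roughly N / airtime_weight scheduling passes.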
+ */ + if (txqi->txq.sta && local->airtime_flags && has_queue && + wiphy_ext_feature_isset(local->hw.wiphy, + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) + list_add(&txqi->schedule_order, + &local->active_txqs[txq->ac]); + else + list_add_tail(&txqi->schedule_order, + &local->active_txqs[txq->ac]); + if (has_queue) + ieee80211_txq_set_active(txqi); + } + + spin_unlock_bh(&local->active_txq_lock[txq->ac]); +} +EXPORT_SYMBOL(__ieee80211_schedule_txq); + +DEFINE_STATIC_KEY_FALSE(aql_disable); + +bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct sta_info *sta; + struct ieee80211_local *local = hw_to_local(hw); + + if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) + return true; + + if (static_branch_unlikely(&aql_disable)) + return true; + + if (!txq->sta) + return true; + + if (unlikely(txq->tid == IEEE80211_NUM_TIDS)) + return true; + + sta = container_of(txq->sta, struct sta_info, sta); + if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) < + sta->airtime[txq->ac].aql_limit_low) + return true; + + if (atomic_read(&local->aql_total_pending_airtime) < + local->aql_threshold && + atomic_read(&sta->airtime[txq->ac].aql_tx_pending) < + sta->airtime[txq->ac].aql_limit_high) + return true; + + return false; +} +EXPORT_SYMBOL(ieee80211_txq_airtime_check); + +static bool +ieee80211_txq_schedule_airtime_check(struct ieee80211_local *local, u8 ac) +{ + unsigned int num_txq = 0; + struct txq_info *txq; + u32 aql_limit; + + if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) + return true; + + list_for_each_entry(txq, &local->active_txqs[ac], schedule_order) + num_txq++; + + aql_limit = (num_txq - 1) * local->aql_txq_limit_low[ac] / 2 + + local->aql_txq_limit_high[ac]; + + return atomic_read(&local->aql_ac_pending_airtime[ac]) < aql_limit; +} + +bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct txq_info *iter, *tmp, *txqi = to_txq_info(txq); + struct sta_info *sta; + u8 ac = txq->ac; + + spin_lock_bh(&local->active_txq_lock[ac]); + + if (!txqi->txq.sta) + goto out; + + if (list_empty(&txqi->schedule_order)) + goto out; + + if (!ieee80211_txq_schedule_airtime_check(local, ac)) + goto out; + + list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac], + schedule_order) { + if (iter == txqi) + break; + + if (!iter->txq.sta) { + list_move_tail(&iter->schedule_order, + &local->active_txqs[ac]); + continue; + } + sta = container_of(iter->txq.sta, struct sta_info, sta); + if (ieee80211_sta_deficit(sta, ac) < 0) + sta->airtime[ac].deficit += sta->airtime_weight; + list_move_tail(&iter->schedule_order, &local->active_txqs[ac]); + } + + sta = container_of(txqi->txq.sta, struct sta_info, sta); + if (sta->airtime[ac].deficit >= 0) + goto out; + + sta->airtime[ac].deficit += sta->airtime_weight; + list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]); + spin_unlock_bh(&local->active_txq_lock[ac]); + + return false; +out: + if (!list_empty(&txqi->schedule_order)) + list_del_init(&txqi->schedule_order); + spin_unlock_bh(&local->active_txq_lock[ac]); + + return true; +} +EXPORT_SYMBOL(ieee80211_txq_may_transmit); + +void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac) +{ + struct ieee80211_local *local = hw_to_local(hw); + + spin_lock_bh(&local->active_txq_lock[ac]); + + if (ieee80211_txq_schedule_airtime_check(local, ac)) { + local->schedule_round[ac]++; + if (!local->schedule_round[ac]) + 
local->schedule_round[ac]++; + } else { + local->schedule_round[ac] = 0; + } + + spin_unlock_bh(&local->active_txq_lock[ac]); +} +EXPORT_SYMBOL(ieee80211_txq_schedule_start); + +void __ieee80211_subif_start_xmit(struct sk_buff *skb, + struct net_device *dev, + u32 info_flags, + u32 ctrl_flags, + u64 *cookie) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + struct sk_buff *next; + int len = skb->len; + + if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) { + kfree_skb(skb); + return; + } + + sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift); + + rcu_read_lock(); + + if (ieee80211_vif_is_mesh(&sdata->vif) && + ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT) && + ieee80211_mesh_xmit_fast(sdata, skb, ctrl_flags)) + goto out; + + if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) + goto out_free; + + if (IS_ERR(sta)) + sta = NULL; + + skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, sta, skb)); + ieee80211_aggr_check(sdata, sta, skb); + + if (sta) { + struct ieee80211_fast_tx *fast_tx; + + fast_tx = rcu_dereference(sta->fast_tx); + + if (fast_tx && + ieee80211_xmit_fast(sdata, sta, fast_tx, skb)) + goto out; + } + + /* the frame could be fragmented, software-encrypted, and other + * things so we cannot really handle checksum or GSO offload. + * fix it up in software before we handle anything else. + */ + skb = ieee80211_tx_skb_fixup(skb, 0); + if (!skb) { + len = 0; + goto out; + } + + skb_list_walk_safe(skb, skb, next) { + skb_mark_not_on_list(skb); + + if (skb->protocol == sdata->control_port_protocol) + ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP; + + skb = ieee80211_build_hdr(sdata, skb, info_flags, + sta, ctrl_flags, cookie); + if (IS_ERR(skb)) { + kfree_skb_list(next); + goto out; + } + + dev_sw_netstats_tx_add(dev, 1, skb->len); + + ieee80211_xmit(sdata, sta, skb); + } + goto out; + out_free: + kfree_skb(skb); + len = 0; + out: + if (len) + ieee80211_tpt_led_trig_tx(local, len); + rcu_read_unlock(); +} + +static int ieee80211_change_da(struct sk_buff *skb, struct sta_info *sta) +{ + struct ethhdr *eth; + int err; + + err = skb_ensure_writable(skb, ETH_HLEN); + if (unlikely(err)) + return err; + + eth = (void *)skb->data; + ether_addr_copy(eth->h_dest, sta->sta.addr); + + return 0; +} + +static bool ieee80211_multicast_to_unicast(struct sk_buff *skb, + struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + const struct ethhdr *eth = (void *)skb->data; + const struct vlan_ethhdr *ethvlan = (void *)skb->data; + __be16 ethertype; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + if (sdata->u.vlan.sta) + return false; + if (sdata->wdev.use_4addr) + return false; + fallthrough; + case NL80211_IFTYPE_AP: + /* check runtime toggle for this bss */ + if (!sdata->bss->multicast_to_unicast) + return false; + break; + default: + return false; + } + + /* multicast to unicast conversion only for some payload */ + ethertype = eth->h_proto; + if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN) + ethertype = ethvlan->h_vlan_encapsulated_proto; + switch (ethertype) { + case htons(ETH_P_ARP): + case htons(ETH_P_IP): + case htons(ETH_P_IPV6): + break; + default: + return false; + } + + return true; +} + +static void +ieee80211_convert_to_unicast(struct sk_buff *skb, struct net_device *dev, + struct sk_buff_head *queue) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + 
struct ieee80211_local *local = sdata->local; + const struct ethhdr *eth = (struct ethhdr *)skb->data; + struct sta_info *sta, *first = NULL; + struct sk_buff *cloned_skb; + + rcu_read_lock(); + + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (sdata != sta->sdata) + /* AP-VLAN mismatch */ + continue; + if (unlikely(ether_addr_equal(eth->h_source, sta->sta.addr))) + /* do not send back to source */ + continue; + if (!first) { + first = sta; + continue; + } + cloned_skb = skb_clone(skb, GFP_ATOMIC); + if (!cloned_skb) + goto multicast; + if (unlikely(ieee80211_change_da(cloned_skb, sta))) { + dev_kfree_skb(cloned_skb); + goto multicast; + } + __skb_queue_tail(queue, cloned_skb); + } + + if (likely(first)) { + if (unlikely(ieee80211_change_da(skb, first))) + goto multicast; + __skb_queue_tail(queue, skb); + } else { + /* no STA connected, drop */ + kfree_skb(skb); + skb = NULL; + } + + goto out; +multicast: + __skb_queue_purge(queue); + __skb_queue_tail(queue, skb); +out: + rcu_read_unlock(); +} + +static void ieee80211_mlo_multicast_tx_one(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u32 ctrl_flags, + unsigned int link_id) +{ + struct sk_buff *out; + + out = skb_copy(skb, GFP_ATOMIC); + if (!out) + return; + + ctrl_flags |= u32_encode_bits(link_id, IEEE80211_TX_CTRL_MLO_LINK); + __ieee80211_subif_start_xmit(out, sdata->dev, 0, ctrl_flags, NULL); +} + +static void ieee80211_mlo_multicast_tx(struct net_device *dev, + struct sk_buff *skb) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + unsigned long links = sdata->vif.active_links; + unsigned int link; + u32 ctrl_flags = IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX; + + if (hweight16(links) == 1) { + ctrl_flags |= u32_encode_bits(__ffs(links), + IEEE80211_TX_CTRL_MLO_LINK); + + __ieee80211_subif_start_xmit(skb, sdata->dev, 0, ctrl_flags, + NULL); + return; + } + + for_each_set_bit(link, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + ieee80211_mlo_multicast_tx_one(sdata, skb, ctrl_flags, link); + ctrl_flags = 0; + } + kfree_skb(skb); +} + +/** + * ieee80211_subif_start_xmit - netif start_xmit function for 802.3 vifs + * @skb: packet to be sent + * @dev: incoming interface + * + * On failure skb will be freed. 
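+ *
+ * Return: always NETDEV_TX_OK, whether the frame was transmitted,
+ * queued or dropped.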
+ */ +netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + const struct ethhdr *eth = (void *)skb->data; + + if (likely(!is_multicast_ether_addr(eth->h_dest))) + goto normal; + + if (unlikely(!ieee80211_sdata_running(sdata))) { + kfree_skb(skb); + return NETDEV_TX_OK; + } + + if (unlikely(ieee80211_multicast_to_unicast(skb, dev))) { + struct sk_buff_head queue; + + __skb_queue_head_init(&queue); + ieee80211_convert_to_unicast(skb, dev, &queue); + while ((skb = __skb_dequeue(&queue))) + __ieee80211_subif_start_xmit(skb, dev, 0, + IEEE80211_TX_CTRL_MLO_LINK_UNSPEC, + NULL); + } else if (ieee80211_vif_is_mld(&sdata->vif) && + sdata->vif.type == NL80211_IFTYPE_AP && + !ieee80211_hw_check(&sdata->local->hw, MLO_MCAST_MULTI_LINK_TX)) { + ieee80211_mlo_multicast_tx(dev, skb); + } else { +normal: + __ieee80211_subif_start_xmit(skb, dev, 0, + IEEE80211_TX_CTRL_MLO_LINK_UNSPEC, + NULL); + } + + return NETDEV_TX_OK; +} + + + +static bool __ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, struct sta_info *sta, + bool txpending) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_tx_control control = {}; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sta *pubsta = NULL; + unsigned long flags; + int q = info->hw_queue; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + + if (local->queue_stop_reasons[q] || + (!txpending && !skb_queue_empty(&local->pending[q]))) { + if (txpending) + skb_queue_head(&local->pending[q], skb); + else + skb_queue_tail(&local->pending[q], skb); + + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + + return false; + } + + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + + if (sta && sta->uploaded) + pubsta = &sta->sta; + + control.sta = pubsta; + + drv_tx(local, &control, skb); + + return true; +} + +static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, struct sta_info *sta, + bool txpending) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *next; + bool ret = true; + + if (ieee80211_queue_skb(local, sdata, sta, skb)) + return true; + + skb_list_walk_safe(skb, skb, next) { + skb_mark_not_on_list(skb); + if (!__ieee80211_tx_8023(sdata, skb, sta, txpending)) + ret = false; + } + + return ret; +} + +static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata, + struct net_device *dev, struct sta_info *sta, + struct ieee80211_key *key, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info; + struct ieee80211_local *local = sdata->local; + struct tid_ampdu_tx *tid_tx; + struct sk_buff *seg, *next; + unsigned int skbs = 0, len = 0; + u16 queue; + u8 tid; + + queue = ieee80211_select_queue(sdata, sta, skb); + skb_set_queue_mapping(skb, queue); + + if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)) && + test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) + goto out_free; + + skb = skb_share_check(skb, GFP_ATOMIC); + if (unlikely(!skb)) + return; + + ieee80211_aggr_check(sdata, sta, skb); + + tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; + tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); + if (tid_tx) { + if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { + /* fall back to non-offload slow path */ + __ieee80211_subif_start_xmit(skb, dev, 0, + IEEE80211_TX_CTRL_MLO_LINK_UNSPEC, + NULL); + return; + } + + if (tid_tx->timeout) + tid_tx->last_tx = jiffies; + } + + skb = 
ieee80211_tx_skb_fixup(skb, ieee80211_sdata_netdev_features(sdata)); + if (!skb) + return; + + info = IEEE80211_SKB_CB(skb); + memset(info, 0, sizeof(*info)); + + info->hw_queue = sdata->vif.hw_queue[queue]; + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = container_of(sdata->bss, + struct ieee80211_sub_if_data, u.ap); + + info->flags |= IEEE80211_TX_CTL_HW_80211_ENCAP; + info->control.vif = &sdata->vif; + + if (key) + info->control.hw_key = &key->conf; + + skb_list_walk_safe(skb, seg, next) { + skbs++; + len += seg->len; + if (seg != skb) + memcpy(IEEE80211_SKB_CB(seg), info, sizeof(*info)); + } + + if (unlikely(skb->sk && + skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) + info->ack_frame_id = ieee80211_store_ack_skb(local, skb, + &info->flags, NULL); + + dev_sw_netstats_tx_add(dev, skbs, len); + sta->deflink.tx_stats.packets[queue] += skbs; + sta->deflink.tx_stats.bytes[queue] += len; + + ieee80211_tpt_led_trig_tx(local, len); + + ieee80211_tx_8023(sdata, skb, sta, false); + + return; + +out_free: + kfree_skb(skb); +} + +netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb, + struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ethhdr *ehdr = (struct ethhdr *)skb->data; + struct ieee80211_key *key; + struct sta_info *sta; + + if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) { + kfree_skb(skb); + return NETDEV_TX_OK; + } + + rcu_read_lock(); + + if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) { + kfree_skb(skb); + goto out; + } + + if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded || + !test_sta_flag(sta, WLAN_STA_AUTHORIZED) || + sdata->control_port_protocol == ehdr->h_proto)) + goto skip_offload; + + key = rcu_dereference(sta->ptk[sta->ptk_idx]); + if (!key) + key = rcu_dereference(sdata->default_unicast_key); + + if (key && (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) || + key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)) + goto skip_offload; + + sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift); + ieee80211_8023_xmit(sdata, dev, sta, key, skb); + goto out; + +skip_offload: + ieee80211_subif_start_xmit(skb, dev); +out: + rcu_read_unlock(); + + return NETDEV_TX_OK; +} + +struct sk_buff * +ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u32 info_flags) +{ + struct ieee80211_hdr *hdr; + struct ieee80211_tx_data tx = { + .local = sdata->local, + .sdata = sdata, + }; + struct sta_info *sta; + + rcu_read_lock(); + + if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) { + kfree_skb(skb); + skb = ERR_PTR(-EINVAL); + goto out; + } + + skb = ieee80211_build_hdr(sdata, skb, info_flags, sta, + IEEE80211_TX_CTRL_MLO_LINK_UNSPEC, NULL); + if (IS_ERR(skb)) + goto out; + + hdr = (void *)skb->data; + tx.sta = sta_info_get(sdata, hdr->addr1); + tx.skb = skb; + + if (ieee80211_tx_h_select_key(&tx) != TX_CONTINUE) { + rcu_read_unlock(); + kfree_skb(skb); + return ERR_PTR(-EINVAL); + } + +out: + rcu_read_unlock(); + return skb; +} + +/* + * ieee80211_clear_tx_pending may not be called in a context where + * it is possible that it packets could come in again. 
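+ * That is, the caller must ensure that nothing can add new frames to the
+ * local->pending queues while they are being flushed here.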
+ */ +void ieee80211_clear_tx_pending(struct ieee80211_local *local) +{ + struct sk_buff *skb; + int i; + + for (i = 0; i < local->hw.queues; i++) { + while ((skb = skb_dequeue(&local->pending[i])) != NULL) + ieee80211_free_txskb(&local->hw, skb); + } +} + +/* + * Returns false if the frame couldn't be transmitted but was queued instead, + * which in this case means re-queued -- take as an indication to stop sending + * more pending frames. + */ +static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + struct ieee80211_hdr *hdr; + bool result; + struct ieee80211_chanctx_conf *chanctx_conf; + + sdata = vif_to_sdata(info->control.vif); + + if (info->control.flags & IEEE80211_TX_INTCFL_NEED_TXPROCESSING) { + /* update band only for non-MLD */ + if (!ieee80211_vif_is_mld(&sdata->vif)) { + chanctx_conf = + rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + if (unlikely(!chanctx_conf)) { + dev_kfree_skb(skb); + return true; + } + info->band = chanctx_conf->def.chan->band; + } + result = ieee80211_tx(sdata, NULL, skb, true); + } else if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { + if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) { + dev_kfree_skb(skb); + return true; + } + + if (IS_ERR(sta) || (sta && !sta->uploaded)) + sta = NULL; + + result = ieee80211_tx_8023(sdata, skb, sta, true); + } else { + struct sk_buff_head skbs; + + __skb_queue_head_init(&skbs); + __skb_queue_tail(&skbs, skb); + + hdr = (struct ieee80211_hdr *)skb->data; + sta = sta_info_get(sdata, hdr->addr1); + + result = __ieee80211_tx(local, &skbs, sta, true); + } + + return result; +} + +/* + * Transmit all pending packets. Called from tasklet. + */ +void ieee80211_tx_pending(struct tasklet_struct *t) +{ + struct ieee80211_local *local = from_tasklet(local, t, + tx_pending_tasklet); + unsigned long flags; + int i; + bool txok; + + rcu_read_lock(); + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + for (i = 0; i < local->hw.queues; i++) { + /* + * If queue is stopped by something other than due to pending + * frames, or we have no pending frames, proceed to next queue. + */ + if (local->queue_stop_reasons[i] || + skb_queue_empty(&local->pending[i])) + continue; + + while (!skb_queue_empty(&local->pending[i])) { + struct sk_buff *skb = __skb_dequeue(&local->pending[i]); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (WARN_ON(!info->control.vif)) { + ieee80211_free_txskb(&local->hw, skb); + continue; + } + + spin_unlock_irqrestore(&local->queue_stop_reason_lock, + flags); + + txok = ieee80211_tx_pending_skb(local, skb); + spin_lock_irqsave(&local->queue_stop_reason_lock, + flags); + if (!txok) + break; + } + } + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + + rcu_read_unlock(); +} + +/* functions for drivers to get certain frames */ + +static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link, + struct ps_data *ps, struct sk_buff *skb, + bool is_template) +{ + u8 *pos, *tim; + int aid0 = 0; + int i, have_bits = 0, n1, n2; + struct ieee80211_bss_conf *link_conf = link->conf; + + /* Generate bitmap for TIM only if there are any STAs in power save + * mode. 
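+ * As a worked example of the partial virtual bitmap built below: if only
+ * the station with AID 17 has buffered frames, ps->tim[2] is the only
+ * non-zero octet, so n1 = n2 = 2, a single bitmap octet is copied and
+ * the element length becomes n2 - n1 + 4 = 4.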
*/ + if (atomic_read(&ps->num_sta_ps) > 0) + /* in the hope that this is faster than + * checking byte-for-byte */ + have_bits = !bitmap_empty((unsigned long *)ps->tim, + IEEE80211_MAX_AID+1); + if (!is_template) { + if (ps->dtim_count == 0) + ps->dtim_count = link_conf->dtim_period - 1; + else + ps->dtim_count--; + } + + tim = pos = skb_put(skb, 5); + *pos++ = WLAN_EID_TIM; + *pos++ = 3; + *pos++ = ps->dtim_count; + *pos++ = link_conf->dtim_period; + + if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf)) + aid0 = 1; + + ps->dtim_bc_mc = aid0 == 1; + + if (have_bits) { + /* Find largest even number N1 so that bits numbered 1 through + * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits + * (N2 + 1) x 8 through 2007 are 0. */ + n1 = 0; + for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) { + if (ps->tim[i]) { + n1 = i & 0xfe; + break; + } + } + n2 = n1; + for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) { + if (ps->tim[i]) { + n2 = i; + break; + } + } + + /* Bitmap control */ + *pos++ = n1 | aid0; + /* Part Virt Bitmap */ + skb_put_data(skb, ps->tim + n1, n2 - n1 + 1); + + tim[1] = n2 - n1 + 4; + } else { + *pos++ = aid0; /* Bitmap control */ + + if (ieee80211_get_link_sband(link)->band != NL80211_BAND_S1GHZ) { + tim[1] = 4; + /* Part Virt Bitmap */ + skb_put_u8(skb, 0); + } + } +} + +static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link, + struct ps_data *ps, struct sk_buff *skb, + bool is_template) +{ + struct ieee80211_local *local = sdata->local; + + /* + * Not very nice, but we want to allow the driver to call + * ieee80211_beacon_get() as a response to the set_tim() + * callback. That, however, is already invoked under the + * sta_lock to guarantee consistent and race-free update + * of the tim bitmap in mac80211 and the driver. 
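+ *
+ * That is what the tim_in_locked_section flag below expresses: when it
+ * is set the TIM lock is already held by the caller and must not be
+ * taken again, otherwise it is taken around the update here.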
+ */ + if (local->tim_in_locked_section) { + __ieee80211_beacon_add_tim(sdata, link, ps, skb, is_template); + } else { + spin_lock_bh(&local->tim_lock); + __ieee80211_beacon_add_tim(sdata, link, ps, skb, is_template); + spin_unlock_bh(&local->tim_lock); + } + + return 0; +} + +static void ieee80211_set_beacon_cntdwn(struct ieee80211_sub_if_data *sdata, + struct beacon_data *beacon, + struct ieee80211_link_data *link) +{ + u8 *beacon_data, count, max_count = 1; + struct probe_resp *resp; + size_t beacon_data_len; + u16 *bcn_offsets; + int i; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP: + beacon_data = beacon->tail; + beacon_data_len = beacon->tail_len; + break; + case NL80211_IFTYPE_ADHOC: + beacon_data = beacon->head; + beacon_data_len = beacon->head_len; + break; + case NL80211_IFTYPE_MESH_POINT: + beacon_data = beacon->head; + beacon_data_len = beacon->head_len; + break; + default: + return; + } + + resp = rcu_dereference(link->u.ap.probe_resp); + + bcn_offsets = beacon->cntdwn_counter_offsets; + count = beacon->cntdwn_current_counter; + if (link->conf->csa_active) + max_count = IEEE80211_MAX_CNTDWN_COUNTERS_NUM; + + for (i = 0; i < max_count; ++i) { + if (bcn_offsets[i]) { + if (WARN_ON_ONCE(bcn_offsets[i] >= beacon_data_len)) + return; + beacon_data[bcn_offsets[i]] = count; + } + + if (sdata->vif.type == NL80211_IFTYPE_AP && resp) { + u16 *resp_offsets = resp->cntdwn_counter_offsets; + + resp->data[resp_offsets[i]] = count; + } + } +} + +static u8 __ieee80211_beacon_update_cntdwn(struct beacon_data *beacon) +{ + beacon->cntdwn_current_counter--; + + /* the counter should never reach 0 */ + WARN_ON_ONCE(!beacon->cntdwn_current_counter); + + return beacon->cntdwn_current_counter; +} + +u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct beacon_data *beacon = NULL; + u8 count = 0; + + rcu_read_lock(); + + if (sdata->vif.type == NL80211_IFTYPE_AP) + beacon = rcu_dereference(sdata->deflink.u.ap.beacon); + else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) + beacon = rcu_dereference(sdata->u.ibss.presp); + else if (ieee80211_vif_is_mesh(&sdata->vif)) + beacon = rcu_dereference(sdata->u.mesh.beacon); + + if (!beacon) + goto unlock; + + count = __ieee80211_beacon_update_cntdwn(beacon); + +unlock: + rcu_read_unlock(); + return count; +} +EXPORT_SYMBOL(ieee80211_beacon_update_cntdwn); + +void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct beacon_data *beacon = NULL; + + rcu_read_lock(); + + if (sdata->vif.type == NL80211_IFTYPE_AP) + beacon = rcu_dereference(sdata->deflink.u.ap.beacon); + else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) + beacon = rcu_dereference(sdata->u.ibss.presp); + else if (ieee80211_vif_is_mesh(&sdata->vif)) + beacon = rcu_dereference(sdata->u.mesh.beacon); + + if (!beacon) + goto unlock; + + if (counter < beacon->cntdwn_current_counter) + beacon->cntdwn_current_counter = counter; + +unlock: + rcu_read_unlock(); +} +EXPORT_SYMBOL(ieee80211_beacon_set_cntdwn); + +bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct beacon_data *beacon = NULL; + u8 *beacon_data; + size_t beacon_data_len; + int ret = false; + + if (!ieee80211_sdata_running(sdata)) + return false; + + rcu_read_lock(); + if (vif->type == NL80211_IFTYPE_AP) { + beacon = rcu_dereference(sdata->deflink.u.ap.beacon); + if (WARN_ON(!beacon || 
!beacon->tail)) + goto out; + beacon_data = beacon->tail; + beacon_data_len = beacon->tail_len; + } else if (vif->type == NL80211_IFTYPE_ADHOC) { + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + + beacon = rcu_dereference(ifibss->presp); + if (!beacon) + goto out; + + beacon_data = beacon->head; + beacon_data_len = beacon->head_len; + } else if (vif->type == NL80211_IFTYPE_MESH_POINT) { + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + beacon = rcu_dereference(ifmsh->beacon); + if (!beacon) + goto out; + + beacon_data = beacon->head; + beacon_data_len = beacon->head_len; + } else { + WARN_ON(1); + goto out; + } + + if (!beacon->cntdwn_counter_offsets[0]) + goto out; + + if (WARN_ON_ONCE(beacon->cntdwn_counter_offsets[0] > beacon_data_len)) + goto out; + + if (beacon_data[beacon->cntdwn_counter_offsets[0]] == 1) + ret = true; + + out: + rcu_read_unlock(); + + return ret; +} +EXPORT_SYMBOL(ieee80211_beacon_cntdwn_is_complete); + +static int ieee80211_beacon_protect(struct sk_buff *skb, + struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link) +{ + ieee80211_tx_result res; + struct ieee80211_tx_data tx; + struct sk_buff *check_skb; + + memset(&tx, 0, sizeof(tx)); + tx.key = rcu_dereference(link->default_beacon_key); + if (!tx.key) + return 0; + + if (unlikely(tx.key->flags & KEY_FLAG_TAINTED)) { + tx.key = NULL; + return -EINVAL; + } + + if (!(tx.key->conf.flags & IEEE80211_KEY_FLAG_SW_MGMT_TX) && + tx.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) + IEEE80211_SKB_CB(skb)->control.hw_key = &tx.key->conf; + + tx.local = local; + tx.sdata = sdata; + __skb_queue_head_init(&tx.skbs); + __skb_queue_tail(&tx.skbs, skb); + res = ieee80211_tx_h_encrypt(&tx); + check_skb = __skb_dequeue(&tx.skbs); + /* we may crash after this, but it'd be a bug in crypto */ + WARN_ON(check_skb != skb); + if (WARN_ON_ONCE(res != TX_CONTINUE)) + return -EINVAL; + + return 0; +} + +static void +ieee80211_beacon_get_finish(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_link_data *link, + struct ieee80211_mutable_offsets *offs, + struct beacon_data *beacon, + struct sk_buff *skb, + struct ieee80211_chanctx_conf *chanctx_conf, + u16 csa_off_base) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_tx_info *info; + enum nl80211_band band; + struct ieee80211_tx_rate_control txrc; + + /* CSA offsets */ + if (offs && beacon) { + u16 i; + + for (i = 0; i < IEEE80211_MAX_CNTDWN_COUNTERS_NUM; i++) { + u16 csa_off = beacon->cntdwn_counter_offsets[i]; + + if (!csa_off) + continue; + + offs->cntdwn_counter_offs[i] = csa_off_base + csa_off; + } + } + + band = chanctx_conf->def.chan->band; + info = IEEE80211_SKB_CB(skb); + info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + info->flags |= IEEE80211_TX_CTL_NO_ACK; + info->band = band; + + memset(&txrc, 0, sizeof(txrc)); + txrc.hw = hw; + txrc.sband = local->hw.wiphy->bands[band]; + txrc.bss_conf = link->conf; + txrc.skb = skb; + txrc.reported_rate.idx = -1; + if (sdata->beacon_rate_set && sdata->beacon_rateidx_mask[band]) + txrc.rate_idx_mask = sdata->beacon_rateidx_mask[band]; + else + txrc.rate_idx_mask = sdata->rc_rateidx_mask[band]; + txrc.bss = true; + rate_control_get_rate(sdata, NULL, &txrc); + + info->control.vif = vif; + info->control.flags |= u32_encode_bits(link->link_id, + IEEE80211_TX_CTRL_MLO_LINK); + info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT | + IEEE80211_TX_CTL_ASSIGN_SEQ | + 
IEEE80211_TX_CTL_FIRST_FRAGMENT; +} + +static void +ieee80211_beacon_add_mbssid(struct sk_buff *skb, struct beacon_data *beacon, + u8 i) +{ + if (!beacon->mbssid_ies || !beacon->mbssid_ies->cnt || + i > beacon->mbssid_ies->cnt) + return; + + if (i < beacon->mbssid_ies->cnt) { + skb_put_data(skb, beacon->mbssid_ies->elem[i].data, + beacon->mbssid_ies->elem[i].len); + + if (beacon->rnr_ies && beacon->rnr_ies->cnt) { + skb_put_data(skb, beacon->rnr_ies->elem[i].data, + beacon->rnr_ies->elem[i].len); + + for (i = beacon->mbssid_ies->cnt; i < beacon->rnr_ies->cnt; i++) + skb_put_data(skb, beacon->rnr_ies->elem[i].data, + beacon->rnr_ies->elem[i].len); + } + return; + } + + /* i == beacon->mbssid_ies->cnt, include all MBSSID elements */ + for (i = 0; i < beacon->mbssid_ies->cnt; i++) + skb_put_data(skb, beacon->mbssid_ies->elem[i].data, + beacon->mbssid_ies->elem[i].len); +} + +static struct sk_buff * +ieee80211_beacon_get_ap(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_link_data *link, + struct ieee80211_mutable_offsets *offs, + bool is_template, + struct beacon_data *beacon, + struct ieee80211_chanctx_conf *chanctx_conf, + u8 ema_index) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_if_ap *ap = &sdata->u.ap; + struct sk_buff *skb = NULL; + u16 csa_off_base = 0; + int mbssid_len; + + if (beacon->cntdwn_counter_offsets[0]) { + if (!is_template) + ieee80211_beacon_update_cntdwn(vif); + + ieee80211_set_beacon_cntdwn(sdata, beacon, link); + } + + /* headroom, head length, + * tail length, maximum TIM length and multiple BSSID length + */ + mbssid_len = ieee80211_get_mbssid_beacon_len(beacon->mbssid_ies, + beacon->rnr_ies, + ema_index); + + skb = dev_alloc_skb(local->tx_headroom + beacon->head_len + + beacon->tail_len + 256 + + local->hw.extra_beacon_tailroom + mbssid_len); + if (!skb) + return NULL; + + skb_reserve(skb, local->tx_headroom); + skb_put_data(skb, beacon->head, beacon->head_len); + + ieee80211_beacon_add_tim(sdata, link, &ap->ps, skb, is_template); + + if (offs) { + offs->tim_offset = beacon->head_len; + offs->tim_length = skb->len - beacon->head_len; + offs->cntdwn_counter_offs[0] = beacon->cntdwn_counter_offsets[0]; + + if (mbssid_len) { + ieee80211_beacon_add_mbssid(skb, beacon, ema_index); + offs->mbssid_off = skb->len - mbssid_len; + } + + /* for AP the csa offsets are from tail */ + csa_off_base = skb->len; + } + + if (beacon->tail) + skb_put_data(skb, beacon->tail, beacon->tail_len); + + if (ieee80211_beacon_protect(skb, local, sdata, link) < 0) + return NULL; + + ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb, + chanctx_conf, csa_off_base); + return skb; +} + +static struct ieee80211_ema_beacons * +ieee80211_beacon_get_ap_ema_list(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_link_data *link, + struct ieee80211_mutable_offsets *offs, + bool is_template, struct beacon_data *beacon, + struct ieee80211_chanctx_conf *chanctx_conf) +{ + struct ieee80211_ema_beacons *ema = NULL; + + if (!beacon->mbssid_ies || !beacon->mbssid_ies->cnt) + return NULL; + + ema = kzalloc(struct_size(ema, bcn, beacon->mbssid_ies->cnt), + GFP_ATOMIC); + if (!ema) + return NULL; + + for (ema->cnt = 0; ema->cnt < beacon->mbssid_ies->cnt; ema->cnt++) { + ema->bcn[ema->cnt].skb = + ieee80211_beacon_get_ap(hw, vif, link, + &ema->bcn[ema->cnt].offs, + is_template, beacon, + chanctx_conf, ema->cnt); + if (!ema->bcn[ema->cnt].skb) + break; + } + + if (ema->cnt == 
beacon->mbssid_ies->cnt) + return ema; + + ieee80211_beacon_free_ema_list(ema); + return NULL; +} + +#define IEEE80211_INCLUDE_ALL_MBSSID_ELEMS -1 + +static struct sk_buff * +__ieee80211_beacon_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_mutable_offsets *offs, + bool is_template, + unsigned int link_id, + int ema_index, + struct ieee80211_ema_beacons **ema_beacons) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct beacon_data *beacon = NULL; + struct sk_buff *skb = NULL; + struct ieee80211_sub_if_data *sdata = NULL; + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_link_data *link; + + rcu_read_lock(); + + sdata = vif_to_sdata(vif); + link = rcu_dereference(sdata->link[link_id]); + if (!link) + goto out; + chanctx_conf = + rcu_dereference(link->conf->chanctx_conf); + + if (!ieee80211_sdata_running(sdata) || !chanctx_conf) + goto out; + + if (offs) + memset(offs, 0, sizeof(*offs)); + + if (sdata->vif.type == NL80211_IFTYPE_AP) { + beacon = rcu_dereference(link->u.ap.beacon); + if (!beacon) + goto out; + + if (ema_beacons) { + *ema_beacons = + ieee80211_beacon_get_ap_ema_list(hw, vif, link, + offs, + is_template, + beacon, + chanctx_conf); + } else { + if (beacon->mbssid_ies && beacon->mbssid_ies->cnt) { + if (ema_index >= beacon->mbssid_ies->cnt) + goto out; /* End of MBSSID elements */ + + if (ema_index <= IEEE80211_INCLUDE_ALL_MBSSID_ELEMS) + ema_index = beacon->mbssid_ies->cnt; + } else { + ema_index = 0; + } + + skb = ieee80211_beacon_get_ap(hw, vif, link, offs, + is_template, beacon, + chanctx_conf, + ema_index); + } + } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_hdr *hdr; + + beacon = rcu_dereference(ifibss->presp); + if (!beacon) + goto out; + + if (beacon->cntdwn_counter_offsets[0]) { + if (!is_template) + __ieee80211_beacon_update_cntdwn(beacon); + + ieee80211_set_beacon_cntdwn(sdata, beacon, link); + } + + skb = dev_alloc_skb(local->tx_headroom + beacon->head_len + + local->hw.extra_beacon_tailroom); + if (!skb) + goto out; + skb_reserve(skb, local->tx_headroom); + skb_put_data(skb, beacon->head, beacon->head_len); + + hdr = (struct ieee80211_hdr *) skb->data; + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_BEACON); + + ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb, + chanctx_conf, 0); + } else if (ieee80211_vif_is_mesh(&sdata->vif)) { + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + beacon = rcu_dereference(ifmsh->beacon); + if (!beacon) + goto out; + + if (beacon->cntdwn_counter_offsets[0]) { + if (!is_template) + /* TODO: For mesh csa_counter is in TU, so + * decrementing it by one isn't correct, but + * for now we leave it consistent with overall + * mac80211's behavior. 
+ */ + __ieee80211_beacon_update_cntdwn(beacon); + + ieee80211_set_beacon_cntdwn(sdata, beacon, link); + } + + if (ifmsh->sync_ops) + ifmsh->sync_ops->adjust_tsf(sdata, beacon); + + skb = dev_alloc_skb(local->tx_headroom + + beacon->head_len + + 256 + /* TIM IE */ + beacon->tail_len + + local->hw.extra_beacon_tailroom); + if (!skb) + goto out; + skb_reserve(skb, local->tx_headroom); + skb_put_data(skb, beacon->head, beacon->head_len); + ieee80211_beacon_add_tim(sdata, link, &ifmsh->ps, skb, + is_template); + + if (offs) { + offs->tim_offset = beacon->head_len; + offs->tim_length = skb->len - beacon->head_len; + } + + skb_put_data(skb, beacon->tail, beacon->tail_len); + ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb, + chanctx_conf, 0); + } else { + WARN_ON(1); + goto out; + } + + out: + rcu_read_unlock(); + return skb; + +} + +struct sk_buff * +ieee80211_beacon_get_template(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_mutable_offsets *offs, + unsigned int link_id) +{ + return __ieee80211_beacon_get(hw, vif, offs, true, link_id, + IEEE80211_INCLUDE_ALL_MBSSID_ELEMS, NULL); +} +EXPORT_SYMBOL(ieee80211_beacon_get_template); + +struct sk_buff * +ieee80211_beacon_get_template_ema_index(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_mutable_offsets *offs, + unsigned int link_id, u8 ema_index) +{ + return __ieee80211_beacon_get(hw, vif, offs, true, link_id, ema_index, + NULL); +} +EXPORT_SYMBOL(ieee80211_beacon_get_template_ema_index); + +void ieee80211_beacon_free_ema_list(struct ieee80211_ema_beacons *ema_beacons) +{ + u8 i; + + if (!ema_beacons) + return; + + for (i = 0; i < ema_beacons->cnt; i++) + kfree_skb(ema_beacons->bcn[i].skb); + + kfree(ema_beacons); +} +EXPORT_SYMBOL(ieee80211_beacon_free_ema_list); + +struct ieee80211_ema_beacons * +ieee80211_beacon_get_template_ema_list(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + unsigned int link_id) +{ + struct ieee80211_ema_beacons *ema_beacons = NULL; + + WARN_ON(__ieee80211_beacon_get(hw, vif, NULL, true, link_id, 0, + &ema_beacons)); + + return ema_beacons; +} +EXPORT_SYMBOL(ieee80211_beacon_get_template_ema_list); + +struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u16 *tim_offset, u16 *tim_length, + unsigned int link_id) +{ + struct ieee80211_mutable_offsets offs = {}; + struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false, + link_id, + IEEE80211_INCLUDE_ALL_MBSSID_ELEMS, + NULL); + struct sk_buff *copy; + int shift; + + if (!bcn) + return bcn; + + if (tim_offset) + *tim_offset = offs.tim_offset; + + if (tim_length) + *tim_length = offs.tim_length; + + if (ieee80211_hw_check(hw, BEACON_TX_STATUS) || + !hw_to_local(hw)->monitors) + return bcn; + + /* send a copy to monitor interfaces */ + copy = skb_copy(bcn, GFP_ATOMIC); + if (!copy) + return bcn; + + shift = ieee80211_vif_get_shift(vif); + ieee80211_tx_monitor(hw_to_local(hw), copy, 1, shift, false, NULL); + + return bcn; +} +EXPORT_SYMBOL(ieee80211_beacon_get_tim); + +struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct sk_buff *skb = NULL; + struct probe_resp *presp = NULL; + struct ieee80211_hdr *hdr; + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + if (sdata->vif.type != NL80211_IFTYPE_AP) + return NULL; + + rcu_read_lock(); + presp = rcu_dereference(sdata->deflink.u.ap.probe_resp); + if (!presp) + goto out; + + skb = dev_alloc_skb(presp->len); + if (!skb) + goto out; + + 
skb_put_data(skb, presp->data, presp->len); + + hdr = (struct ieee80211_hdr *) skb->data; + memset(hdr->addr1, 0, sizeof(hdr->addr1)); + +out: + rcu_read_unlock(); + return skb; +} +EXPORT_SYMBOL(ieee80211_proberesp_get); + +struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct sk_buff *skb = NULL; + struct fils_discovery_data *tmpl = NULL; + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + if (sdata->vif.type != NL80211_IFTYPE_AP) + return NULL; + + rcu_read_lock(); + tmpl = rcu_dereference(sdata->deflink.u.ap.fils_discovery); + if (!tmpl) { + rcu_read_unlock(); + return NULL; + } + + skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + tmpl->len); + if (skb) { + skb_reserve(skb, sdata->local->hw.extra_tx_headroom); + skb_put_data(skb, tmpl->data, tmpl->len); + } + + rcu_read_unlock(); + return skb; +} +EXPORT_SYMBOL(ieee80211_get_fils_discovery_tmpl); + +struct sk_buff * +ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct sk_buff *skb = NULL; + struct unsol_bcast_probe_resp_data *tmpl = NULL; + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + if (sdata->vif.type != NL80211_IFTYPE_AP) + return NULL; + + rcu_read_lock(); + tmpl = rcu_dereference(sdata->deflink.u.ap.unsol_bcast_probe_resp); + if (!tmpl) { + rcu_read_unlock(); + return NULL; + } + + skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + tmpl->len); + if (skb) { + skb_reserve(skb, sdata->local->hw.extra_tx_headroom); + skb_put_data(skb, tmpl->data, tmpl->len); + } + + rcu_read_unlock(); + return skb; +} +EXPORT_SYMBOL(ieee80211_get_unsol_bcast_probe_resp_tmpl); + +struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_pspoll *pspoll; + struct ieee80211_local *local; + struct sk_buff *skb; + + if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) + return NULL; + + sdata = vif_to_sdata(vif); + local = sdata->local; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll)); + if (!skb) + return NULL; + + skb_reserve(skb, local->hw.extra_tx_headroom); + + pspoll = skb_put_zero(skb, sizeof(*pspoll)); + pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | + IEEE80211_STYPE_PSPOLL); + pspoll->aid = cpu_to_le16(sdata->vif.cfg.aid); + + /* aid in PS-Poll has its two MSBs each set to 1 */ + pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14); + + memcpy(pspoll->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN); + memcpy(pspoll->ta, vif->addr, ETH_ALEN); + + return skb; +} +EXPORT_SYMBOL(ieee80211_pspoll_get); + +struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + int link_id, bool qos_ok) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + struct ieee80211_link_data *link = NULL; + struct ieee80211_hdr_3addr *nullfunc; + struct sk_buff *skb; + bool qos = false; + + if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) + return NULL; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + + sizeof(*nullfunc) + 2); + if (!skb) + return NULL; + + rcu_read_lock(); + if (qos_ok) { + struct sta_info *sta; + + sta = sta_info_get(sdata, vif->cfg.ap_addr); + qos = sta && sta->sta.wme; + } + + if (link_id >= 0) { + link = rcu_dereference(sdata->link[link_id]); + if (WARN_ON_ONCE(!link)) { + rcu_read_unlock(); + kfree_skb(skb); + return NULL; + } + } + + skb_reserve(skb, local->hw.extra_tx_headroom); + + 
nullfunc = skb_put_zero(skb, sizeof(*nullfunc)); + nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_NULLFUNC | + IEEE80211_FCTL_TODS); + if (qos) { + __le16 qoshdr = cpu_to_le16(7); + + BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC | + IEEE80211_STYPE_NULLFUNC) != + IEEE80211_STYPE_QOS_NULLFUNC); + nullfunc->frame_control |= + cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC); + skb->priority = 7; + skb_set_queue_mapping(skb, IEEE80211_AC_VO); + skb_put_data(skb, &qoshdr, sizeof(qoshdr)); + } + + if (link) { + memcpy(nullfunc->addr1, link->conf->bssid, ETH_ALEN); + memcpy(nullfunc->addr2, link->conf->addr, ETH_ALEN); + memcpy(nullfunc->addr3, link->conf->bssid, ETH_ALEN); + } else { + memcpy(nullfunc->addr1, vif->cfg.ap_addr, ETH_ALEN); + memcpy(nullfunc->addr2, vif->addr, ETH_ALEN); + memcpy(nullfunc->addr3, vif->cfg.ap_addr, ETH_ALEN); + } + rcu_read_unlock(); + + return skb; +} +EXPORT_SYMBOL(ieee80211_nullfunc_get); + +struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, + const u8 *src_addr, + const u8 *ssid, size_t ssid_len, + size_t tailroom) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_hdr_3addr *hdr; + struct sk_buff *skb; + size_t ie_ssid_len; + u8 *pos; + + ie_ssid_len = 2 + ssid_len; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) + + ie_ssid_len + tailroom); + if (!skb) + return NULL; + + skb_reserve(skb, local->hw.extra_tx_headroom); + + hdr = skb_put_zero(skb, sizeof(*hdr)); + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_REQ); + eth_broadcast_addr(hdr->addr1); + memcpy(hdr->addr2, src_addr, ETH_ALEN); + eth_broadcast_addr(hdr->addr3); + + pos = skb_put(skb, ie_ssid_len); + *pos++ = WLAN_EID_SSID; + *pos++ = ssid_len; + if (ssid_len) + memcpy(pos, ssid, ssid_len); + pos += ssid_len; + + return skb; +} +EXPORT_SYMBOL(ieee80211_probereq_get); + +void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const void *frame, size_t frame_len, + const struct ieee80211_tx_info *frame_txctl, + struct ieee80211_rts *rts) +{ + const struct ieee80211_hdr *hdr = frame; + + rts->frame_control = + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); + rts->duration = ieee80211_rts_duration(hw, vif, frame_len, + frame_txctl); + memcpy(rts->ra, hdr->addr1, sizeof(rts->ra)); + memcpy(rts->ta, hdr->addr2, sizeof(rts->ta)); +} +EXPORT_SYMBOL(ieee80211_rts_get); + +void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const void *frame, size_t frame_len, + const struct ieee80211_tx_info *frame_txctl, + struct ieee80211_cts *cts) +{ + const struct ieee80211_hdr *hdr = frame; + + cts->frame_control = + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); + cts->duration = ieee80211_ctstoself_duration(hw, vif, + frame_len, frame_txctl); + memcpy(cts->ra, hdr->addr1, sizeof(cts->ra)); +} +EXPORT_SYMBOL(ieee80211_ctstoself_get); + +struct sk_buff * +ieee80211_get_buffered_bc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct sk_buff *skb = NULL; + struct ieee80211_tx_data tx; + struct ieee80211_sub_if_data *sdata; + struct ps_data *ps; + struct ieee80211_tx_info *info; + struct ieee80211_chanctx_conf *chanctx_conf; + + sdata = vif_to_sdata(vif); + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + + if (!chanctx_conf) + goto out; + + if (sdata->vif.type == NL80211_IFTYPE_AP) { + struct beacon_data *beacon = + 
rcu_dereference(sdata->deflink.u.ap.beacon); + + if (!beacon || !beacon->head) + goto out; + + ps = &sdata->u.ap.ps; + } else if (ieee80211_vif_is_mesh(&sdata->vif)) { + ps = &sdata->u.mesh.ps; + } else { + goto out; + } + + if (ps->dtim_count != 0 || !ps->dtim_bc_mc) + goto out; /* send buffered bc/mc only after DTIM beacon */ + + while (1) { + skb = skb_dequeue(&ps->bc_buf); + if (!skb) + goto out; + local->total_ps_buffered--; + + if (!skb_queue_empty(&ps->bc_buf) && skb->len >= 2) { + struct ieee80211_hdr *hdr = + (struct ieee80211_hdr *) skb->data; + /* more buffered multicast/broadcast frames ==> set + * MoreData flag in IEEE 802.11 header to inform PS + * STAs */ + hdr->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } + + if (sdata->vif.type == NL80211_IFTYPE_AP) + sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); + if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb)) + break; + ieee80211_free_txskb(hw, skb); + } + + info = IEEE80211_SKB_CB(skb); + + tx.flags |= IEEE80211_TX_PS_BUFFERED; + info->band = chanctx_conf->def.chan->band; + + if (invoke_tx_handlers(&tx)) + skb = NULL; + out: + rcu_read_unlock(); + + return skb; +} +EXPORT_SYMBOL(ieee80211_get_buffered_bc); + +int ieee80211_reserve_tid(struct ieee80211_sta *pubsta, u8 tid) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + int ret; + u32 queues; + + lockdep_assert_held(&local->sta_mtx); + + /* only some cases are supported right now */ + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + break; + default: + WARN_ON(1); + return -EINVAL; + } + + if (WARN_ON(tid >= IEEE80211_NUM_UPS)) + return -EINVAL; + + if (sta->reserved_tid == tid) { + ret = 0; + goto out; + } + + if (sta->reserved_tid != IEEE80211_TID_UNRESERVED) { + sdata_err(sdata, "TID reservation already active\n"); + ret = -EALREADY; + goto out; + } + + ieee80211_stop_vif_queues(sdata->local, sdata, + IEEE80211_QUEUE_STOP_REASON_RESERVE_TID); + + synchronize_net(); + + /* Tear down BA sessions so we stop aggregating on this TID */ + if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) { + set_sta_flag(sta, WLAN_STA_BLOCK_BA); + __ieee80211_stop_tx_ba_session(sta, tid, + AGG_STOP_LOCAL_REQUEST); + } + + queues = BIT(sdata->vif.hw_queue[ieee802_1d_to_ac[tid]]); + __ieee80211_flush_queues(local, sdata, queues, false); + + sta->reserved_tid = tid; + + ieee80211_wake_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_RESERVE_TID); + + if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) + clear_sta_flag(sta, WLAN_STA_BLOCK_BA); + + ret = 0; + out: + return ret; +} +EXPORT_SYMBOL(ieee80211_reserve_tid); + +void ieee80211_unreserve_tid(struct ieee80211_sta *pubsta, u8 tid) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_sub_if_data *sdata = sta->sdata; + + lockdep_assert_held(&sdata->local->sta_mtx); + + /* only some cases are supported right now */ + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + break; + default: + WARN_ON(1); + return; + } + + if (tid != sta->reserved_tid) { + sdata_err(sdata, "TID to unreserve (%d) isn't reserved\n", tid); + return; + } + + sta->reserved_tid = IEEE80211_TID_UNRESERVED; +} +EXPORT_SYMBOL(ieee80211_unreserve_tid); + +void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, int tid, int link_id, + 
enum nl80211_band band) +{ + const struct ieee80211_hdr *hdr = (void *)skb->data; + int ac = ieee80211_ac_from_tid(tid); + unsigned int link; + + skb_reset_mac_header(skb); + skb_set_queue_mapping(skb, ac); + skb->priority = tid; + + skb->dev = sdata->dev; + + BUILD_BUG_ON(IEEE80211_LINK_UNSPECIFIED < IEEE80211_MLD_MAX_NUM_LINKS); + BUILD_BUG_ON(!FIELD_FIT(IEEE80211_TX_CTRL_MLO_LINK, + IEEE80211_LINK_UNSPECIFIED)); + + if (!ieee80211_vif_is_mld(&sdata->vif)) { + link = 0; + } else if (link_id >= 0) { + link = link_id; + } else if (memcmp(sdata->vif.addr, hdr->addr2, ETH_ALEN) == 0) { + /* address from the MLD */ + link = IEEE80211_LINK_UNSPECIFIED; + } else { + /* otherwise must be addressed from a link */ + rcu_read_lock(); + for (link = 0; link < ARRAY_SIZE(sdata->vif.link_conf); link++) { + struct ieee80211_bss_conf *link_conf; + + link_conf = rcu_dereference(sdata->vif.link_conf[link]); + if (!link_conf) + continue; + if (memcmp(link_conf->addr, hdr->addr2, ETH_ALEN) == 0) + break; + } + rcu_read_unlock(); + + if (WARN_ON_ONCE(link == ARRAY_SIZE(sdata->vif.link_conf))) + link = ffs(sdata->vif.active_links) - 1; + } + + IEEE80211_SKB_CB(skb)->control.flags |= + u32_encode_bits(link, IEEE80211_TX_CTRL_MLO_LINK); + + /* + * The other path calling ieee80211_xmit is from the tasklet, + * and while we can handle concurrent transmissions locking + * requirements are that we do not come into tx with bhs on. + */ + local_bh_disable(); + IEEE80211_SKB_CB(skb)->band = band; + ieee80211_xmit(sdata, NULL, skb); + local_bh_enable(); +} + +void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, int tid, int link_id) +{ + struct ieee80211_chanctx_conf *chanctx_conf; + enum nl80211_band band; + + rcu_read_lock(); + if (!ieee80211_vif_is_mld(&sdata->vif)) { + WARN_ON(link_id >= 0); + chanctx_conf = + rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + if (WARN_ON(!chanctx_conf)) { + rcu_read_unlock(); + kfree_skb(skb); + return; + } + band = chanctx_conf->def.chan->band; + } else { + WARN_ON(link_id >= 0 && + !(sdata->vif.active_links & BIT(link_id))); + /* MLD transmissions must not rely on the band */ + band = 0; + } + + __ieee80211_tx_skb_tid_band(sdata, skb, tid, link_id, band); + rcu_read_unlock(); +} + +int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, + const u8 *buf, size_t len, + const u8 *dest, __be16 proto, bool unencrypted, + int link_id, u64 *cookie) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + struct sk_buff *skb; + struct ethhdr *ehdr; + u32 ctrl_flags = 0; + u32 flags = 0; + int err; + + /* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE + * or Pre-Authentication + */ + if (proto != sdata->control_port_protocol && + proto != cpu_to_be16(ETH_P_PREAUTH)) + return -EINVAL; + + if (proto == sdata->control_port_protocol) + ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO | + IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP; + + if (unencrypted) + flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + + if (cookie) + ctrl_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + + flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + + sizeof(struct ethhdr) + len); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, local->hw.extra_tx_headroom + sizeof(struct ethhdr)); + + skb_put_data(skb, buf, len); + + ehdr = skb_push(skb, sizeof(struct ethhdr)); + memcpy(ehdr->h_dest, dest, ETH_ALEN); + + /* we may override the 
SA for MLO STA later */ + if (link_id < 0) { + ctrl_flags |= u32_encode_bits(IEEE80211_LINK_UNSPECIFIED, + IEEE80211_TX_CTRL_MLO_LINK); + memcpy(ehdr->h_source, sdata->vif.addr, ETH_ALEN); + } else { + struct ieee80211_bss_conf *link_conf; + + ctrl_flags |= u32_encode_bits(link_id, + IEEE80211_TX_CTRL_MLO_LINK); + + rcu_read_lock(); + link_conf = rcu_dereference(sdata->vif.link_conf[link_id]); + if (!link_conf) { + dev_kfree_skb(skb); + rcu_read_unlock(); + return -ENOLINK; + } + memcpy(ehdr->h_source, link_conf->addr, ETH_ALEN); + rcu_read_unlock(); + } + + ehdr->h_proto = proto; + + skb->dev = dev; + skb->protocol = proto; + skb_reset_network_header(skb); + skb_reset_mac_header(skb); + + if (local->hw.queues < IEEE80211_NUM_ACS) + goto start_xmit; + + /* update QoS header to prioritize control port frames if possible, + * prioritization also happens for control port frames sent over + * AF_PACKET + */ + rcu_read_lock(); + err = ieee80211_lookup_ra_sta(sdata, skb, &sta); + if (err) { + dev_kfree_skb(skb); + rcu_read_unlock(); + return err; + } + + if (!IS_ERR(sta)) { + u16 queue = ieee80211_select_queue(sdata, sta, skb); + + skb_set_queue_mapping(skb, queue); + + /* + * for MLO STA, the SA should be the AP MLD address, but + * the link ID has been selected already + */ + if (sta && sta->sta.mlo) + memcpy(ehdr->h_source, sdata->vif.addr, ETH_ALEN); + } + rcu_read_unlock(); + +start_xmit: + /* mutex lock is only needed for incrementing the cookie counter */ + mutex_lock(&local->mtx); + + local_bh_disable(); + __ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags, cookie); + local_bh_enable(); + + mutex_unlock(&local->mtx); + + return 0; +} + +int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev, + const u8 *buf, size_t len) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + len + + 30 + /* header size */ + 18); /* 11s header size */ + if (!skb) + return -ENOMEM; + + skb_reserve(skb, local->hw.extra_tx_headroom); + skb_put_data(skb, buf, len); + + skb->dev = dev; + skb->protocol = htons(ETH_P_802_3); + skb_reset_network_header(skb); + skb_reset_mac_header(skb); + + local_bh_disable(); + __ieee80211_subif_start_xmit(skb, skb->dev, 0, + IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP, + NULL); + local_bh_enable(); + + return 0; +} diff --git a/net/mac80211/util.c b/net/mac80211/util.c new file mode 100644 index 0000000000..172173b2a9 --- /dev/null +++ b/net/mac80211/util.c @@ -0,0 +1,5147 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. 
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2023 Intel Corporation + * + * utilities for mac80211 + */ + +#include <net/mac80211.h> +#include <linux/netdevice.h> +#include <linux/export.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/skbuff.h> +#include <linux/etherdevice.h> +#include <linux/if_arp.h> +#include <linux/bitmap.h> +#include <linux/crc32.h> +#include <net/net_namespace.h> +#include <net/cfg80211.h> +#include <net/rtnetlink.h> + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" +#include "mesh.h" +#include "wme.h" +#include "led.h" +#include "wep.h" + +/* privid for wiphys to determine whether they belong to us or not */ +const void *const mac80211_wiphy_privid = &mac80211_wiphy_privid; + +struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy) +{ + struct ieee80211_local *local; + + local = wiphy_priv(wiphy); + return &local->hw; +} +EXPORT_SYMBOL(wiphy_to_ieee80211_hw); + +u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, + enum nl80211_iftype type) +{ + __le16 fc = hdr->frame_control; + + if (ieee80211_is_data(fc)) { + if (len < 24) /* drop incorrect hdr len (data) */ + return NULL; + + if (ieee80211_has_a4(fc)) + return NULL; + if (ieee80211_has_tods(fc)) + return hdr->addr1; + if (ieee80211_has_fromds(fc)) + return hdr->addr2; + + return hdr->addr3; + } + + if (ieee80211_is_s1g_beacon(fc)) { + struct ieee80211_ext *ext = (void *) hdr; + + return ext->u.s1g_beacon.sa; + } + + if (ieee80211_is_mgmt(fc)) { + if (len < 24) /* drop incorrect hdr len (mgmt) */ + return NULL; + return hdr->addr3; + } + + if (ieee80211_is_ctl(fc)) { + if (ieee80211_is_pspoll(fc)) + return hdr->addr1; + + if (ieee80211_is_back_req(fc)) { + switch (type) { + case NL80211_IFTYPE_STATION: + return hdr->addr2; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + return hdr->addr1; + default: + break; /* fall through to the return */ + } + } + } + + return NULL; +} +EXPORT_SYMBOL(ieee80211_get_bssid); + +void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + + skb_queue_walk(&tx->skbs, skb) { + hdr = (struct ieee80211_hdr *) skb->data; + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); + } +} + +int ieee80211_frame_duration(enum nl80211_band band, size_t len, + int rate, int erp, int short_preamble, + int shift) +{ + int dur; + + /* calculate duration (in microseconds, rounded up to next higher + * integer if it includes a fractional microsecond) to send frame of + * len bytes (does not include FCS) at the given rate. Duration will + * also include SIFS. + * + * rate is in 100 kbps, so dividend is multiplied by 10 in the + * DIV_ROUND_UP() operations. + * + * shift may be 2 for 5 MHz channels or 1 for 10 MHz channels, and + * is assumed to be 0 otherwise. 
+ */ + + if (band == NL80211_BAND_5GHZ || erp) { + /* + * OFDM: + * + * N_DBPS = DATARATE x 4 + * N_SYM = Ceiling((16+8xLENGTH+6) / N_DBPS) + * (16 = SIGNAL time, 6 = tail bits) + * TXTIME = T_PREAMBLE + T_SIGNAL + T_SYM x N_SYM + Signal Ext + * + * T_SYM = 4 usec + * 802.11a - 18.5.2: aSIFSTime = 16 usec + * 802.11g - 19.8.4: aSIFSTime = 10 usec + + * signal ext = 6 usec + */ + dur = 16; /* SIFS + signal ext */ + dur += 16; /* IEEE 802.11-2012 18.3.2.4: T_PREAMBLE = 16 usec */ + dur += 4; /* IEEE 802.11-2012 18.3.2.4: T_SIGNAL = 4 usec */ + + /* IEEE 802.11-2012 18.3.2.4: all values above are: + * * times 4 for 5 MHz + * * times 2 for 10 MHz + */ + dur *= 1 << shift; + + /* rates should already consider the channel bandwidth, + * don't apply divisor again. + */ + dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10, + 4 * rate); /* T_SYM x N_SYM */ + } else { + /* + * 802.11b or 802.11g with 802.11b compatibility: + * 18.3.4: TXTIME = PreambleLength + PLCPHeaderTime + + * Ceiling(((LENGTH+PBCC)x8)/DATARATE). PBCC=0. + * + * 802.11 (DS): 15.3.3, 802.11b: 18.3.4 + * aSIFSTime = 10 usec + * aPreambleLength = 144 usec or 72 usec with short preamble + * aPLCPHeaderLength = 48 usec or 24 usec with short preamble + */ + dur = 10; /* aSIFSTime = 10 usec */ + dur += short_preamble ? (72 + 24) : (144 + 48); + + dur += DIV_ROUND_UP(8 * (len + 4) * 10, rate); + } + + return dur; +} + +/* Exported duration function for driver use */ +__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_band band, + size_t frame_len, + struct ieee80211_rate *rate) +{ + struct ieee80211_sub_if_data *sdata; + u16 dur; + int erp, shift = 0; + bool short_preamble = false; + + erp = 0; + if (vif) { + sdata = vif_to_sdata(vif); + short_preamble = sdata->vif.bss_conf.use_short_preamble; + if (sdata->deflink.operating_11g_mode) + erp = rate->flags & IEEE80211_RATE_ERP_G; + shift = ieee80211_vif_get_shift(vif); + } + + dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp, + short_preamble, shift); + + return cpu_to_le16(dur); +} +EXPORT_SYMBOL(ieee80211_generic_frame_duration); + +__le16 ieee80211_rts_duration(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, size_t frame_len, + const struct ieee80211_tx_info *frame_txctl) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_rate *rate; + struct ieee80211_sub_if_data *sdata; + bool short_preamble; + int erp, shift = 0, bitrate; + u16 dur; + struct ieee80211_supported_band *sband; + + sband = local->hw.wiphy->bands[frame_txctl->band]; + + short_preamble = false; + + rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; + + erp = 0; + if (vif) { + sdata = vif_to_sdata(vif); + short_preamble = sdata->vif.bss_conf.use_short_preamble; + if (sdata->deflink.operating_11g_mode) + erp = rate->flags & IEEE80211_RATE_ERP_G; + shift = ieee80211_vif_get_shift(vif); + } + + bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift); + + /* CTS duration */ + dur = ieee80211_frame_duration(sband->band, 10, bitrate, + erp, short_preamble, shift); + /* Data frame duration */ + dur += ieee80211_frame_duration(sband->band, frame_len, bitrate, + erp, short_preamble, shift); + /* ACK duration */ + dur += ieee80211_frame_duration(sband->band, 10, bitrate, + erp, short_preamble, shift); + + return cpu_to_le16(dur); +} +EXPORT_SYMBOL(ieee80211_rts_duration); + +__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + size_t frame_len, + const struct ieee80211_tx_info 
*frame_txctl) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_rate *rate; + struct ieee80211_sub_if_data *sdata; + bool short_preamble; + int erp, shift = 0, bitrate; + u16 dur; + struct ieee80211_supported_band *sband; + + sband = local->hw.wiphy->bands[frame_txctl->band]; + + short_preamble = false; + + rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; + erp = 0; + if (vif) { + sdata = vif_to_sdata(vif); + short_preamble = sdata->vif.bss_conf.use_short_preamble; + if (sdata->deflink.operating_11g_mode) + erp = rate->flags & IEEE80211_RATE_ERP_G; + shift = ieee80211_vif_get_shift(vif); + } + + bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift); + + /* Data frame duration */ + dur = ieee80211_frame_duration(sband->band, frame_len, bitrate, + erp, short_preamble, shift); + if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) { + /* ACK duration */ + dur += ieee80211_frame_duration(sband->band, 10, bitrate, + erp, short_preamble, shift); + } + + return cpu_to_le16(dur); +} +EXPORT_SYMBOL(ieee80211_ctstoself_duration); + +static void wake_tx_push_queue(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_txq *queue) +{ + struct ieee80211_tx_control control = { + .sta = queue->sta, + }; + struct sk_buff *skb; + + while (1) { + skb = ieee80211_tx_dequeue(&local->hw, queue); + if (!skb) + break; + + drv_tx(local, &control, skb); + } +} + +/* wake_tx_queue handler for driver not implementing a custom one*/ +void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif); + struct ieee80211_txq *queue; + + spin_lock(&local->handle_wake_tx_queue_lock); + + /* Use ieee80211_next_txq() for airtime fairness accounting */ + ieee80211_txq_schedule_start(hw, txq->ac); + while ((queue = ieee80211_next_txq(hw, txq->ac))) { + wake_tx_push_queue(local, sdata, queue); + ieee80211_return_txq(hw, queue, false); + } + ieee80211_txq_schedule_end(hw, txq->ac); + spin_unlock(&local->handle_wake_tx_queue_lock); +} +EXPORT_SYMBOL(ieee80211_handle_wake_tx_queue); + +static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_vif *vif = &sdata->vif; + struct fq *fq = &local->fq; + struct ps_data *ps = NULL; + struct txq_info *txqi; + struct sta_info *sta; + int i; + + local_bh_disable(); + spin_lock(&fq->lock); + + if (!test_bit(SDATA_STATE_RUNNING, &sdata->state)) + goto out; + + if (sdata->vif.type == NL80211_IFTYPE_AP) + ps = &sdata->bss->ps; + + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (sdata != sta->sdata) + continue; + + for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { + struct ieee80211_txq *txq = sta->sta.txq[i]; + + if (!txq) + continue; + + txqi = to_txq_info(txq); + + if (ac != txq->ac) + continue; + + if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY, + &txqi->flags)) + continue; + + spin_unlock(&fq->lock); + drv_wake_tx_queue(local, txqi); + spin_lock(&fq->lock); + } + } + + if (!vif->txq) + goto out; + + txqi = to_txq_info(vif->txq); + + if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) || + (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac) + goto out; + + spin_unlock(&fq->lock); + + drv_wake_tx_queue(local, txqi); + local_bh_enable(); + return; +out: + spin_unlock(&fq->lock); + local_bh_enable(); +} + +static void +__releases(&local->queue_stop_reason_lock) 
+__acquires(&local->queue_stop_reason_lock) +_ieee80211_wake_txqs(struct ieee80211_local *local, unsigned long *flags) +{ + struct ieee80211_sub_if_data *sdata; + int n_acs = IEEE80211_NUM_ACS; + int i; + + rcu_read_lock(); + + if (local->hw.queues < IEEE80211_NUM_ACS) + n_acs = 1; + + for (i = 0; i < local->hw.queues; i++) { + if (local->queue_stop_reasons[i]) + continue; + + spin_unlock_irqrestore(&local->queue_stop_reason_lock, *flags); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + int ac; + + for (ac = 0; ac < n_acs; ac++) { + int ac_queue = sdata->vif.hw_queue[ac]; + + if (ac_queue == i || + sdata->vif.cab_queue == i) + __ieee80211_wake_txqs(sdata, ac); + } + } + spin_lock_irqsave(&local->queue_stop_reason_lock, *flags); + } + + rcu_read_unlock(); +} + +void ieee80211_wake_txqs(struct tasklet_struct *t) +{ + struct ieee80211_local *local = from_tasklet(local, t, + wake_txqs_tasklet); + unsigned long flags; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + _ieee80211_wake_txqs(local, &flags); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); +} + +static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, + enum queue_stop_reason reason, + bool refcounted, + unsigned long *flags) +{ + struct ieee80211_local *local = hw_to_local(hw); + + trace_wake_queue(local, queue, reason); + + if (WARN_ON(queue >= hw->queues)) + return; + + if (!test_bit(reason, &local->queue_stop_reasons[queue])) + return; + + if (!refcounted) { + local->q_stop_reasons[queue][reason] = 0; + } else { + local->q_stop_reasons[queue][reason]--; + if (WARN_ON(local->q_stop_reasons[queue][reason] < 0)) + local->q_stop_reasons[queue][reason] = 0; + } + + if (local->q_stop_reasons[queue][reason] == 0) + __clear_bit(reason, &local->queue_stop_reasons[queue]); + + if (local->queue_stop_reasons[queue] != 0) + /* someone still has this queue stopped */ + return; + + if (!skb_queue_empty(&local->pending[queue])) + tasklet_schedule(&local->tx_pending_tasklet); + + /* + * Calling _ieee80211_wake_txqs here can be a problem because it may + * release queue_stop_reason_lock which has been taken by + * __ieee80211_wake_queue's caller. It is certainly not very nice to + * release someone's lock, but it is fine because all the callers of + * __ieee80211_wake_queue call it right before releasing the lock. 
+ */ + if (reason == IEEE80211_QUEUE_STOP_REASON_DRIVER) + tasklet_schedule(&local->wake_txqs_tasklet); + else + _ieee80211_wake_txqs(local, flags); +} + +void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, + enum queue_stop_reason reason, + bool refcounted) +{ + struct ieee80211_local *local = hw_to_local(hw); + unsigned long flags; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + __ieee80211_wake_queue(hw, queue, reason, refcounted, &flags); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); +} + +void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) +{ + ieee80211_wake_queue_by_reason(hw, queue, + IEEE80211_QUEUE_STOP_REASON_DRIVER, + false); +} +EXPORT_SYMBOL(ieee80211_wake_queue); + +static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, + enum queue_stop_reason reason, + bool refcounted) +{ + struct ieee80211_local *local = hw_to_local(hw); + + trace_stop_queue(local, queue, reason); + + if (WARN_ON(queue >= hw->queues)) + return; + + if (!refcounted) + local->q_stop_reasons[queue][reason] = 1; + else + local->q_stop_reasons[queue][reason]++; + + set_bit(reason, &local->queue_stop_reasons[queue]); +} + +void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, + enum queue_stop_reason reason, + bool refcounted) +{ + struct ieee80211_local *local = hw_to_local(hw); + unsigned long flags; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + __ieee80211_stop_queue(hw, queue, reason, refcounted); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); +} + +void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue) +{ + ieee80211_stop_queue_by_reason(hw, queue, + IEEE80211_QUEUE_STOP_REASON_DRIVER, + false); +} +EXPORT_SYMBOL(ieee80211_stop_queue); + +void ieee80211_add_pending_skb(struct ieee80211_local *local, + struct sk_buff *skb) +{ + struct ieee80211_hw *hw = &local->hw; + unsigned long flags; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int queue = info->hw_queue; + + if (WARN_ON(!info->control.vif)) { + ieee80211_free_txskb(&local->hw, skb); + return; + } + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD, + false); + __skb_queue_tail(&local->pending[queue], skb); + __ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD, + false, &flags); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); +} + +void ieee80211_add_pending_skbs(struct ieee80211_local *local, + struct sk_buff_head *skbs) +{ + struct ieee80211_hw *hw = &local->hw; + struct sk_buff *skb; + unsigned long flags; + int queue, i; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + while ((skb = skb_dequeue(skbs))) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (WARN_ON(!info->control.vif)) { + ieee80211_free_txskb(&local->hw, skb); + continue; + } + + queue = info->hw_queue; + + __ieee80211_stop_queue(hw, queue, + IEEE80211_QUEUE_STOP_REASON_SKB_ADD, + false); + + __skb_queue_tail(&local->pending[queue], skb); + } + + for (i = 0; i < hw->queues; i++) + __ieee80211_wake_queue(hw, i, + IEEE80211_QUEUE_STOP_REASON_SKB_ADD, + false, &flags); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); +} + +void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, + unsigned long queues, + enum queue_stop_reason reason, + bool refcounted) +{ + struct ieee80211_local *local = hw_to_local(hw); + unsigned long flags; + int i; + + 
spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + + for_each_set_bit(i, &queues, hw->queues) + __ieee80211_stop_queue(hw, i, reason, refcounted); + + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); +} + +void ieee80211_stop_queues(struct ieee80211_hw *hw) +{ + ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, + IEEE80211_QUEUE_STOP_REASON_DRIVER, + false); +} +EXPORT_SYMBOL(ieee80211_stop_queues); + +int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue) +{ + struct ieee80211_local *local = hw_to_local(hw); + unsigned long flags; + int ret; + + if (WARN_ON(queue >= hw->queues)) + return true; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + ret = test_bit(IEEE80211_QUEUE_STOP_REASON_DRIVER, + &local->queue_stop_reasons[queue]); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + return ret; +} +EXPORT_SYMBOL(ieee80211_queue_stopped); + +void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, + unsigned long queues, + enum queue_stop_reason reason, + bool refcounted) +{ + struct ieee80211_local *local = hw_to_local(hw); + unsigned long flags; + int i; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + + for_each_set_bit(i, &queues, hw->queues) + __ieee80211_wake_queue(hw, i, reason, refcounted, &flags); + + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); +} + +void ieee80211_wake_queues(struct ieee80211_hw *hw) +{ + ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, + IEEE80211_QUEUE_STOP_REASON_DRIVER, + false); +} +EXPORT_SYMBOL(ieee80211_wake_queues); + +static unsigned int +ieee80211_get_vif_queues(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + unsigned int queues; + + if (sdata && ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) { + int ac; + + queues = 0; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + queues |= BIT(sdata->vif.hw_queue[ac]); + if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE) + queues |= BIT(sdata->vif.cab_queue); + } else { + /* all queues */ + queues = BIT(local->hw.queues) - 1; + } + + return queues; +} + +void __ieee80211_flush_queues(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + unsigned int queues, bool drop) +{ + if (!local->ops->flush) + return; + + /* + * If no queue was set, or if the HW doesn't support + * IEEE80211_HW_QUEUE_CONTROL - flush all queues + */ + if (!queues || !ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) + queues = ieee80211_get_vif_queues(local, sdata); + + ieee80211_stop_queues_by_reason(&local->hw, queues, + IEEE80211_QUEUE_STOP_REASON_FLUSH, + false); + + drv_flush(local, sdata, queues, drop); + + ieee80211_wake_queues_by_reason(&local->hw, queues, + IEEE80211_QUEUE_STOP_REASON_FLUSH, + false); +} + +void ieee80211_flush_queues(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, bool drop) +{ + __ieee80211_flush_queues(local, sdata, 0, drop); +} + +void ieee80211_stop_vif_queues(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum queue_stop_reason reason) +{ + ieee80211_stop_queues_by_reason(&local->hw, + ieee80211_get_vif_queues(local, sdata), + reason, true); +} + +void ieee80211_wake_vif_queues(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum queue_stop_reason reason) +{ + ieee80211_wake_queues_by_reason(&local->hw, + ieee80211_get_vif_queues(local, sdata), + reason, true); +} + +static void __iterate_interfaces(struct ieee80211_local *local, + u32 iter_flags, + void (*iterator)(void *data, 
u8 *mac, + struct ieee80211_vif *vif), + void *data) +{ + struct ieee80211_sub_if_data *sdata; + bool active_only = iter_flags & IEEE80211_IFACE_ITER_ACTIVE; + + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + switch (sdata->vif.type) { + case NL80211_IFTYPE_MONITOR: + if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE)) + continue; + break; + case NL80211_IFTYPE_AP_VLAN: + continue; + default: + break; + } + if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) && + active_only && !(sdata->flags & IEEE80211_SDATA_IN_DRIVER)) + continue; + if ((iter_flags & IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER) && + !(sdata->flags & IEEE80211_SDATA_IN_DRIVER)) + continue; + if (ieee80211_sdata_running(sdata) || !active_only) + iterator(data, sdata->vif.addr, + &sdata->vif); + } + + sdata = rcu_dereference_check(local->monitor_sdata, + lockdep_is_held(&local->iflist_mtx) || + lockdep_is_held(&local->hw.wiphy->mtx)); + if (sdata && + (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || !active_only || + sdata->flags & IEEE80211_SDATA_IN_DRIVER)) + iterator(data, sdata->vif.addr, &sdata->vif); +} + +void ieee80211_iterate_interfaces( + struct ieee80211_hw *hw, u32 iter_flags, + void (*iterator)(void *data, u8 *mac, + struct ieee80211_vif *vif), + void *data) +{ + struct ieee80211_local *local = hw_to_local(hw); + + mutex_lock(&local->iflist_mtx); + __iterate_interfaces(local, iter_flags, iterator, data); + mutex_unlock(&local->iflist_mtx); +} +EXPORT_SYMBOL_GPL(ieee80211_iterate_interfaces); + +void ieee80211_iterate_active_interfaces_atomic( + struct ieee80211_hw *hw, u32 iter_flags, + void (*iterator)(void *data, u8 *mac, + struct ieee80211_vif *vif), + void *data) +{ + struct ieee80211_local *local = hw_to_local(hw); + + rcu_read_lock(); + __iterate_interfaces(local, iter_flags | IEEE80211_IFACE_ITER_ACTIVE, + iterator, data); + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic); + +void ieee80211_iterate_active_interfaces_mtx( + struct ieee80211_hw *hw, u32 iter_flags, + void (*iterator)(void *data, u8 *mac, + struct ieee80211_vif *vif), + void *data) +{ + struct ieee80211_local *local = hw_to_local(hw); + + lockdep_assert_wiphy(hw->wiphy); + + __iterate_interfaces(local, iter_flags | IEEE80211_IFACE_ITER_ACTIVE, + iterator, data); +} +EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_mtx); + +static void __iterate_stations(struct ieee80211_local *local, + void (*iterator)(void *data, + struct ieee80211_sta *sta), + void *data) +{ + struct sta_info *sta; + + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (!sta->uploaded) + continue; + + iterator(data, &sta->sta); + } +} + +void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw, + void (*iterator)(void *data, + struct ieee80211_sta *sta), + void *data) +{ + struct ieee80211_local *local = hw_to_local(hw); + + rcu_read_lock(); + __iterate_stations(local, iterator, data); + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(ieee80211_iterate_stations_atomic); + +struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + + if (!ieee80211_sdata_running(sdata) || + !(sdata->flags & IEEE80211_SDATA_IN_DRIVER)) + return NULL; + return &sdata->vif; +} +EXPORT_SYMBOL_GPL(wdev_to_ieee80211_vif); + +struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif) +{ + if (!vif) + return NULL; + + return &vif_to_sdata(vif)->wdev; +} +EXPORT_SYMBOL_GPL(ieee80211_vif_to_wdev); + +/* + * Nothing should have been stuffed 
into the workqueue during + * the suspend->resume cycle. Since we can't check each caller + * of this function if we are already quiescing / suspended, + * check here and don't WARN since this can actually happen when + * the rx path (for example) is racing against __ieee80211_suspend + * and suspending / quiescing was set after the rx path checked + * them. + */ +static bool ieee80211_can_queue_work(struct ieee80211_local *local) +{ + if (local->quiescing || (local->suspended && !local->resuming)) { + pr_warn("queueing ieee80211 work while going to suspend\n"); + return false; + } + + return true; +} + +void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work) +{ + struct ieee80211_local *local = hw_to_local(hw); + + if (!ieee80211_can_queue_work(local)) + return; + + queue_work(local->workqueue, work); +} +EXPORT_SYMBOL(ieee80211_queue_work); + +void ieee80211_queue_delayed_work(struct ieee80211_hw *hw, + struct delayed_work *dwork, + unsigned long delay) +{ + struct ieee80211_local *local = hw_to_local(hw); + + if (!ieee80211_can_queue_work(local)) + return; + + queue_delayed_work(local->workqueue, dwork, delay); +} +EXPORT_SYMBOL(ieee80211_queue_delayed_work); + +static void +ieee80211_parse_extension_element(u32 *crc, + const struct element *elem, + struct ieee802_11_elems *elems, + struct ieee80211_elems_parse_params *params) +{ + const void *data = elem->data + 1; + bool calc_crc = false; + u8 len; + + if (!elem->datalen) + return; + + len = elem->datalen - 1; + + switch (elem->data[0]) { + case WLAN_EID_EXT_HE_MU_EDCA: + calc_crc = true; + if (len >= sizeof(*elems->mu_edca_param_set)) + elems->mu_edca_param_set = data; + break; + case WLAN_EID_EXT_HE_CAPABILITY: + if (ieee80211_he_capa_size_ok(data, len)) { + elems->he_cap = data; + elems->he_cap_len = len; + } + break; + case WLAN_EID_EXT_HE_OPERATION: + calc_crc = true; + if (len >= sizeof(*elems->he_operation) && + len >= ieee80211_he_oper_size(data) - 1) + elems->he_operation = data; + break; + case WLAN_EID_EXT_UORA: + if (len >= 1) + elems->uora_element = data; + break; + case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME: + if (len == 3) + elems->max_channel_switch_time = data; + break; + case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION: + if (len >= sizeof(*elems->mbssid_config_ie)) + elems->mbssid_config_ie = data; + break; + case WLAN_EID_EXT_HE_SPR: + if (len >= sizeof(*elems->he_spr) && + len >= ieee80211_he_spr_size(data)) + elems->he_spr = data; + break; + case WLAN_EID_EXT_HE_6GHZ_CAPA: + if (len >= sizeof(*elems->he_6ghz_capa)) + elems->he_6ghz_capa = data; + break; + case WLAN_EID_EXT_EHT_CAPABILITY: + if (ieee80211_eht_capa_size_ok(elems->he_cap, + data, len, + params->from_ap)) { + elems->eht_cap = data; + elems->eht_cap_len = len; + } + break; + case WLAN_EID_EXT_EHT_OPERATION: + if (ieee80211_eht_oper_size_ok(data, len)) + elems->eht_operation = data; + calc_crc = true; + break; + case WLAN_EID_EXT_EHT_MULTI_LINK: + calc_crc = true; + + if (ieee80211_mle_size_ok(data, len)) { + const struct ieee80211_multi_link_elem *mle = + (void *)data; + + switch (le16_get_bits(mle->control, + IEEE80211_ML_CONTROL_TYPE)) { + case IEEE80211_ML_CONTROL_TYPE_BASIC: + elems->ml_basic_elem = (void *)elem; + elems->ml_basic = data; + elems->ml_basic_len = len; + break; + case IEEE80211_ML_CONTROL_TYPE_RECONF: + elems->ml_reconf_elem = (void *)elem; + elems->ml_reconf = data; + elems->ml_reconf_len = len; + break; + default: + break; + } + } + break; + } + + if (crc && calc_crc) + *crc = crc32_be(*crc, (void *)elem, 
elem->datalen + 2); +} + +static u32 +_ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params, + struct ieee802_11_elems *elems, + const struct element *check_inherit) +{ + const struct element *elem; + bool calc_crc = params->filter != 0; + DECLARE_BITMAP(seen_elems, 256); + u32 crc = params->crc; + const u8 *ie; + + bitmap_zero(seen_elems, 256); + + for_each_element(elem, params->start, params->len) { + bool elem_parse_failed; + u8 id = elem->id; + u8 elen = elem->datalen; + const u8 *pos = elem->data; + + if (check_inherit && + !cfg80211_is_element_inherited(elem, + check_inherit)) + continue; + + switch (id) { + case WLAN_EID_SSID: + case WLAN_EID_SUPP_RATES: + case WLAN_EID_FH_PARAMS: + case WLAN_EID_DS_PARAMS: + case WLAN_EID_CF_PARAMS: + case WLAN_EID_TIM: + case WLAN_EID_IBSS_PARAMS: + case WLAN_EID_CHALLENGE: + case WLAN_EID_RSN: + case WLAN_EID_ERP_INFO: + case WLAN_EID_EXT_SUPP_RATES: + case WLAN_EID_HT_CAPABILITY: + case WLAN_EID_HT_OPERATION: + case WLAN_EID_VHT_CAPABILITY: + case WLAN_EID_VHT_OPERATION: + case WLAN_EID_MESH_ID: + case WLAN_EID_MESH_CONFIG: + case WLAN_EID_PEER_MGMT: + case WLAN_EID_PREQ: + case WLAN_EID_PREP: + case WLAN_EID_PERR: + case WLAN_EID_RANN: + case WLAN_EID_CHANNEL_SWITCH: + case WLAN_EID_EXT_CHANSWITCH_ANN: + case WLAN_EID_COUNTRY: + case WLAN_EID_PWR_CONSTRAINT: + case WLAN_EID_TIMEOUT_INTERVAL: + case WLAN_EID_SECONDARY_CHANNEL_OFFSET: + case WLAN_EID_WIDE_BW_CHANNEL_SWITCH: + case WLAN_EID_CHAN_SWITCH_PARAM: + case WLAN_EID_EXT_CAPABILITY: + case WLAN_EID_CHAN_SWITCH_TIMING: + case WLAN_EID_LINK_ID: + case WLAN_EID_BSS_MAX_IDLE_PERIOD: + case WLAN_EID_RSNX: + case WLAN_EID_S1G_BCN_COMPAT: + case WLAN_EID_S1G_CAPABILITIES: + case WLAN_EID_S1G_OPERATION: + case WLAN_EID_AID_RESPONSE: + case WLAN_EID_S1G_SHORT_BCN_INTERVAL: + /* + * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible + * that if the content gets bigger it might be needed more than once + */ + if (test_bit(id, seen_elems)) { + elems->parse_error = true; + continue; + } + break; + } + + if (calc_crc && id < 64 && (params->filter & (1ULL << id))) + crc = crc32_be(crc, pos - 2, elen + 2); + + elem_parse_failed = false; + + switch (id) { + case WLAN_EID_LINK_ID: + if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) { + elem_parse_failed = true; + break; + } + elems->lnk_id = (void *)(pos - 2); + break; + case WLAN_EID_CHAN_SWITCH_TIMING: + if (elen < sizeof(struct ieee80211_ch_switch_timing)) { + elem_parse_failed = true; + break; + } + elems->ch_sw_timing = (void *)pos; + break; + case WLAN_EID_EXT_CAPABILITY: + elems->ext_capab = pos; + elems->ext_capab_len = elen; + break; + case WLAN_EID_SSID: + elems->ssid = pos; + elems->ssid_len = elen; + break; + case WLAN_EID_SUPP_RATES: + elems->supp_rates = pos; + elems->supp_rates_len = elen; + break; + case WLAN_EID_DS_PARAMS: + if (elen >= 1) + elems->ds_params = pos; + else + elem_parse_failed = true; + break; + case WLAN_EID_TIM: + if (elen >= sizeof(struct ieee80211_tim_ie)) { + elems->tim = (void *)pos; + elems->tim_len = elen; + } else + elem_parse_failed = true; + break; + case WLAN_EID_VENDOR_SPECIFIC: + if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && + pos[2] == 0xf2) { + /* Microsoft OUI (00:50:F2) */ + + if (calc_crc) + crc = crc32_be(crc, pos - 2, elen + 2); + + if (elen >= 5 && pos[3] == 2) { + /* OUI Type 2 - WMM IE */ + if (pos[4] == 0) { + elems->wmm_info = pos; + elems->wmm_info_len = elen; + } else if (pos[4] == 1) { + elems->wmm_param = pos; + elems->wmm_param_len = elen; + } + } + } + 
break; + case WLAN_EID_RSN: + elems->rsn = pos; + elems->rsn_len = elen; + break; + case WLAN_EID_ERP_INFO: + if (elen >= 1) + elems->erp_info = pos; + else + elem_parse_failed = true; + break; + case WLAN_EID_EXT_SUPP_RATES: + elems->ext_supp_rates = pos; + elems->ext_supp_rates_len = elen; + break; + case WLAN_EID_HT_CAPABILITY: + if (elen >= sizeof(struct ieee80211_ht_cap)) + elems->ht_cap_elem = (void *)pos; + else + elem_parse_failed = true; + break; + case WLAN_EID_HT_OPERATION: + if (elen >= sizeof(struct ieee80211_ht_operation)) + elems->ht_operation = (void *)pos; + else + elem_parse_failed = true; + break; + case WLAN_EID_VHT_CAPABILITY: + if (elen >= sizeof(struct ieee80211_vht_cap)) + elems->vht_cap_elem = (void *)pos; + else + elem_parse_failed = true; + break; + case WLAN_EID_VHT_OPERATION: + if (elen >= sizeof(struct ieee80211_vht_operation)) { + elems->vht_operation = (void *)pos; + if (calc_crc) + crc = crc32_be(crc, pos - 2, elen + 2); + break; + } + elem_parse_failed = true; + break; + case WLAN_EID_OPMODE_NOTIF: + if (elen > 0) { + elems->opmode_notif = pos; + if (calc_crc) + crc = crc32_be(crc, pos - 2, elen + 2); + break; + } + elem_parse_failed = true; + break; + case WLAN_EID_MESH_ID: + elems->mesh_id = pos; + elems->mesh_id_len = elen; + break; + case WLAN_EID_MESH_CONFIG: + if (elen >= sizeof(struct ieee80211_meshconf_ie)) + elems->mesh_config = (void *)pos; + else + elem_parse_failed = true; + break; + case WLAN_EID_PEER_MGMT: + elems->peering = pos; + elems->peering_len = elen; + break; + case WLAN_EID_MESH_AWAKE_WINDOW: + if (elen >= 2) + elems->awake_window = (void *)pos; + break; + case WLAN_EID_PREQ: + elems->preq = pos; + elems->preq_len = elen; + break; + case WLAN_EID_PREP: + elems->prep = pos; + elems->prep_len = elen; + break; + case WLAN_EID_PERR: + elems->perr = pos; + elems->perr_len = elen; + break; + case WLAN_EID_RANN: + if (elen >= sizeof(struct ieee80211_rann_ie)) + elems->rann = (void *)pos; + else + elem_parse_failed = true; + break; + case WLAN_EID_CHANNEL_SWITCH: + if (elen != sizeof(struct ieee80211_channel_sw_ie)) { + elem_parse_failed = true; + break; + } + elems->ch_switch_ie = (void *)pos; + break; + case WLAN_EID_EXT_CHANSWITCH_ANN: + if (elen != sizeof(struct ieee80211_ext_chansw_ie)) { + elem_parse_failed = true; + break; + } + elems->ext_chansw_ie = (void *)pos; + break; + case WLAN_EID_SECONDARY_CHANNEL_OFFSET: + if (elen != sizeof(struct ieee80211_sec_chan_offs_ie)) { + elem_parse_failed = true; + break; + } + elems->sec_chan_offs = (void *)pos; + break; + case WLAN_EID_CHAN_SWITCH_PARAM: + if (elen < + sizeof(*elems->mesh_chansw_params_ie)) { + elem_parse_failed = true; + break; + } + elems->mesh_chansw_params_ie = (void *)pos; + break; + case WLAN_EID_WIDE_BW_CHANNEL_SWITCH: + if (!params->action || + elen < sizeof(*elems->wide_bw_chansw_ie)) { + elem_parse_failed = true; + break; + } + elems->wide_bw_chansw_ie = (void *)pos; + break; + case WLAN_EID_CHANNEL_SWITCH_WRAPPER: + if (params->action) { + elem_parse_failed = true; + break; + } + /* + * This is a bit tricky, but as we only care about + * the wide bandwidth channel switch element, so + * just parse it out manually. 
+ */ + ie = cfg80211_find_ie(WLAN_EID_WIDE_BW_CHANNEL_SWITCH, + pos, elen); + if (ie) { + if (ie[1] >= sizeof(*elems->wide_bw_chansw_ie)) + elems->wide_bw_chansw_ie = + (void *)(ie + 2); + else + elem_parse_failed = true; + } + break; + case WLAN_EID_COUNTRY: + elems->country_elem = pos; + elems->country_elem_len = elen; + break; + case WLAN_EID_PWR_CONSTRAINT: + if (elen != 1) { + elem_parse_failed = true; + break; + } + elems->pwr_constr_elem = pos; + break; + case WLAN_EID_CISCO_VENDOR_SPECIFIC: + /* Lots of different options exist, but we only care + * about the Dynamic Transmit Power Control element. + * First check for the Cisco OUI, then for the DTPC + * tag (0x00). + */ + if (elen < 4) { + elem_parse_failed = true; + break; + } + + if (pos[0] != 0x00 || pos[1] != 0x40 || + pos[2] != 0x96 || pos[3] != 0x00) + break; + + if (elen != 6) { + elem_parse_failed = true; + break; + } + + if (calc_crc) + crc = crc32_be(crc, pos - 2, elen + 2); + + elems->cisco_dtpc_elem = pos; + break; + case WLAN_EID_ADDBA_EXT: + if (elen < sizeof(struct ieee80211_addba_ext_ie)) { + elem_parse_failed = true; + break; + } + elems->addba_ext_ie = (void *)pos; + break; + case WLAN_EID_TIMEOUT_INTERVAL: + if (elen >= sizeof(struct ieee80211_timeout_interval_ie)) + elems->timeout_int = (void *)pos; + else + elem_parse_failed = true; + break; + case WLAN_EID_BSS_MAX_IDLE_PERIOD: + if (elen >= sizeof(*elems->max_idle_period_ie)) + elems->max_idle_period_ie = (void *)pos; + break; + case WLAN_EID_RSNX: + elems->rsnx = pos; + elems->rsnx_len = elen; + break; + case WLAN_EID_TX_POWER_ENVELOPE: + if (elen < 1 || + elen > sizeof(struct ieee80211_tx_pwr_env)) + break; + + if (elems->tx_pwr_env_num >= ARRAY_SIZE(elems->tx_pwr_env)) + break; + + elems->tx_pwr_env[elems->tx_pwr_env_num] = (void *)pos; + elems->tx_pwr_env_len[elems->tx_pwr_env_num] = elen; + elems->tx_pwr_env_num++; + break; + case WLAN_EID_EXTENSION: + ieee80211_parse_extension_element(calc_crc ? 
+ &crc : NULL, + elem, elems, params); + break; + case WLAN_EID_S1G_CAPABILITIES: + if (elen >= sizeof(*elems->s1g_capab)) + elems->s1g_capab = (void *)pos; + else + elem_parse_failed = true; + break; + case WLAN_EID_S1G_OPERATION: + if (elen == sizeof(*elems->s1g_oper)) + elems->s1g_oper = (void *)pos; + else + elem_parse_failed = true; + break; + case WLAN_EID_S1G_BCN_COMPAT: + if (elen == sizeof(*elems->s1g_bcn_compat)) + elems->s1g_bcn_compat = (void *)pos; + else + elem_parse_failed = true; + break; + case WLAN_EID_AID_RESPONSE: + if (elen == sizeof(struct ieee80211_aid_response_ie)) + elems->aid_resp = (void *)pos; + else + elem_parse_failed = true; + break; + default: + break; + } + + if (elem_parse_failed) + elems->parse_error = true; + else + __set_bit(id, seen_elems); + } + + if (!for_each_element_completed(elem, params->start, params->len)) + elems->parse_error = true; + + return crc; +} + +static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len, + struct ieee802_11_elems *elems, + struct cfg80211_bss *bss, + u8 *nontransmitted_profile) +{ + const struct element *elem, *sub; + size_t profile_len = 0; + bool found = false; + + if (!bss || !bss->transmitted_bss) + return profile_len; + + for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) { + if (elem->datalen < 2) + continue; + if (elem->data[0] < 1 || elem->data[0] > 8) + continue; + + for_each_element(sub, elem->data + 1, elem->datalen - 1) { + u8 new_bssid[ETH_ALEN]; + const u8 *index; + + if (sub->id != 0 || sub->datalen < 4) { + /* not a valid BSS profile */ + continue; + } + + if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP || + sub->data[1] != 2) { + /* The first element of the + * Nontransmitted BSSID Profile is not + * the Nontransmitted BSSID Capability + * element. + */ + continue; + } + + memset(nontransmitted_profile, 0, len); + profile_len = cfg80211_merge_profile(start, len, + elem, + sub, + nontransmitted_profile, + len); + + /* found a Nontransmitted BSSID Profile */ + index = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, + nontransmitted_profile, + profile_len); + if (!index || index[1] < 1 || index[2] == 0) { + /* Invalid MBSSID Index element */ + continue; + } + + cfg80211_gen_new_bssid(bss->transmitted_bss->bssid, + elem->data[0], + index[2], + new_bssid); + if (ether_addr_equal(new_bssid, bss->bssid)) { + found = true; + elems->bssid_index_len = index[1]; + elems->bssid_index = (void *)&index[2]; + break; + } + } + } + + return found ? 
profile_len : 0; +} + +static void ieee80211_mle_get_sta_prof(struct ieee802_11_elems *elems, + u8 link_id) +{ + const struct ieee80211_multi_link_elem *ml = elems->ml_basic; + ssize_t ml_len = elems->ml_basic_len; + const struct element *sub; + + if (!ml || !ml_len) + return; + + if (le16_get_bits(ml->control, IEEE80211_ML_CONTROL_TYPE) != + IEEE80211_ML_CONTROL_TYPE_BASIC) + return; + + for_each_mle_subelement(sub, (u8 *)ml, ml_len) { + struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data; + ssize_t sta_prof_len; + u16 control; + + if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE) + continue; + + if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data, + sub->datalen)) + return; + + control = le16_to_cpu(prof->control); + + if (link_id != u16_get_bits(control, + IEEE80211_MLE_STA_CONTROL_LINK_ID)) + continue; + + if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE)) + return; + + /* the sub element can be fragmented */ + sta_prof_len = + cfg80211_defragment_element(sub, + (u8 *)ml, ml_len, + elems->scratch_pos, + elems->scratch + + elems->scratch_len - + elems->scratch_pos, + IEEE80211_MLE_SUBELEM_FRAGMENT); + + if (sta_prof_len < 0) + return; + + elems->prof = (void *)elems->scratch_pos; + elems->sta_prof_len = sta_prof_len; + elems->scratch_pos += sta_prof_len; + + return; + } +} + +static void ieee80211_mle_parse_link(struct ieee802_11_elems *elems, + struct ieee80211_elems_parse_params *params) +{ + struct ieee80211_mle_per_sta_profile *prof; + struct ieee80211_elems_parse_params sub = { + .action = params->action, + .from_ap = params->from_ap, + .link_id = -1, + }; + ssize_t ml_len = elems->ml_basic_len; + const struct element *non_inherit = NULL; + const u8 *end; + + if (params->link_id == -1) + return; + + ml_len = cfg80211_defragment_element(elems->ml_basic_elem, + elems->ie_start, + elems->total_len, + elems->scratch_pos, + elems->scratch + + elems->scratch_len - + elems->scratch_pos, + WLAN_EID_FRAGMENT); + + if (ml_len < 0) + return; + + elems->ml_basic = (const void *)elems->scratch_pos; + elems->ml_basic_len = ml_len; + + ieee80211_mle_get_sta_prof(elems, params->link_id); + prof = elems->prof; + + if (!prof) + return; + + /* check if we have the 4 bytes for the fixed part in assoc response */ + if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) { + elems->prof = NULL; + elems->sta_prof_len = 0; + return; + } + + /* + * Skip the capability information and the status code that are expected + * as part of the station profile in association response frames. Note + * the -1 is because the 'sta_info_len' is accounted to as part of the + * per-STA profile, but not part of the 'u8 variable[]' portion. 
+ */ + sub.start = prof->variable + prof->sta_info_len - 1 + 4; + end = (const u8 *)prof + elems->sta_prof_len; + sub.len = end - sub.start; + + non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, + sub.start, sub.len); + _ieee802_11_parse_elems_full(&sub, elems, non_inherit); +} + +struct ieee802_11_elems * +ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params) +{ + struct ieee802_11_elems *elems; + const struct element *non_inherit = NULL; + u8 *nontransmitted_profile; + int nontransmitted_profile_len = 0; + size_t scratch_len = 3 * params->len; + + elems = kzalloc(sizeof(*elems) + scratch_len, GFP_ATOMIC); + if (!elems) + return NULL; + elems->ie_start = params->start; + elems->total_len = params->len; + elems->scratch_len = scratch_len; + elems->scratch_pos = elems->scratch; + + nontransmitted_profile = elems->scratch_pos; + nontransmitted_profile_len = + ieee802_11_find_bssid_profile(params->start, params->len, + elems, params->bss, + nontransmitted_profile); + elems->scratch_pos += nontransmitted_profile_len; + elems->scratch_len -= nontransmitted_profile_len; + non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, + nontransmitted_profile, + nontransmitted_profile_len); + + elems->crc = _ieee802_11_parse_elems_full(params, elems, non_inherit); + + /* Override with nontransmitted profile, if found */ + if (nontransmitted_profile_len) { + struct ieee80211_elems_parse_params sub = { + .start = nontransmitted_profile, + .len = nontransmitted_profile_len, + .action = params->action, + .link_id = params->link_id, + }; + + _ieee802_11_parse_elems_full(&sub, elems, NULL); + } + + ieee80211_mle_parse_link(elems, params); + + if (elems->tim && !elems->parse_error) { + const struct ieee80211_tim_ie *tim_ie = elems->tim; + + elems->dtim_period = tim_ie->dtim_period; + elems->dtim_count = tim_ie->dtim_count; + } + + /* Override DTIM period and count if needed */ + if (elems->bssid_index && + elems->bssid_index_len >= + offsetofend(struct ieee80211_bssid_index, dtim_period)) + elems->dtim_period = elems->bssid_index->dtim_period; + + if (elems->bssid_index && + elems->bssid_index_len >= + offsetofend(struct ieee80211_bssid_index, dtim_count)) + elems->dtim_count = elems->bssid_index->dtim_count; + + return elems; +} + +void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, + struct ieee80211_tx_queue_params + *qparam, int ac) +{ + struct ieee80211_chanctx_conf *chanctx_conf; + const struct ieee80211_reg_rule *rrule; + const struct ieee80211_wmm_ac *wmm_ac; + u16 center_freq = 0; + + if (sdata->vif.type != NL80211_IFTYPE_AP && + sdata->vif.type != NL80211_IFTYPE_STATION) + return; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); + if (chanctx_conf) + center_freq = chanctx_conf->def.chan->center_freq; + + if (!center_freq) { + rcu_read_unlock(); + return; + } + + rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq)); + + if (IS_ERR_OR_NULL(rrule) || !rrule->has_wmm) { + rcu_read_unlock(); + return; + } + + if (sdata->vif.type == NL80211_IFTYPE_AP) + wmm_ac = &rrule->wmm_rule.ap[ac]; + else + wmm_ac = &rrule->wmm_rule.client[ac]; + qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min); + qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max); + qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn); + qparam->txop = min_t(u16, qparam->txop, wmm_ac->cot / 32); + rcu_read_unlock(); +} + +void ieee80211_set_wmm_default(struct ieee80211_link_data *link, + bool bss_notify, 
bool enable_qos)
+{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_tx_queue_params qparam;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ int ac;
+ bool use_11b;
+ bool is_ocb; /* Use different EDCA parameters if dot11OCBActivated=true */
+ int aCWmin, aCWmax;
+
+ if (!local->ops->conf_tx)
+ return;
+
+ if (local->hw.queues < IEEE80211_NUM_ACS)
+ return;
+
+ memset(&qparam, 0, sizeof(qparam));
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(link->conf->chanctx_conf);
+ use_11b = (chanctx_conf &&
+ chanctx_conf->def.chan->band == NL80211_BAND_2GHZ) &&
+ !link->operating_11g_mode;
+ rcu_read_unlock();
+
+ is_ocb = (sdata->vif.type == NL80211_IFTYPE_OCB);
+
+ /* Set defaults according to 802.11-2007 Table 7-37 */
+ aCWmax = 1023;
+ if (use_11b)
+ aCWmin = 31;
+ else
+ aCWmin = 15;
+
+ /* Configure old 802.11b/g medium access rules. */
+ qparam.cw_max = aCWmax;
+ qparam.cw_min = aCWmin;
+ qparam.txop = 0;
+ qparam.aifs = 2;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ /* Update if QoS is enabled. */
+ if (enable_qos) {
+ switch (ac) {
+ case IEEE80211_AC_BK:
+ qparam.cw_max = aCWmax;
+ qparam.cw_min = aCWmin;
+ qparam.txop = 0;
+ if (is_ocb)
+ qparam.aifs = 9;
+ else
+ qparam.aifs = 7;
+ break;
+ /* never happens but let's not leave undefined */
+ default:
+ case IEEE80211_AC_BE:
+ qparam.cw_max = aCWmax;
+ qparam.cw_min = aCWmin;
+ qparam.txop = 0;
+ if (is_ocb)
+ qparam.aifs = 6;
+ else
+ qparam.aifs = 3;
+ break;
+ case IEEE80211_AC_VI:
+ qparam.cw_max = aCWmin;
+ qparam.cw_min = (aCWmin + 1) / 2 - 1;
+ if (is_ocb)
+ qparam.txop = 0;
+ else if (use_11b)
+ qparam.txop = 6016/32;
+ else
+ qparam.txop = 3008/32;
+
+ if (is_ocb)
+ qparam.aifs = 3;
+ else
+ qparam.aifs = 2;
+ break;
+ case IEEE80211_AC_VO:
+ qparam.cw_max = (aCWmin + 1) / 2 - 1;
+ qparam.cw_min = (aCWmin + 1) / 4 - 1;
+ if (is_ocb)
+ qparam.txop = 0;
+ else if (use_11b)
+ qparam.txop = 3264/32;
+ else
+ qparam.txop = 1504/32;
+ qparam.aifs = 2;
+ break;
+ }
+ }
+ ieee80211_regulatory_limit_wmm_params(sdata, &qparam, ac);
+
+ qparam.uapsd = false;
+
+ link->tx_conf[ac] = qparam;
+ drv_conf_tx(local, link, ac, &qparam);
+ }
+
+ if (sdata->vif.type != NL80211_IFTYPE_MONITOR &&
+ sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+ sdata->vif.type != NL80211_IFTYPE_NAN) {
+ link->conf->qos = enable_qos;
+ if (bss_notify)
+ ieee80211_link_info_change_notify(sdata, link,
+ BSS_CHANGED_QOS);
+ }
+}
+
+void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
+ u16 transaction, u16 auth_alg, u16 status,
+ const u8 *extra, size_t extra_len, const u8 *da,
+ const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx,
+ u32 tx_flags)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff *skb;
+ struct ieee80211_mgmt *mgmt;
+ bool multi_link = ieee80211_vif_is_mld(&sdata->vif);
+ struct {
+ u8 id;
+ u8 len;
+ u8 ext_id;
+ struct ieee80211_multi_link_elem ml;
+ struct ieee80211_mle_basic_common_info basic;
+ } __packed mle = {
+ .id = WLAN_EID_EXTENSION,
+ .len = sizeof(mle) - 2,
+ .ext_id = WLAN_EID_EXT_EHT_MULTI_LINK,
+ .ml.control = cpu_to_le16(IEEE80211_ML_CONTROL_TYPE_BASIC),
+ .basic.len = sizeof(mle.basic),
+ };
+ int err;
+
+ memcpy(mle.basic.mld_mac_addr, sdata->vif.addr, ETH_ALEN);
+
+ /* 24 + 6 = header + auth_algo + auth_transaction + status_code */
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN +
+ 24 + 6 + extra_len + IEEE80211_WEP_ICV_LEN +
+ multi_link * sizeof(mle));
+ if (!skb)
+ return;
+
+
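+ /* leave room for the driver's extra headroom and the WEP IV that may be
+  * prepended below for shared key authentication (transaction 3)
+  */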
skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN); + + mgmt = skb_put_zero(skb, 24 + 6); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_AUTH); + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, bssid, ETH_ALEN); + mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg); + mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); + mgmt->u.auth.status_code = cpu_to_le16(status); + if (extra) + skb_put_data(skb, extra, extra_len); + if (multi_link) + skb_put_data(skb, &mle, sizeof(mle)); + + if (auth_alg == WLAN_AUTH_SHARED_KEY && transaction == 3) { + mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); + err = ieee80211_wep_encrypt(local, skb, key, key_len, key_idx); + if (WARN_ON(err)) { + kfree_skb(skb); + return; + } + } + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | + tx_flags; + ieee80211_tx_skb(sdata, skb); +} + +void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, + const u8 *da, const u8 *bssid, + u16 stype, u16 reason, + bool send_frame, u8 *frame_buf) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt = (void *)frame_buf; + + /* build frame */ + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype); + mgmt->duration = 0; /* initialize only */ + mgmt->seq_ctrl = 0; /* initialize only */ + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, bssid, ETH_ALEN); + /* u.deauth.reason_code == u.disassoc.reason_code */ + mgmt->u.deauth.reason_code = cpu_to_le16(reason); + + if (send_frame) { + skb = dev_alloc_skb(local->hw.extra_tx_headroom + + IEEE80211_DEAUTH_FRAME_LEN); + if (!skb) + return; + + skb_reserve(skb, local->hw.extra_tx_headroom); + + /* copy in frame */ + skb_put_data(skb, mgmt, IEEE80211_DEAUTH_FRAME_LEN); + + if (sdata->vif.type != NL80211_IFTYPE_STATION || + !(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED)) + IEEE80211_SKB_CB(skb)->flags |= + IEEE80211_TX_INTFL_DONT_ENCRYPT; + + ieee80211_tx_skb(sdata, skb); + } +} + +u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end) +{ + if ((end - pos) < 5) + return pos; + + *pos++ = WLAN_EID_EXTENSION; + *pos++ = 1 + sizeof(cap); + *pos++ = WLAN_EID_EXT_HE_6GHZ_CAPA; + memcpy(pos, &cap, sizeof(cap)); + + return pos + 2; +} + +static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata, + u8 *buffer, size_t buffer_len, + const u8 *ie, size_t ie_len, + enum nl80211_band band, + u32 rate_mask, + struct cfg80211_chan_def *chandef, + size_t *offset, u32 flags) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; + const struct ieee80211_sta_he_cap *he_cap; + const struct ieee80211_sta_eht_cap *eht_cap; + u8 *pos = buffer, *end = buffer + buffer_len; + size_t noffset; + int supp_rates_len, i; + u8 rates[32]; + int num_rates; + int ext_rates_len; + int shift; + u32 rate_flags; + bool have_80mhz = false; + + *offset = 0; + + sband = local->hw.wiphy->bands[band]; + if (WARN_ON_ONCE(!sband)) + return 0; + + rate_flags = ieee80211_chandef_rate_flags(chandef); + shift = ieee80211_chandef_get_shift(chandef); + + /* For direct scan add S1G IE and consider its override bits */ + if (band == NL80211_BAND_S1GHZ) { + if (end - pos < 2 + sizeof(struct ieee80211_s1g_cap)) + goto out_err; + pos = ieee80211_ie_build_s1g_cap(pos, &sband->s1g_cap); + goto done; + } + + num_rates = 0; + for (i = 0; i < sband->n_bitrates; i++) { + if 
((BIT(i) & rate_mask) == 0) + continue; /* skip rate */ + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + + rates[num_rates++] = + (u8) DIV_ROUND_UP(sband->bitrates[i].bitrate, + (1 << shift) * 5); + } + + supp_rates_len = min_t(int, num_rates, 8); + + if (end - pos < 2 + supp_rates_len) + goto out_err; + *pos++ = WLAN_EID_SUPP_RATES; + *pos++ = supp_rates_len; + memcpy(pos, rates, supp_rates_len); + pos += supp_rates_len; + + /* insert "request information" if in custom IEs */ + if (ie && ie_len) { + static const u8 before_extrates[] = { + WLAN_EID_SSID, + WLAN_EID_SUPP_RATES, + WLAN_EID_REQUEST, + }; + noffset = ieee80211_ie_split(ie, ie_len, + before_extrates, + ARRAY_SIZE(before_extrates), + *offset); + if (end - pos < noffset - *offset) + goto out_err; + memcpy(pos, ie + *offset, noffset - *offset); + pos += noffset - *offset; + *offset = noffset; + } + + ext_rates_len = num_rates - supp_rates_len; + if (ext_rates_len > 0) { + if (end - pos < 2 + ext_rates_len) + goto out_err; + *pos++ = WLAN_EID_EXT_SUPP_RATES; + *pos++ = ext_rates_len; + memcpy(pos, rates + supp_rates_len, ext_rates_len); + pos += ext_rates_len; + } + + if (chandef->chan && sband->band == NL80211_BAND_2GHZ) { + if (end - pos < 3) + goto out_err; + *pos++ = WLAN_EID_DS_PARAMS; + *pos++ = 1; + *pos++ = ieee80211_frequency_to_channel( + chandef->chan->center_freq); + } + + if (flags & IEEE80211_PROBE_FLAG_MIN_CONTENT) + goto done; + + /* insert custom IEs that go before HT */ + if (ie && ie_len) { + static const u8 before_ht[] = { + /* + * no need to list the ones split off already + * (or generated here) + */ + WLAN_EID_DS_PARAMS, + WLAN_EID_SUPPORTED_REGULATORY_CLASSES, + }; + noffset = ieee80211_ie_split(ie, ie_len, + before_ht, ARRAY_SIZE(before_ht), + *offset); + if (end - pos < noffset - *offset) + goto out_err; + memcpy(pos, ie + *offset, noffset - *offset); + pos += noffset - *offset; + *offset = noffset; + } + + if (sband->ht_cap.ht_supported) { + if (end - pos < 2 + sizeof(struct ieee80211_ht_cap)) + goto out_err; + pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, + sband->ht_cap.cap); + } + + /* insert custom IEs that go before VHT */ + if (ie && ie_len) { + static const u8 before_vht[] = { + /* + * no need to list the ones split off already + * (or generated here) + */ + WLAN_EID_BSS_COEX_2040, + WLAN_EID_EXT_CAPABILITY, + WLAN_EID_SSID_LIST, + WLAN_EID_CHANNEL_USAGE, + WLAN_EID_INTERWORKING, + WLAN_EID_MESH_ID, + /* 60 GHz (Multi-band, DMG, MMS) can't happen */ + }; + noffset = ieee80211_ie_split(ie, ie_len, + before_vht, ARRAY_SIZE(before_vht), + *offset); + if (end - pos < noffset - *offset) + goto out_err; + memcpy(pos, ie + *offset, noffset - *offset); + pos += noffset - *offset; + *offset = noffset; + } + + /* Check if any channel in this sband supports at least 80 MHz */ + for (i = 0; i < sband->n_channels; i++) { + if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED | + IEEE80211_CHAN_NO_80MHZ)) + continue; + + have_80mhz = true; + break; + } + + if (sband->vht_cap.vht_supported && have_80mhz) { + if (end - pos < 2 + sizeof(struct ieee80211_vht_cap)) + goto out_err; + pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap, + sband->vht_cap.cap); + } + + /* insert custom IEs that go before HE */ + if (ie && ie_len) { + static const u8 before_he[] = { + /* + * no need to list the ones split off before VHT + * or generated here + */ + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_REQ_PARAMS, + WLAN_EID_AP_CSN, + /* TODO: add 11ah/11aj/11ak elements */ + }; + noffset = 
ieee80211_ie_split(ie, ie_len, + before_he, ARRAY_SIZE(before_he), + *offset); + if (end - pos < noffset - *offset) + goto out_err; + memcpy(pos, ie + *offset, noffset - *offset); + pos += noffset - *offset; + *offset = noffset; + } + + he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); + if (he_cap && + cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band), + IEEE80211_CHAN_NO_HE)) { + pos = ieee80211_ie_build_he_cap(0, pos, he_cap, end); + if (!pos) + goto out_err; + } + + eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif); + + if (eht_cap && + cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band), + IEEE80211_CHAN_NO_HE | + IEEE80211_CHAN_NO_EHT)) { + pos = ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, end, + sdata->vif.type == NL80211_IFTYPE_AP); + if (!pos) + goto out_err; + } + + if (cfg80211_any_usable_channels(local->hw.wiphy, + BIT(NL80211_BAND_6GHZ), + IEEE80211_CHAN_NO_HE)) { + struct ieee80211_supported_band *sband6; + + sband6 = local->hw.wiphy->bands[NL80211_BAND_6GHZ]; + he_cap = ieee80211_get_he_iftype_cap_vif(sband6, &sdata->vif); + + if (he_cap) { + enum nl80211_iftype iftype = + ieee80211_vif_type_p2p(&sdata->vif); + __le16 cap = ieee80211_get_he_6ghz_capa(sband6, iftype); + + pos = ieee80211_write_he_6ghz_cap(pos, cap, end); + } + } + + /* + * If adding more here, adjust code in main.c + * that calculates local->scan_ies_len. + */ + + return pos - buffer; + out_err: + WARN_ONCE(1, "not enough space for preq IEs\n"); + done: + return pos - buffer; +} + +int ieee80211_build_preq_ies(struct ieee80211_sub_if_data *sdata, u8 *buffer, + size_t buffer_len, + struct ieee80211_scan_ies *ie_desc, + const u8 *ie, size_t ie_len, + u8 bands_used, u32 *rate_masks, + struct cfg80211_chan_def *chandef, + u32 flags) +{ + size_t pos = 0, old_pos = 0, custom_ie_offset = 0; + int i; + + memset(ie_desc, 0, sizeof(*ie_desc)); + + for (i = 0; i < NUM_NL80211_BANDS; i++) { + if (bands_used & BIT(i)) { + pos += ieee80211_build_preq_ies_band(sdata, + buffer + pos, + buffer_len - pos, + ie, ie_len, i, + rate_masks[i], + chandef, + &custom_ie_offset, + flags); + ie_desc->ies[i] = buffer + old_pos; + ie_desc->len[i] = pos - old_pos; + old_pos = pos; + } + } + + /* add any remaining custom IEs */ + if (ie && ie_len) { + if (WARN_ONCE(buffer_len - pos < ie_len - custom_ie_offset, + "not enough space for preq custom IEs\n")) + return pos; + memcpy(buffer + pos, ie + custom_ie_offset, + ie_len - custom_ie_offset); + ie_desc->common_ies = buffer + pos; + ie_desc->common_ie_len = ie_len - custom_ie_offset; + pos += ie_len - custom_ie_offset; + } + + return pos; +}; + +struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, + const u8 *src, const u8 *dst, + u32 ratemask, + struct ieee80211_channel *chan, + const u8 *ssid, size_t ssid_len, + const u8 *ie, size_t ie_len, + u32 flags) +{ + struct ieee80211_local *local = sdata->local; + struct cfg80211_chan_def chandef; + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt; + int ies_len; + u32 rate_masks[NUM_NL80211_BANDS] = {}; + struct ieee80211_scan_ies dummy_ie_desc; + + /* + * Do not send DS Channel parameter for directed probe requests + * in order to maximize the chance that we get a response. Some + * badly-behaved APs don't respond when this parameter is included. 
+ */ + chandef.width = sdata->vif.bss_conf.chandef.width; + if (flags & IEEE80211_PROBE_FLAG_DIRECTED) + chandef.chan = NULL; + else + chandef.chan = chan; + + skb = ieee80211_probereq_get(&local->hw, src, ssid, ssid_len, + local->scan_ies_len + ie_len); + if (!skb) + return NULL; + + rate_masks[chan->band] = ratemask; + ies_len = ieee80211_build_preq_ies(sdata, skb_tail_pointer(skb), + skb_tailroom(skb), &dummy_ie_desc, + ie, ie_len, BIT(chan->band), + rate_masks, &chandef, flags); + skb_put(skb, ies_len); + + if (dst) { + mgmt = (struct ieee80211_mgmt *) skb->data; + memcpy(mgmt->da, dst, ETH_ALEN); + memcpy(mgmt->bssid, dst, ETH_ALEN); + } + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + + return skb; +} + +u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, + struct ieee802_11_elems *elems, + enum nl80211_band band, u32 *basic_rates) +{ + struct ieee80211_supported_band *sband; + size_t num_rates; + u32 supp_rates, rate_flags; + int i, j, shift; + + sband = sdata->local->hw.wiphy->bands[band]; + if (WARN_ON(!sband)) + return 1; + + rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); + shift = ieee80211_vif_get_shift(&sdata->vif); + + num_rates = sband->n_bitrates; + supp_rates = 0; + for (i = 0; i < elems->supp_rates_len + + elems->ext_supp_rates_len; i++) { + u8 rate = 0; + int own_rate; + bool is_basic; + if (i < elems->supp_rates_len) + rate = elems->supp_rates[i]; + else if (elems->ext_supp_rates) + rate = elems->ext_supp_rates + [i - elems->supp_rates_len]; + own_rate = 5 * (rate & 0x7f); + is_basic = !!(rate & 0x80); + + if (is_basic && (rate & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY) + continue; + + for (j = 0; j < num_rates; j++) { + int brate; + if ((rate_flags & sband->bitrates[j].flags) + != rate_flags) + continue; + + brate = DIV_ROUND_UP(sband->bitrates[j].bitrate, + 1 << shift); + + if (brate == own_rate) { + supp_rates |= BIT(j); + if (basic_rates && is_basic) + *basic_rates |= BIT(j); + } + } + } + return supp_rates; +} + +void ieee80211_stop_device(struct ieee80211_local *local) +{ + ieee80211_led_radio(local, false); + ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO); + + cancel_work_sync(&local->reconfig_filter); + + flush_workqueue(local->workqueue); + drv_stop(local); +} + +static void ieee80211_flush_completed_scan(struct ieee80211_local *local, + bool aborted) +{ + /* It's possible that we don't handle the scan completion in + * time during suspend, so if it's still marked as completed + * here, queue the work and flush it to clean things up. + * Instead of calling the worker function directly here, we + * really queue it to avoid potential races with other flows + * scheduling the same work. + */ + if (test_bit(SCAN_COMPLETED, &local->scanning)) { + /* If coming from reconfiguration failure, abort the scan so + * we don't attempt to continue a partial HW scan - which is + * possible otherwise if (e.g.) the 2.4 GHz portion was the + * completed scan, and a 5 GHz portion is still pending. + */ + if (aborted) + set_bit(SCAN_ABORTED, &local->scanning); + wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0); + wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work); + } +} + +static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_chanctx *ctx; + + /* + * We get here if during resume the device can't be restarted properly. 
+ * We might also get here if this happens during HW reset, which is a + * slightly different situation and we need to drop all connections in + * the latter case. + * + * Ask cfg80211 to turn off all interfaces, this will result in more + * warnings but at least we'll then get into a clean stopped state. + */ + + local->resuming = false; + local->suspended = false; + local->in_reconfig = false; + local->reconfig_failure = true; + + ieee80211_flush_completed_scan(local, true); + + /* scheduled scan clearly can't be running any more, but tell + * cfg80211 and clear local state + */ + ieee80211_sched_scan_end(local); + + list_for_each_entry(sdata, &local->interfaces, list) + sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER; + + /* Mark channel contexts as not being in the driver any more to avoid + * removing them from the driver during the shutdown process... + */ + mutex_lock(&local->chanctx_mtx); + list_for_each_entry(ctx, &local->chanctx_list, list) + ctx->driver_present = false; + mutex_unlock(&local->chanctx_mtx); +} + +static void ieee80211_assign_chanctx(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link) +{ + struct ieee80211_chanctx_conf *conf; + struct ieee80211_chanctx *ctx; + + if (!local->use_chanctx) + return; + + mutex_lock(&local->chanctx_mtx); + conf = rcu_dereference_protected(link->conf->chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + if (conf) { + ctx = container_of(conf, struct ieee80211_chanctx, conf); + drv_assign_vif_chanctx(local, sdata, link->conf, ctx); + } + mutex_unlock(&local->chanctx_mtx); +} + +static void ieee80211_reconfig_stations(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + /* add STAs back */ + mutex_lock(&local->sta_mtx); + list_for_each_entry(sta, &local->sta_list, list) { + enum ieee80211_sta_state state; + + if (!sta->uploaded || sta->sdata != sdata) + continue; + + for (state = IEEE80211_STA_NOTEXIST; + state < sta->sta_state; state++) + WARN_ON(drv_sta_state(local, sta->sdata, sta, state, + state + 1)); + } + mutex_unlock(&local->sta_mtx); +} + +static int ieee80211_reconfig_nan(struct ieee80211_sub_if_data *sdata) +{ + struct cfg80211_nan_func *func, **funcs; + int res, id, i = 0; + + res = drv_start_nan(sdata->local, sdata, + &sdata->u.nan.conf); + if (WARN_ON(res)) + return res; + + funcs = kcalloc(sdata->local->hw.max_nan_de_entries + 1, + sizeof(*funcs), + GFP_KERNEL); + if (!funcs) + return -ENOMEM; + + /* Add all the functions: + * This is a little bit ugly. We need to call a potentially sleeping + * callback for each NAN function, so we can't hold the spinlock. 
+ */ + spin_lock_bh(&sdata->u.nan.func_lock); + + idr_for_each_entry(&sdata->u.nan.function_inst_ids, func, id) + funcs[i++] = func; + + spin_unlock_bh(&sdata->u.nan.func_lock); + + for (i = 0; funcs[i]; i++) { + res = drv_add_nan_func(sdata->local, sdata, funcs[i]); + if (WARN_ON(res)) + ieee80211_nan_func_terminated(&sdata->vif, + funcs[i]->instance_id, + NL80211_NAN_FUNC_TERM_REASON_ERROR, + GFP_KERNEL); + } + + kfree(funcs); + + return 0; +} + +static void ieee80211_reconfig_ap_links(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u64 changed) +{ + int link_id; + + for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { + struct ieee80211_link_data *link; + + if (!(sdata->vif.active_links & BIT(link_id))) + continue; + + link = sdata_dereference(sdata->link[link_id], sdata); + if (!link) + continue; + + if (rcu_access_pointer(link->u.ap.beacon)) + drv_start_ap(local, sdata, link->conf); + + if (!link->conf->enable_beacon) + continue; + + changed |= BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED; + + ieee80211_link_info_change_notify(sdata, link, changed); + } +} + +int ieee80211_reconfig(struct ieee80211_local *local) +{ + struct ieee80211_hw *hw = &local->hw; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_chanctx *ctx; + struct sta_info *sta; + int res, i; + bool reconfig_due_to_wowlan = false; + struct ieee80211_sub_if_data *sched_scan_sdata; + struct cfg80211_sched_scan_request *sched_scan_req; + bool sched_scan_stopped = false; + bool suspended = local->suspended; + bool in_reconfig = false; + + /* nothing to do if HW shouldn't run */ + if (!local->open_count) + goto wake_up; + +#ifdef CONFIG_PM + if (suspended) + local->resuming = true; + + if (local->wowlan) { + /* + * In the wowlan case, both mac80211 and the device + * are functional when the resume op is called, so + * clear local->suspended so the device could operate + * normally (e.g. pass rx frames). + */ + local->suspended = false; + res = drv_resume(local); + local->wowlan = false; + if (res < 0) { + local->resuming = false; + return res; + } + if (res == 0) + goto wake_up; + WARN_ON(res > 1); + /* + * res is 1, which means the driver requested + * to go through a regular reset on wakeup. + * restore local->suspended in this case. + */ + reconfig_due_to_wowlan = true; + local->suspended = true; + } +#endif + + /* + * In case of hw_restart during suspend (without wowlan), + * cancel restart work, as we are reconfiguring the device + * anyway. + * Note that restart_work is scheduled on a frozen workqueue, + * so we can't deadlock in this case. + */ + if (suspended && local->in_reconfig && !reconfig_due_to_wowlan) + cancel_work_sync(&local->restart_work); + + local->started = false; + + /* + * Upon resume hardware can sometimes be goofy due to + * various platform / driver / bus issues, so restarting + * the device may at times not work immediately. Propagate + * the error. + */ + res = drv_start(local); + if (res) { + if (suspended) + WARN(1, "Hardware became unavailable upon resume. 
This could be a software issue prior to suspend or a hardware issue.\n"); + else + WARN(1, "Hardware became unavailable during restart.\n"); + ieee80211_handle_reconfig_failure(local); + return res; + } + + /* setup fragmentation threshold */ + drv_set_frag_threshold(local, hw->wiphy->frag_threshold); + + /* setup RTS threshold */ + drv_set_rts_threshold(local, hw->wiphy->rts_threshold); + + /* reset coverage class */ + drv_set_coverage_class(local, hw->wiphy->coverage_class); + + ieee80211_led_radio(local, true); + ieee80211_mod_tpt_led_trig(local, + IEEE80211_TPT_LEDTRIG_FL_RADIO, 0); + + /* add interfaces */ + sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata); + if (sdata) { + /* in HW restart it exists already */ + WARN_ON(local->resuming); + res = drv_add_interface(local, sdata); + if (WARN_ON(res)) { + RCU_INIT_POINTER(local->monitor_sdata, NULL); + synchronize_net(); + kfree(sdata); + } + } + + list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && + sdata->vif.type != NL80211_IFTYPE_MONITOR && + ieee80211_sdata_running(sdata)) { + res = drv_add_interface(local, sdata); + if (WARN_ON(res)) + break; + } + } + + /* If adding any of the interfaces failed above, roll back and + * report failure. + */ + if (res) { + list_for_each_entry_continue_reverse(sdata, &local->interfaces, + list) + if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && + sdata->vif.type != NL80211_IFTYPE_MONITOR && + ieee80211_sdata_running(sdata)) + drv_remove_interface(local, sdata); + ieee80211_handle_reconfig_failure(local); + return res; + } + + /* add channel contexts */ + if (local->use_chanctx) { + mutex_lock(&local->chanctx_mtx); + list_for_each_entry(ctx, &local->chanctx_list, list) + if (ctx->replace_state != + IEEE80211_CHANCTX_REPLACES_OTHER) + WARN_ON(drv_add_chanctx(local, ctx)); + mutex_unlock(&local->chanctx_mtx); + + sdata = wiphy_dereference(local->hw.wiphy, + local->monitor_sdata); + if (sdata && ieee80211_sdata_running(sdata)) + ieee80211_assign_chanctx(local, sdata, &sdata->deflink); + } + + /* reconfigure hardware */ + ieee80211_hw_config(local, ~0); + + ieee80211_configure_filter(local); + + /* Finally also reconfigure all the BSS information */ + list_for_each_entry(sdata, &local->interfaces, list) { + /* common change flags for all interface types - link only */ + u64 changed = BSS_CHANGED_ERP_CTS_PROT | + BSS_CHANGED_ERP_PREAMBLE | + BSS_CHANGED_ERP_SLOT | + BSS_CHANGED_HT | + BSS_CHANGED_BASIC_RATES | + BSS_CHANGED_BEACON_INT | + BSS_CHANGED_BSSID | + BSS_CHANGED_CQM | + BSS_CHANGED_QOS | + BSS_CHANGED_TXPOWER | + BSS_CHANGED_MCAST_RATE; + struct ieee80211_link_data *link = NULL; + unsigned int link_id; + u32 active_links = 0; + + if (!ieee80211_sdata_running(sdata)) + continue; + + sdata_lock(sdata); + if (ieee80211_vif_is_mld(&sdata->vif)) { + struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS] = { + [0] = &sdata->vif.bss_conf, + }; + + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + /* start with a single active link */ + active_links = sdata->vif.active_links; + link_id = ffs(active_links) - 1; + sdata->vif.active_links = BIT(link_id); + } + + drv_change_vif_links(local, sdata, 0, + sdata->vif.active_links, + old); + } + + for (link_id = 0; + link_id < ARRAY_SIZE(sdata->vif.link_conf); + link_id++) { + if (ieee80211_vif_is_mld(&sdata->vif) && + !(sdata->vif.active_links & BIT(link_id))) + continue; + + link = sdata_dereference(sdata->link[link_id], sdata); + if (!link) + continue; + + 
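+ /* re-assign the link's channel context to the driver after the restart */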
ieee80211_assign_chanctx(local, sdata, link); + } + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_MONITOR: + break; + case NL80211_IFTYPE_ADHOC: + if (sdata->vif.cfg.ibss_joined) + WARN_ON(drv_join_ibss(local, sdata)); + fallthrough; + default: + ieee80211_reconfig_stations(sdata); + fallthrough; + case NL80211_IFTYPE_AP: /* AP stations are handled later */ + for (i = 0; i < IEEE80211_NUM_ACS; i++) + drv_conf_tx(local, &sdata->deflink, i, + &sdata->deflink.tx_conf[i]); + break; + } + + if (sdata->vif.bss_conf.mu_mimo_owner) + changed |= BSS_CHANGED_MU_GROUPS; + + if (!ieee80211_vif_is_mld(&sdata->vif)) + changed |= BSS_CHANGED_IDLE; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + if (!ieee80211_vif_is_mld(&sdata->vif)) { + changed |= BSS_CHANGED_ASSOC | + BSS_CHANGED_ARP_FILTER | + BSS_CHANGED_PS; + + /* Re-send beacon info report to the driver */ + if (sdata->deflink.u.mgd.have_beacon) + changed |= BSS_CHANGED_BEACON_INFO; + + if (sdata->vif.bss_conf.max_idle_period || + sdata->vif.bss_conf.protected_keep_alive) + changed |= BSS_CHANGED_KEEP_ALIVE; + + if (sdata->vif.bss_conf.eht_puncturing) + changed |= BSS_CHANGED_EHT_PUNCTURING; + + ieee80211_bss_info_change_notify(sdata, + changed); + } else if (!WARN_ON(!link)) { + ieee80211_link_info_change_notify(sdata, link, + changed); + changed = BSS_CHANGED_ASSOC | + BSS_CHANGED_IDLE | + BSS_CHANGED_PS | + BSS_CHANGED_ARP_FILTER; + ieee80211_vif_cfg_change_notify(sdata, changed); + } + break; + case NL80211_IFTYPE_OCB: + changed |= BSS_CHANGED_OCB; + ieee80211_bss_info_change_notify(sdata, changed); + break; + case NL80211_IFTYPE_ADHOC: + changed |= BSS_CHANGED_IBSS; + fallthrough; + case NL80211_IFTYPE_AP: + changed |= BSS_CHANGED_P2P_PS; + + if (ieee80211_vif_is_mld(&sdata->vif)) + ieee80211_vif_cfg_change_notify(sdata, + BSS_CHANGED_SSID); + else + changed |= BSS_CHANGED_SSID; + + if (sdata->vif.bss_conf.ftm_responder == 1 && + wiphy_ext_feature_isset(sdata->local->hw.wiphy, + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER)) + changed |= BSS_CHANGED_FTM_RESPONDER; + + if (sdata->vif.type == NL80211_IFTYPE_AP) { + changed |= BSS_CHANGED_AP_PROBE_RESP; + + if (ieee80211_vif_is_mld(&sdata->vif)) { + ieee80211_reconfig_ap_links(local, + sdata, + changed); + break; + } + + if (rcu_access_pointer(sdata->deflink.u.ap.beacon)) + drv_start_ap(local, sdata, + sdata->deflink.conf); + } + fallthrough; + case NL80211_IFTYPE_MESH_POINT: + if (sdata->vif.bss_conf.enable_beacon) { + changed |= BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED; + ieee80211_bss_info_change_notify(sdata, changed); + } + break; + case NL80211_IFTYPE_NAN: + res = ieee80211_reconfig_nan(sdata); + if (res < 0) { + sdata_unlock(sdata); + ieee80211_handle_reconfig_failure(local); + return res; + } + break; + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_P2P_DEVICE: + /* nothing to do */ + break; + case NL80211_IFTYPE_UNSPECIFIED: + case NUM_NL80211_IFTYPES: + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_WDS: + WARN_ON(1); + break; + } + sdata_unlock(sdata); + + if (active_links) + ieee80211_set_active_links(&sdata->vif, active_links); + } + + ieee80211_recalc_ps(local); + + /* + * The sta might be in psm against the ap (e.g. because + * this was the state before a hw restart), so we + * explicitly send a null packet in order to make sure + * it'll sync against the ap (and get out of psm). 
+ */ + if (!(local->hw.conf.flags & IEEE80211_CONF_PS)) { + list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata->vif.type != NL80211_IFTYPE_STATION) + continue; + if (!sdata->u.mgd.associated) + continue; + + ieee80211_send_nullfunc(local, sdata, false); + } + } + + /* APs are now beaconing, add back stations */ + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + + sdata_lock(sdata); + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_AP: + ieee80211_reconfig_stations(sdata); + break; + default: + break; + } + sdata_unlock(sdata); + } + + /* add back keys */ + list_for_each_entry(sdata, &local->interfaces, list) + ieee80211_reenable_keys(sdata); + + /* Reconfigure sched scan if it was interrupted by FW restart */ + mutex_lock(&local->mtx); + sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata, + lockdep_is_held(&local->mtx)); + sched_scan_req = rcu_dereference_protected(local->sched_scan_req, + lockdep_is_held(&local->mtx)); + if (sched_scan_sdata && sched_scan_req) + /* + * Sched scan stopped, but we don't want to report it. Instead, + * we're trying to reschedule. However, if more than one scan + * plan was set, we cannot reschedule since we don't know which + * scan plan was currently running (and some scan plans may have + * already finished). + */ + if (sched_scan_req->n_scan_plans > 1 || + __ieee80211_request_sched_scan_start(sched_scan_sdata, + sched_scan_req)) { + RCU_INIT_POINTER(local->sched_scan_sdata, NULL); + RCU_INIT_POINTER(local->sched_scan_req, NULL); + sched_scan_stopped = true; + } + mutex_unlock(&local->mtx); + + if (sched_scan_stopped) + cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0); + + wake_up: + + if (local->monitors == local->open_count && local->monitors > 0) + ieee80211_add_virtual_monitor(local); + + /* + * Clear the WLAN_STA_BLOCK_BA flag so new aggregation + * sessions can be established after a resume. + * + * Also tear down aggregation sessions since reconfiguring + * them in a hardware restart scenario is not easily done + * right now, and the hardware will have lost information + * about the sessions, but we and the AP still think they + * are active. This is really a workaround though. + */ + if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) { + mutex_lock(&local->sta_mtx); + + list_for_each_entry(sta, &local->sta_list, list) { + if (!local->resuming) + ieee80211_sta_tear_down_BA_sessions( + sta, AGG_STOP_LOCAL_REQUEST); + clear_sta_flag(sta, WLAN_STA_BLOCK_BA); + } + + mutex_unlock(&local->sta_mtx); + } + + /* + * If this is for hw restart things are still running. + * We may want to change that later, however. 
+ */ + if (local->open_count && (!suspended || reconfig_due_to_wowlan)) + drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART); + + if (local->in_reconfig) { + in_reconfig = local->in_reconfig; + local->in_reconfig = false; + barrier(); + + /* Restart deferred ROCs */ + mutex_lock(&local->mtx); + ieee80211_start_next_roc(local); + mutex_unlock(&local->mtx); + + /* Requeue all works */ + list_for_each_entry(sdata, &local->interfaces, list) + wiphy_work_queue(local->hw.wiphy, &sdata->work); + } + + ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, + IEEE80211_QUEUE_STOP_REASON_SUSPEND, + false); + + if (in_reconfig) { + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + if (sdata->vif.type == NL80211_IFTYPE_STATION) + ieee80211_sta_restart(sdata); + } + } + + if (!suspended) + return 0; + +#ifdef CONFIG_PM + /* first set suspended false, then resuming */ + local->suspended = false; + mb(); + local->resuming = false; + + ieee80211_flush_completed_scan(local, false); + + if (local->open_count && !reconfig_due_to_wowlan) + drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND); + + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + if (sdata->vif.type == NL80211_IFTYPE_STATION) + ieee80211_sta_restart(sdata); + } + + mod_timer(&local->sta_cleanup, jiffies + 1); +#else + WARN_ON(1); +#endif + + return 0; +} + +static void ieee80211_reconfig_disconnect(struct ieee80211_vif *vif, u8 flag) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_local *local; + struct ieee80211_key *key; + + if (WARN_ON(!vif)) + return; + + sdata = vif_to_sdata(vif); + local = sdata->local; + + if (WARN_ON(flag & IEEE80211_SDATA_DISCONNECT_RESUME && + !local->resuming)) + return; + + if (WARN_ON(flag & IEEE80211_SDATA_DISCONNECT_HW_RESTART && + !local->in_reconfig)) + return; + + if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) + return; + + sdata->flags |= flag; + + mutex_lock(&local->key_mtx); + list_for_each_entry(key, &sdata->key_list, list) + key->flags |= KEY_FLAG_TAINTED; + mutex_unlock(&local->key_mtx); +} + +void ieee80211_hw_restart_disconnect(struct ieee80211_vif *vif) +{ + ieee80211_reconfig_disconnect(vif, IEEE80211_SDATA_DISCONNECT_HW_RESTART); +} +EXPORT_SYMBOL_GPL(ieee80211_hw_restart_disconnect); + +void ieee80211_resume_disconnect(struct ieee80211_vif *vif) +{ + ieee80211_reconfig_disconnect(vif, IEEE80211_SDATA_DISCONNECT_RESUME); +} +EXPORT_SYMBOL_GPL(ieee80211_resume_disconnect); + +void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_chanctx *chanctx; + + mutex_lock(&local->chanctx_mtx); + + chanctx_conf = rcu_dereference_protected(link->conf->chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + + /* + * This function can be called from a work, thus it may be possible + * that the chanctx_conf is removed (due to a disconnection, for + * example). + * So nothing should be done in such case. 
+ */ + if (!chanctx_conf) + goto unlock; + + chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf); + ieee80211_recalc_smps_chanctx(local, chanctx); + unlock: + mutex_unlock(&local->chanctx_mtx); +} + +void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata, + int link_id) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_chanctx *chanctx; + int i; + + mutex_lock(&local->chanctx_mtx); + + for (i = 0; i < ARRAY_SIZE(sdata->vif.link_conf); i++) { + struct ieee80211_bss_conf *bss_conf; + + if (link_id >= 0 && link_id != i) + continue; + + rcu_read_lock(); + bss_conf = rcu_dereference(sdata->vif.link_conf[i]); + if (!bss_conf) { + rcu_read_unlock(); + continue; + } + + chanctx_conf = rcu_dereference_protected(bss_conf->chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + /* + * Since we hold the chanctx_mtx (checked above) + * we can take the chanctx_conf pointer out of the + * RCU critical section, it cannot go away without + * the mutex. Just the way we reached it could - in + * theory - go away, but we don't really care and + * it really shouldn't happen anyway. + */ + rcu_read_unlock(); + + if (!chanctx_conf) + goto unlock; + + chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, + conf); + ieee80211_recalc_chanctx_min_def(local, chanctx, NULL); + } + unlock: + mutex_unlock(&local->chanctx_mtx); +} + +size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset) +{ + size_t pos = offset; + + while (pos < ielen && ies[pos] != WLAN_EID_VENDOR_SPECIFIC) + pos += 2 + ies[pos + 1]; + + return pos; +} + +u8 *ieee80211_ie_build_s1g_cap(u8 *pos, struct ieee80211_sta_s1g_cap *s1g_cap) +{ + *pos++ = WLAN_EID_S1G_CAPABILITIES; + *pos++ = sizeof(struct ieee80211_s1g_cap); + memset(pos, 0, sizeof(struct ieee80211_s1g_cap)); + + memcpy(pos, &s1g_cap->cap, sizeof(s1g_cap->cap)); + pos += sizeof(s1g_cap->cap); + + memcpy(pos, &s1g_cap->nss_mcs, sizeof(s1g_cap->nss_mcs)); + pos += sizeof(s1g_cap->nss_mcs); + + return pos; +} + +u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, + u16 cap) +{ + __le16 tmp; + + *pos++ = WLAN_EID_HT_CAPABILITY; + *pos++ = sizeof(struct ieee80211_ht_cap); + memset(pos, 0, sizeof(struct ieee80211_ht_cap)); + + /* capability flags */ + tmp = cpu_to_le16(cap); + memcpy(pos, &tmp, sizeof(u16)); + pos += sizeof(u16); + + /* AMPDU parameters */ + *pos++ = ht_cap->ampdu_factor | + (ht_cap->ampdu_density << + IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); + + /* MCS set */ + memcpy(pos, &ht_cap->mcs, sizeof(ht_cap->mcs)); + pos += sizeof(ht_cap->mcs); + + /* extended capabilities */ + pos += sizeof(__le16); + + /* BF capabilities */ + pos += sizeof(__le32); + + /* antenna selection */ + pos += sizeof(u8); + + return pos; +} + +u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, + u32 cap) +{ + __le32 tmp; + + *pos++ = WLAN_EID_VHT_CAPABILITY; + *pos++ = sizeof(struct ieee80211_vht_cap); + memset(pos, 0, sizeof(struct ieee80211_vht_cap)); + + /* capability flags */ + tmp = cpu_to_le32(cap); + memcpy(pos, &tmp, sizeof(u32)); + pos += sizeof(u32); + + /* VHT MCS set */ + memcpy(pos, &vht_cap->vht_mcs, sizeof(vht_cap->vht_mcs)); + pos += sizeof(vht_cap->vht_mcs); + + return pos; +} + +u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype) +{ + const struct ieee80211_sta_he_cap *he_cap; + struct ieee80211_supported_band *sband; + u8 n; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return 0; + 
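+ /*
+  * Length returned below: EID + length + extension EID (2 + 1), plus the
+  * fixed HE capability fields, the variable-size MCS/NSS set and the
+  * optional PPE thresholds.
+  */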
+ he_cap = ieee80211_get_he_iftype_cap(sband, iftype); + if (!he_cap) + return 0; + + n = ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem); + return 2 + 1 + + sizeof(he_cap->he_cap_elem) + n + + ieee80211_he_ppe_size(he_cap->ppe_thres[0], + he_cap->he_cap_elem.phy_cap_info); +} + +u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos, + const struct ieee80211_sta_he_cap *he_cap, + u8 *end) +{ + struct ieee80211_he_cap_elem elem; + u8 n; + u8 ie_len; + u8 *orig_pos = pos; + + /* Make sure we have place for the IE */ + /* + * TODO: the 1 added is because this temporarily is under the EXTENSION + * IE. Get rid of it when it moves. + */ + if (!he_cap) + return orig_pos; + + /* modify on stack first to calculate 'n' and 'ie_len' correctly */ + elem = he_cap->he_cap_elem; + + if (disable_flags & IEEE80211_CONN_DISABLE_40MHZ) + elem.phy_cap_info[0] &= + ~(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G); + + if (disable_flags & IEEE80211_CONN_DISABLE_160MHZ) + elem.phy_cap_info[0] &= + ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; + + if (disable_flags & IEEE80211_CONN_DISABLE_80P80MHZ) + elem.phy_cap_info[0] &= + ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G; + + n = ieee80211_he_mcs_nss_size(&elem); + ie_len = 2 + 1 + + sizeof(he_cap->he_cap_elem) + n + + ieee80211_he_ppe_size(he_cap->ppe_thres[0], + he_cap->he_cap_elem.phy_cap_info); + + if ((end - pos) < ie_len) + return orig_pos; + + *pos++ = WLAN_EID_EXTENSION; + pos++; /* We'll set the size later below */ + *pos++ = WLAN_EID_EXT_HE_CAPABILITY; + + /* Fixed data */ + memcpy(pos, &elem, sizeof(elem)); + pos += sizeof(elem); + + memcpy(pos, &he_cap->he_mcs_nss_supp, n); + pos += n; + + /* Check if PPE Threshold should be present */ + if ((he_cap->he_cap_elem.phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0) + goto end; + + /* + * Calculate how many PPET16/PPET8 pairs are to come. Algorithm: + * (NSS_M1 + 1) x (num of 1 bits in RU_INDEX_BITMASK) + */ + n = hweight8(he_cap->ppe_thres[0] & + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); + n *= (1 + ((he_cap->ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) >> + IEEE80211_PPE_THRES_NSS_POS)); + + /* + * Each pair is 6 bits, and we need to add the 7 "header" bits to the + * total size. 
+ */ + n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7; + n = DIV_ROUND_UP(n, 8); + + /* Copy PPE Thresholds */ + memcpy(pos, &he_cap->ppe_thres, n); + pos += n; + +end: + orig_pos[1] = (pos - orig_pos) - 2; + return pos; +} + +void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata, + enum ieee80211_smps_mode smps_mode, + struct sk_buff *skb) +{ + struct ieee80211_supported_band *sband; + const struct ieee80211_sband_iftype_data *iftd; + enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif); + u8 *pos; + u16 cap; + + if (!cfg80211_any_usable_channels(sdata->local->hw.wiphy, + BIT(NL80211_BAND_6GHZ), + IEEE80211_CHAN_NO_HE)) + return; + + sband = sdata->local->hw.wiphy->bands[NL80211_BAND_6GHZ]; + + iftd = ieee80211_get_sband_iftype_data(sband, iftype); + if (!iftd) + return; + + /* Check for device HE 6 GHz capability before adding element */ + if (!iftd->he_6ghz_capa.capa) + return; + + cap = le16_to_cpu(iftd->he_6ghz_capa.capa); + cap &= ~IEEE80211_HE_6GHZ_CAP_SM_PS; + + switch (smps_mode) { + case IEEE80211_SMPS_AUTOMATIC: + case IEEE80211_SMPS_NUM_MODES: + WARN_ON(1); + fallthrough; + case IEEE80211_SMPS_OFF: + cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED, + IEEE80211_HE_6GHZ_CAP_SM_PS); + break; + case IEEE80211_SMPS_STATIC: + cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_STATIC, + IEEE80211_HE_6GHZ_CAP_SM_PS); + break; + case IEEE80211_SMPS_DYNAMIC: + cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC, + IEEE80211_HE_6GHZ_CAP_SM_PS); + break; + } + + pos = skb_put(skb, 2 + 1 + sizeof(cap)); + ieee80211_write_he_6ghz_cap(pos, cpu_to_le16(cap), + pos + 2 + 1 + sizeof(cap)); +} + +u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, + const struct cfg80211_chan_def *chandef, + u16 prot_mode, bool rifs_mode) +{ + struct ieee80211_ht_operation *ht_oper; + /* Build HT Information */ + *pos++ = WLAN_EID_HT_OPERATION; + *pos++ = sizeof(struct ieee80211_ht_operation); + ht_oper = (struct ieee80211_ht_operation *)pos; + ht_oper->primary_chan = ieee80211_frequency_to_channel( + chandef->chan->center_freq); + switch (chandef->width) { + case NL80211_CHAN_WIDTH_160: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_40: + if (chandef->center_freq1 > chandef->chan->center_freq) + ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + else + ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; + break; + case NL80211_CHAN_WIDTH_320: + /* HT information element should not be included on 6GHz */ + WARN_ON(1); + return pos; + default: + ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; + break; + } + if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && + chandef->width != NL80211_CHAN_WIDTH_20_NOHT && + chandef->width != NL80211_CHAN_WIDTH_20) + ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; + + if (rifs_mode) + ht_oper->ht_param |= IEEE80211_HT_PARAM_RIFS_MODE; + + ht_oper->operation_mode = cpu_to_le16(prot_mode); + ht_oper->stbc_param = 0x0000; + + /* It seems that Basic MCS set and Supported MCS set + are identical for the first 10 bytes */ + memset(&ht_oper->basic_set, 0, 16); + memcpy(&ht_oper->basic_set, &ht_cap->mcs, 10); + + return pos + sizeof(struct ieee80211_ht_operation); +} + +void ieee80211_ie_build_wide_bw_cs(u8 *pos, + const struct cfg80211_chan_def *chandef) +{ + *pos++ = WLAN_EID_WIDE_BW_CHANNEL_SWITCH; /* EID */ + *pos++ = 3; /* IE length */ + /* New channel width */ + switch (chandef->width) { + case NL80211_CHAN_WIDTH_80: + *pos++ = IEEE80211_VHT_CHANWIDTH_80MHZ; + break; + 
case NL80211_CHAN_WIDTH_160: + *pos++ = IEEE80211_VHT_CHANWIDTH_160MHZ; + break; + case NL80211_CHAN_WIDTH_80P80: + *pos++ = IEEE80211_VHT_CHANWIDTH_80P80MHZ; + break; + case NL80211_CHAN_WIDTH_320: + /* The behavior is not defined for 320 MHz channels */ + WARN_ON(1); + fallthrough; + default: + *pos++ = IEEE80211_VHT_CHANWIDTH_USE_HT; + } + + /* new center frequency segment 0 */ + *pos++ = ieee80211_frequency_to_channel(chandef->center_freq1); + /* new center frequency segment 1 */ + if (chandef->center_freq2) + *pos++ = ieee80211_frequency_to_channel(chandef->center_freq2); + else + *pos++ = 0; +} + +u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, + const struct cfg80211_chan_def *chandef) +{ + struct ieee80211_vht_operation *vht_oper; + + *pos++ = WLAN_EID_VHT_OPERATION; + *pos++ = sizeof(struct ieee80211_vht_operation); + vht_oper = (struct ieee80211_vht_operation *)pos; + vht_oper->center_freq_seg0_idx = ieee80211_frequency_to_channel( + chandef->center_freq1); + if (chandef->center_freq2) + vht_oper->center_freq_seg1_idx = + ieee80211_frequency_to_channel(chandef->center_freq2); + else + vht_oper->center_freq_seg1_idx = 0x00; + + switch (chandef->width) { + case NL80211_CHAN_WIDTH_160: + /* + * Convert 160 MHz channel width to new style as interop + * workaround. + */ + vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ; + vht_oper->center_freq_seg1_idx = vht_oper->center_freq_seg0_idx; + if (chandef->chan->center_freq < chandef->center_freq1) + vht_oper->center_freq_seg0_idx -= 8; + else + vht_oper->center_freq_seg0_idx += 8; + break; + case NL80211_CHAN_WIDTH_80P80: + /* + * Convert 80+80 MHz channel width to new style as interop + * workaround. + */ + vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ; + break; + case NL80211_CHAN_WIDTH_80: + vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ; + break; + case NL80211_CHAN_WIDTH_320: + /* VHT information element should not be included on 6GHz */ + WARN_ON(1); + return pos; + default: + vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT; + break; + } + + /* don't require special VHT peer rates */ + vht_oper->basic_mcs_set = cpu_to_le16(0xffff); + + return pos + sizeof(struct ieee80211_vht_operation); +} + +u8 *ieee80211_ie_build_he_oper(u8 *pos, struct cfg80211_chan_def *chandef) +{ + struct ieee80211_he_operation *he_oper; + struct ieee80211_he_6ghz_oper *he_6ghz_op; + u32 he_oper_params; + u8 ie_len = 1 + sizeof(struct ieee80211_he_operation); + + if (chandef->chan->band == NL80211_BAND_6GHZ) + ie_len += sizeof(struct ieee80211_he_6ghz_oper); + + *pos++ = WLAN_EID_EXTENSION; + *pos++ = ie_len; + *pos++ = WLAN_EID_EXT_HE_OPERATION; + + he_oper_params = 0; + he_oper_params |= u32_encode_bits(1023, /* disabled */ + IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK); + he_oper_params |= u32_encode_bits(1, + IEEE80211_HE_OPERATION_ER_SU_DISABLE); + he_oper_params |= u32_encode_bits(1, + IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED); + if (chandef->chan->band == NL80211_BAND_6GHZ) + he_oper_params |= u32_encode_bits(1, + IEEE80211_HE_OPERATION_6GHZ_OP_INFO); + + he_oper = (struct ieee80211_he_operation *)pos; + he_oper->he_oper_params = cpu_to_le32(he_oper_params); + + /* don't require special HE peer rates */ + he_oper->he_mcs_nss_set = cpu_to_le16(0xffff); + pos += sizeof(struct ieee80211_he_operation); + + if (chandef->chan->band != NL80211_BAND_6GHZ) + goto out; + + /* TODO add VHT operational */ + he_6ghz_op = (struct ieee80211_he_6ghz_oper *)pos; + he_6ghz_op->minrate = 6; /* 6 Mbps */ + 
he_6ghz_op->primary = + ieee80211_frequency_to_channel(chandef->chan->center_freq); + he_6ghz_op->ccfs0 = + ieee80211_frequency_to_channel(chandef->center_freq1); + if (chandef->center_freq2) + he_6ghz_op->ccfs1 = + ieee80211_frequency_to_channel(chandef->center_freq2); + else + he_6ghz_op->ccfs1 = 0; + + switch (chandef->width) { + case NL80211_CHAN_WIDTH_320: + /* + * TODO: mesh operation is not defined over 6GHz 320 MHz + * channels. + */ + WARN_ON(1); + break; + case NL80211_CHAN_WIDTH_160: + /* Convert 160 MHz channel width to new style as interop + * workaround. + */ + he_6ghz_op->control = + IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ; + he_6ghz_op->ccfs1 = he_6ghz_op->ccfs0; + if (chandef->chan->center_freq < chandef->center_freq1) + he_6ghz_op->ccfs0 -= 8; + else + he_6ghz_op->ccfs0 += 8; + fallthrough; + case NL80211_CHAN_WIDTH_80P80: + he_6ghz_op->control = + IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ; + break; + case NL80211_CHAN_WIDTH_80: + he_6ghz_op->control = + IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ; + break; + case NL80211_CHAN_WIDTH_40: + he_6ghz_op->control = + IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ; + break; + default: + he_6ghz_op->control = + IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ; + break; + } + + pos += sizeof(struct ieee80211_he_6ghz_oper); + +out: + return pos; +} + +u8 *ieee80211_ie_build_eht_oper(u8 *pos, struct cfg80211_chan_def *chandef, + const struct ieee80211_sta_eht_cap *eht_cap) + +{ + const struct ieee80211_eht_mcs_nss_supp_20mhz_only *eht_mcs_nss = + &eht_cap->eht_mcs_nss_supp.only_20mhz; + struct ieee80211_eht_operation *eht_oper; + struct ieee80211_eht_operation_info *eht_oper_info; + u8 eht_oper_len = offsetof(struct ieee80211_eht_operation, optional); + u8 eht_oper_info_len = + offsetof(struct ieee80211_eht_operation_info, optional); + u8 chan_width = 0; + + *pos++ = WLAN_EID_EXTENSION; + *pos++ = 1 + eht_oper_len + eht_oper_info_len; + *pos++ = WLAN_EID_EXT_EHT_OPERATION; + + eht_oper = (struct ieee80211_eht_operation *)pos; + + memcpy(&eht_oper->basic_mcs_nss, eht_mcs_nss, sizeof(*eht_mcs_nss)); + eht_oper->params |= IEEE80211_EHT_OPER_INFO_PRESENT; + pos += eht_oper_len; + + eht_oper_info = + (struct ieee80211_eht_operation_info *)eht_oper->optional; + + eht_oper_info->ccfs0 = + ieee80211_frequency_to_channel(chandef->center_freq1); + if (chandef->center_freq2) + eht_oper_info->ccfs1 = + ieee80211_frequency_to_channel(chandef->center_freq2); + else + eht_oper_info->ccfs1 = 0; + + switch (chandef->width) { + case NL80211_CHAN_WIDTH_320: + chan_width = IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ; + eht_oper_info->ccfs1 = eht_oper_info->ccfs0; + if (chandef->chan->center_freq < chandef->center_freq1) + eht_oper_info->ccfs0 -= 16; + else + eht_oper_info->ccfs0 += 16; + break; + case NL80211_CHAN_WIDTH_160: + eht_oper_info->ccfs1 = eht_oper_info->ccfs0; + if (chandef->chan->center_freq < chandef->center_freq1) + eht_oper_info->ccfs0 -= 8; + else + eht_oper_info->ccfs0 += 8; + fallthrough; + case NL80211_CHAN_WIDTH_80P80: + chan_width = IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ; + break; + case NL80211_CHAN_WIDTH_80: + chan_width = IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ; + break; + case NL80211_CHAN_WIDTH_40: + chan_width = IEEE80211_EHT_OPER_CHAN_WIDTH_40MHZ; + break; + default: + chan_width = IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ; + break; + } + eht_oper_info->control = chan_width; + pos += eht_oper_info_len; + + /* TODO: eht_oper_info->optional */ + + return pos; +} + +bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper, + 
struct cfg80211_chan_def *chandef) +{ + enum nl80211_channel_type channel_type; + + if (!ht_oper) + return false; + + switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { + case IEEE80211_HT_PARAM_CHA_SEC_NONE: + channel_type = NL80211_CHAN_HT20; + break; + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + channel_type = NL80211_CHAN_HT40PLUS; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + channel_type = NL80211_CHAN_HT40MINUS; + break; + default: + return false; + } + + cfg80211_chandef_create(chandef, chandef->chan, channel_type); + return true; +} + +bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info, + const struct ieee80211_vht_operation *oper, + const struct ieee80211_ht_operation *htop, + struct cfg80211_chan_def *chandef) +{ + struct cfg80211_chan_def new = *chandef; + int cf0, cf1; + int ccfs0, ccfs1, ccfs2; + int ccf0, ccf1; + u32 vht_cap; + bool support_80_80 = false; + bool support_160 = false; + u8 ext_nss_bw_supp = u32_get_bits(vht_cap_info, + IEEE80211_VHT_CAP_EXT_NSS_BW_MASK); + u8 supp_chwidth = u32_get_bits(vht_cap_info, + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK); + + if (!oper || !htop) + return false; + + vht_cap = hw->wiphy->bands[chandef->chan->band]->vht_cap.cap; + support_160 = (vht_cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK | + IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)); + support_80_80 = ((vht_cap & + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) || + (vht_cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ && + vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) || + ((vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) >> + IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT > 1)); + ccfs0 = oper->center_freq_seg0_idx; + ccfs1 = oper->center_freq_seg1_idx; + ccfs2 = (le16_to_cpu(htop->operation_mode) & + IEEE80211_HT_OP_MODE_CCFS2_MASK) + >> IEEE80211_HT_OP_MODE_CCFS2_SHIFT; + + ccf0 = ccfs0; + + /* if not supported, parse as though we didn't understand it */ + if (!ieee80211_hw_check(hw, SUPPORTS_VHT_EXT_NSS_BW)) + ext_nss_bw_supp = 0; + + /* + * Cf. IEEE 802.11 Table 9-250 + * + * We really just consider that because it's inefficient to connect + * at a higher bandwidth than we'll actually be able to use. + */ + switch ((supp_chwidth << 4) | ext_nss_bw_supp) { + default: + case 0x00: + ccf1 = 0; + support_160 = false; + support_80_80 = false; + break; + case 0x01: + support_80_80 = false; + fallthrough; + case 0x02: + case 0x03: + ccf1 = ccfs2; + break; + case 0x10: + ccf1 = ccfs1; + break; + case 0x11: + case 0x12: + if (!ccfs1) + ccf1 = ccfs2; + else + ccf1 = ccfs1; + break; + case 0x13: + case 0x20: + case 0x23: + ccf1 = ccfs1; + break; + } + + cf0 = ieee80211_channel_to_frequency(ccf0, chandef->chan->band); + cf1 = ieee80211_channel_to_frequency(ccf1, chandef->chan->band); + + switch (oper->chan_width) { + case IEEE80211_VHT_CHANWIDTH_USE_HT: + /* just use HT information directly */ + break; + case IEEE80211_VHT_CHANWIDTH_80MHZ: + new.width = NL80211_CHAN_WIDTH_80; + new.center_freq1 = cf0; + /* If needed, adjust based on the newer interop workaround. 
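 * For example, center_freq_seg0_idx = 42 together with
 * center_freq_seg1_idx = 50 (8 channels, i.e. 40 MHz apart) really
 * describes a 160 MHz channel centred on channel 50, while a larger
 * difference describes an 80+80 MHz channel.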
*/ + if (ccf1) { + unsigned int diff; + + diff = abs(ccf1 - ccf0); + if ((diff == 8) && support_160) { + new.width = NL80211_CHAN_WIDTH_160; + new.center_freq1 = cf1; + } else if ((diff > 8) && support_80_80) { + new.width = NL80211_CHAN_WIDTH_80P80; + new.center_freq2 = cf1; + } + } + break; + case IEEE80211_VHT_CHANWIDTH_160MHZ: + /* deprecated encoding */ + new.width = NL80211_CHAN_WIDTH_160; + new.center_freq1 = cf0; + break; + case IEEE80211_VHT_CHANWIDTH_80P80MHZ: + /* deprecated encoding */ + new.width = NL80211_CHAN_WIDTH_80P80; + new.center_freq1 = cf0; + new.center_freq2 = cf1; + break; + default: + return false; + } + + if (!cfg80211_chandef_valid(&new)) + return false; + + *chandef = new; + return true; +} + +void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation *eht_oper, + bool support_160, bool support_320, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_eht_operation_info *info = (void *)eht_oper->optional; + + chandef->center_freq1 = + ieee80211_channel_to_frequency(info->ccfs0, + chandef->chan->band); + + switch (u8_get_bits(info->control, + IEEE80211_EHT_OPER_CHAN_WIDTH)) { + case IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ: + chandef->width = NL80211_CHAN_WIDTH_20; + break; + case IEEE80211_EHT_OPER_CHAN_WIDTH_40MHZ: + chandef->width = NL80211_CHAN_WIDTH_40; + break; + case IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ: + chandef->width = NL80211_CHAN_WIDTH_80; + break; + case IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ: + if (support_160) { + chandef->width = NL80211_CHAN_WIDTH_160; + chandef->center_freq1 = + ieee80211_channel_to_frequency(info->ccfs1, + chandef->chan->band); + } else { + chandef->width = NL80211_CHAN_WIDTH_80; + } + break; + case IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ: + if (support_320) { + chandef->width = NL80211_CHAN_WIDTH_320; + chandef->center_freq1 = + ieee80211_channel_to_frequency(info->ccfs1, + chandef->chan->band); + } else if (support_160) { + chandef->width = NL80211_CHAN_WIDTH_160; + } else { + chandef->width = NL80211_CHAN_WIDTH_80; + + if (chandef->center_freq1 > chandef->chan->center_freq) + chandef->center_freq1 -= 40; + else + chandef->center_freq1 += 40; + } + break; + } +} + +bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata, + const struct ieee80211_he_operation *he_oper, + const struct ieee80211_eht_operation *eht_oper, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; + enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif); + const struct ieee80211_sta_he_cap *he_cap; + const struct ieee80211_sta_eht_cap *eht_cap; + struct cfg80211_chan_def he_chandef = *chandef; + const struct ieee80211_he_6ghz_oper *he_6ghz_oper; + struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; + bool support_80_80, support_160, support_320; + u8 he_phy_cap, eht_phy_cap; + u32 freq; + + if (chandef->chan->band != NL80211_BAND_6GHZ) + return true; + + sband = local->hw.wiphy->bands[NL80211_BAND_6GHZ]; + + he_cap = ieee80211_get_he_iftype_cap(sband, iftype); + if (!he_cap) { + sdata_info(sdata, "Missing iftype sband data/HE cap"); + return false; + } + + he_phy_cap = he_cap->he_cap_elem.phy_cap_info[0]; + support_160 = + he_phy_cap & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; + support_80_80 = + he_phy_cap & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G; + + if (!he_oper) { + sdata_info(sdata, + "HE is not advertised on (on %d MHz), expect issues\n", + chandef->chan->center_freq); + return false; + } + + 
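/*
 * Worked example (mirroring the encoding side above): a 6 GHz
 * Operation Information field with control width "160 MHz",
 * ccfs0 = 7 and ccfs1 = 15 (|15 - 7| = 8) is parsed further below
 * into a 160 MHz chandef centred on channel 15 (6025 MHz), provided
 * the local interface supports 160 MHz.
 */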
eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype); + if (!eht_cap) + eht_oper = NULL; + + he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper); + + if (!he_6ghz_oper) { + sdata_info(sdata, + "HE 6GHz operation missing (on %d MHz), expect issues\n", + chandef->chan->center_freq); + return false; + } + + /* + * The EHT operation IE does not contain the primary channel so the + * primary channel frequency should be taken from the 6 GHz operation + * information. + */ + freq = ieee80211_channel_to_frequency(he_6ghz_oper->primary, + NL80211_BAND_6GHZ); + he_chandef.chan = ieee80211_get_channel(sdata->local->hw.wiphy, freq); + + switch (u8_get_bits(he_6ghz_oper->control, + IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) { + case IEEE80211_6GHZ_CTRL_REG_LPI_AP: + bss_conf->power_type = IEEE80211_REG_LPI_AP; + break; + case IEEE80211_6GHZ_CTRL_REG_SP_AP: + bss_conf->power_type = IEEE80211_REG_SP_AP; + break; + default: + bss_conf->power_type = IEEE80211_REG_UNSET_AP; + break; + } + + if (!eht_oper || + !(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT)) { + switch (u8_get_bits(he_6ghz_oper->control, + IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH)) { + case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ: + he_chandef.width = NL80211_CHAN_WIDTH_20; + break; + case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ: + he_chandef.width = NL80211_CHAN_WIDTH_40; + break; + case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ: + he_chandef.width = NL80211_CHAN_WIDTH_80; + break; + case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ: + he_chandef.width = NL80211_CHAN_WIDTH_80; + if (!he_6ghz_oper->ccfs1) + break; + if (abs(he_6ghz_oper->ccfs1 - he_6ghz_oper->ccfs0) == 8) { + if (support_160) + he_chandef.width = NL80211_CHAN_WIDTH_160; + } else { + if (support_80_80) + he_chandef.width = NL80211_CHAN_WIDTH_80P80; + } + break; + } + + if (he_chandef.width == NL80211_CHAN_WIDTH_160) { + he_chandef.center_freq1 = + ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1, + NL80211_BAND_6GHZ); + } else { + he_chandef.center_freq1 = + ieee80211_channel_to_frequency(he_6ghz_oper->ccfs0, + NL80211_BAND_6GHZ); + if (support_80_80 || support_160) + he_chandef.center_freq2 = + ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1, + NL80211_BAND_6GHZ); + } + } else { + eht_phy_cap = eht_cap->eht_cap_elem.phy_cap_info[0]; + support_320 = + eht_phy_cap & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; + + ieee80211_chandef_eht_oper(eht_oper, support_160, + support_320, &he_chandef); + } + + if (!cfg80211_chandef_valid(&he_chandef)) { + sdata_info(sdata, + "HE 6GHz operation resulted in invalid chandef: %d MHz/%d/%d MHz/%d MHz\n", + he_chandef.chan ? 
he_chandef.chan->center_freq : 0, + he_chandef.width, + he_chandef.center_freq1, + he_chandef.center_freq2); + return false; + } + + *chandef = he_chandef; + + return true; +} + +bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper, + struct cfg80211_chan_def *chandef) +{ + u32 oper_freq; + + if (!oper) + return false; + + switch (FIELD_GET(S1G_OPER_CH_WIDTH_OPER, oper->ch_width)) { + case IEEE80211_S1G_CHANWIDTH_1MHZ: + chandef->width = NL80211_CHAN_WIDTH_1; + break; + case IEEE80211_S1G_CHANWIDTH_2MHZ: + chandef->width = NL80211_CHAN_WIDTH_2; + break; + case IEEE80211_S1G_CHANWIDTH_4MHZ: + chandef->width = NL80211_CHAN_WIDTH_4; + break; + case IEEE80211_S1G_CHANWIDTH_8MHZ: + chandef->width = NL80211_CHAN_WIDTH_8; + break; + case IEEE80211_S1G_CHANWIDTH_16MHZ: + chandef->width = NL80211_CHAN_WIDTH_16; + break; + default: + return false; + } + + oper_freq = ieee80211_channel_to_freq_khz(oper->oper_ch, + NL80211_BAND_S1GHZ); + chandef->center_freq1 = KHZ_TO_MHZ(oper_freq); + chandef->freq1_offset = oper_freq % 1000; + + return true; +} + +int ieee80211_parse_bitrates(enum nl80211_chan_width width, + const struct ieee80211_supported_band *sband, + const u8 *srates, int srates_len, u32 *rates) +{ + u32 rate_flags = ieee80211_chanwidth_rate_flags(width); + int shift = ieee80211_chanwidth_get_shift(width); + struct ieee80211_rate *br; + int brate, rate, i, j, count = 0; + + *rates = 0; + + for (i = 0; i < srates_len; i++) { + rate = srates[i] & 0x7f; + + for (j = 0; j < sband->n_bitrates; j++) { + br = &sband->bitrates[j]; + if ((rate_flags & br->flags) != rate_flags) + continue; + + brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5); + if (brate == rate) { + *rates |= BIT(j); + count++; + break; + } + } + } + return count; +} + +int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, bool need_basic, + enum nl80211_band band) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; + int rate, shift; + u8 i, rates, *pos; + u32 basic_rates = sdata->vif.bss_conf.basic_rates; + u32 rate_flags; + + shift = ieee80211_vif_get_shift(&sdata->vif); + rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); + sband = local->hw.wiphy->bands[band]; + rates = 0; + for (i = 0; i < sband->n_bitrates; i++) { + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + rates++; + } + if (rates > 8) + rates = 8; + + if (skb_tailroom(skb) < rates + 2) + return -ENOMEM; + + pos = skb_put(skb, rates + 2); + *pos++ = WLAN_EID_SUPP_RATES; + *pos++ = rates; + for (i = 0; i < rates; i++) { + u8 basic = 0; + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + + if (need_basic && basic_rates & BIT(i)) + basic = 0x80; + rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, + 5 * (1 << shift)); + *pos++ = basic | (u8) rate; + } + + return 0; +} + +int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, bool need_basic, + enum nl80211_band band) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; + int rate, shift; + u8 i, exrates, *pos; + u32 basic_rates = sdata->vif.bss_conf.basic_rates; + u32 rate_flags; + + rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); + shift = ieee80211_vif_get_shift(&sdata->vif); + + sband = local->hw.wiphy->bands[band]; + exrates = 0; + for (i = 0; i < sband->n_bitrates; i++) { + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + exrates++; + 
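/*
 * Rates are written in 500 kbps units with the top bit marking a
 * basic rate, e.g. a basic 6 Mbps rate becomes 0x8c (12 | 0x80).
 * The first eight usable rates go into the Supported Rates element;
 * only the remainder ends up in this Extended Supported Rates element.
 */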
} + + if (exrates > 8) + exrates -= 8; + else + exrates = 0; + + if (skb_tailroom(skb) < exrates + 2) + return -ENOMEM; + + if (exrates) { + pos = skb_put(skb, exrates + 2); + *pos++ = WLAN_EID_EXT_SUPP_RATES; + *pos++ = exrates; + for (i = 8; i < sband->n_bitrates; i++) { + u8 basic = 0; + if ((rate_flags & sband->bitrates[i].flags) + != rate_flags) + continue; + if (need_basic && basic_rates & BIT(i)) + basic = 0x80; + rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, + 5 * (1 << shift)); + *pos++ = basic | (u8) rate; + } + } + return 0; +} + +int ieee80211_ave_rssi(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION)) + return 0; + + return -ewma_beacon_signal_read(&sdata->deflink.u.mgd.ave_beacon_signal); +} +EXPORT_SYMBOL_GPL(ieee80211_ave_rssi); + +u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs) +{ + if (!mcs) + return 1; + + /* TODO: consider rx_highest */ + + if (mcs->rx_mask[3]) + return 4; + if (mcs->rx_mask[2]) + return 3; + if (mcs->rx_mask[1]) + return 2; + return 1; +} + +/** + * ieee80211_calculate_rx_timestamp - calculate timestamp in frame + * @local: mac80211 hw info struct + * @status: RX status + * @mpdu_len: total MPDU length (including FCS) + * @mpdu_offset: offset into MPDU to calculate timestamp at + * + * This function calculates the RX timestamp at the given MPDU offset, taking + * into account what the RX timestamp was. An offset of 0 will just normalize + * the timestamp to TSF at beginning of MPDU reception. + */ +u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, + struct ieee80211_rx_status *status, + unsigned int mpdu_len, + unsigned int mpdu_offset) +{ + u64 ts = status->mactime; + struct rate_info ri; + u16 rate; + u8 n_ltf; + + if (WARN_ON(!ieee80211_have_rx_timestamp(status))) + return 0; + + memset(&ri, 0, sizeof(ri)); + + ri.bw = status->bw; + + /* Fill cfg80211 rate info */ + switch (status->encoding) { + case RX_ENC_EHT: + ri.flags |= RATE_INFO_FLAGS_EHT_MCS; + ri.mcs = status->rate_idx; + ri.nss = status->nss; + ri.eht_ru_alloc = status->eht.ru; + if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) + ri.flags |= RATE_INFO_FLAGS_SHORT_GI; + /* TODO/FIXME: is this right? handle other PPDUs */ + if (status->flag & RX_FLAG_MACTIME_PLCP_START) { + mpdu_offset += 2; + ts += 36; + } + break; + case RX_ENC_HE: + ri.flags |= RATE_INFO_FLAGS_HE_MCS; + ri.mcs = status->rate_idx; + ri.nss = status->nss; + ri.he_ru_alloc = status->he_ru; + if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) + ri.flags |= RATE_INFO_FLAGS_SHORT_GI; + + /* + * See P802.11ax_D6.0, section 27.3.4 for + * VHT PPDU format. + */ + if (status->flag & RX_FLAG_MACTIME_PLCP_START) { + mpdu_offset += 2; + ts += 36; + + /* + * TODO: + * For HE MU PPDU, add the HE-SIG-B. + * For HE ER PPDU, add 8us for the HE-SIG-A. + * For HE TB PPDU, add 4us for the HE-STF. + * Add the HE-LTF durations - variable. + */ + } + + break; + case RX_ENC_HT: + ri.mcs = status->rate_idx; + ri.flags |= RATE_INFO_FLAGS_MCS; + if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) + ri.flags |= RATE_INFO_FLAGS_SHORT_GI; + + /* + * See P802.11REVmd_D3.0, section 19.3.2 for + * HT PPDU format. + */ + if (status->flag & RX_FLAG_MACTIME_PLCP_START) { + mpdu_offset += 2; + if (status->enc_flags & RX_ENC_FLAG_HT_GF) + ts += 24; + else + ts += 32; + + /* + * Add Data HT-LTFs per streams + * TODO: add Extension HT-LTFs, 4us per LTF + */ + n_ltf = ((ri.mcs >> 3) & 3) + 1; + n_ltf = n_ltf == 3 ? 
4 : n_ltf; + ts += n_ltf * 4; + } + + break; + case RX_ENC_VHT: + ri.flags |= RATE_INFO_FLAGS_VHT_MCS; + ri.mcs = status->rate_idx; + ri.nss = status->nss; + if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) + ri.flags |= RATE_INFO_FLAGS_SHORT_GI; + + /* + * See P802.11REVmd_D3.0, section 21.3.2 for + * VHT PPDU format. + */ + if (status->flag & RX_FLAG_MACTIME_PLCP_START) { + mpdu_offset += 2; + ts += 36; + + /* + * Add VHT-LTFs per streams + */ + n_ltf = (ri.nss != 1) && (ri.nss % 2) ? + ri.nss + 1 : ri.nss; + ts += 4 * n_ltf; + } + + break; + default: + WARN_ON(1); + fallthrough; + case RX_ENC_LEGACY: { + struct ieee80211_supported_band *sband; + int shift = 0; + int bitrate; + + switch (status->bw) { + case RATE_INFO_BW_10: + shift = 1; + break; + case RATE_INFO_BW_5: + shift = 2; + break; + } + + sband = local->hw.wiphy->bands[status->band]; + bitrate = sband->bitrates[status->rate_idx].bitrate; + ri.legacy = DIV_ROUND_UP(bitrate, (1 << shift)); + + if (status->flag & RX_FLAG_MACTIME_PLCP_START) { + if (status->band == NL80211_BAND_5GHZ) { + ts += 20 << shift; + mpdu_offset += 2; + } else if (status->enc_flags & RX_ENC_FLAG_SHORTPRE) { + ts += 96; + } else { + ts += 192; + } + } + break; + } + } + + rate = cfg80211_calculate_bitrate(&ri); + if (WARN_ONCE(!rate, + "Invalid bitrate: flags=0x%llx, idx=%d, vht_nss=%d\n", + (unsigned long long)status->flag, status->rate_idx, + status->nss)) + return 0; + + /* rewind from end of MPDU */ + if (status->flag & RX_FLAG_MACTIME_END) + ts -= mpdu_len * 8 * 10 / rate; + + ts += mpdu_offset * 8 * 10 / rate; + + return ts; +} + +void ieee80211_dfs_cac_cancel(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + struct cfg80211_chan_def chandef; + + /* for interface list, to avoid linking iflist_mtx and chanctx_mtx */ + lockdep_assert_wiphy(local->hw.wiphy); + + mutex_lock(&local->mtx); + list_for_each_entry(sdata, &local->interfaces, list) { + /* it might be waiting for the local->mtx, but then + * by the time it gets it, sdata->wdev.cac_started + * will no longer be true + */ + cancel_delayed_work(&sdata->deflink.dfs_cac_timer_work); + + if (sdata->wdev.cac_started) { + chandef = sdata->vif.bss_conf.chandef; + ieee80211_link_release_channel(&sdata->deflink); + cfg80211_cac_event(sdata->dev, + &chandef, + NL80211_RADAR_CAC_ABORTED, + GFP_KERNEL); + } + } + mutex_unlock(&local->mtx); +} + +void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy, + struct wiphy_work *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, radar_detected_work); + struct cfg80211_chan_def chandef = local->hw.conf.chandef; + struct ieee80211_chanctx *ctx; + int num_chanctx = 0; + + mutex_lock(&local->chanctx_mtx); + list_for_each_entry(ctx, &local->chanctx_list, list) { + if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) + continue; + + num_chanctx++; + chandef = ctx->conf.def; + } + mutex_unlock(&local->chanctx_mtx); + + ieee80211_dfs_cac_cancel(local); + + if (num_chanctx > 1) + /* XXX: multi-channel is not supported yet */ + WARN_ON(1); + else + cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL); +} + +void ieee80211_radar_detected(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + trace_api_radar_detected(local); + + wiphy_work_queue(hw->wiphy, &local->radar_detected_work); +} +EXPORT_SYMBOL(ieee80211_radar_detected); + +ieee80211_conn_flags_t ieee80211_chandef_downgrade(struct cfg80211_chan_def *c) +{ + ieee80211_conn_flags_t ret; + int tmp; + + switch (c->width) { 
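/*
 * Worked example for the centre-frequency arithmetic in the
 * 80/160/320 MHz cases below: downgrading an 80 MHz chandef with
 * center_freq1 = 5210 MHz and the primary channel at 5240 MHz gives
 * tmp = (30 + 5240 - 5210) / 20 = 3, tmp /= 2 -> 1, so the new
 * 40 MHz centre is 5210 - 20 + 40 * 1 = 5230 MHz, i.e. the 40 MHz
 * pair that contains the primary channel.
 */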
+ case NL80211_CHAN_WIDTH_20: + c->width = NL80211_CHAN_WIDTH_20_NOHT; + ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_VHT; + break; + case NL80211_CHAN_WIDTH_40: + c->width = NL80211_CHAN_WIDTH_20; + c->center_freq1 = c->chan->center_freq; + ret = IEEE80211_CONN_DISABLE_40MHZ | + IEEE80211_CONN_DISABLE_VHT; + break; + case NL80211_CHAN_WIDTH_80: + tmp = (30 + c->chan->center_freq - c->center_freq1)/20; + /* n_P40 */ + tmp /= 2; + /* freq_P40 */ + c->center_freq1 = c->center_freq1 - 20 + 40 * tmp; + c->width = NL80211_CHAN_WIDTH_40; + ret = IEEE80211_CONN_DISABLE_VHT; + break; + case NL80211_CHAN_WIDTH_80P80: + c->center_freq2 = 0; + c->width = NL80211_CHAN_WIDTH_80; + ret = IEEE80211_CONN_DISABLE_80P80MHZ | + IEEE80211_CONN_DISABLE_160MHZ; + break; + case NL80211_CHAN_WIDTH_160: + /* n_P20 */ + tmp = (70 + c->chan->center_freq - c->center_freq1)/20; + /* n_P80 */ + tmp /= 4; + c->center_freq1 = c->center_freq1 - 40 + 80 * tmp; + c->width = NL80211_CHAN_WIDTH_80; + ret = IEEE80211_CONN_DISABLE_80P80MHZ | + IEEE80211_CONN_DISABLE_160MHZ; + break; + case NL80211_CHAN_WIDTH_320: + /* n_P20 */ + tmp = (150 + c->chan->center_freq - c->center_freq1) / 20; + /* n_P160 */ + tmp /= 8; + c->center_freq1 = c->center_freq1 - 80 + 160 * tmp; + c->width = NL80211_CHAN_WIDTH_160; + ret = IEEE80211_CONN_DISABLE_320MHZ; + break; + default: + case NL80211_CHAN_WIDTH_20_NOHT: + WARN_ON_ONCE(1); + c->width = NL80211_CHAN_WIDTH_20_NOHT; + ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_VHT; + break; + case NL80211_CHAN_WIDTH_1: + case NL80211_CHAN_WIDTH_2: + case NL80211_CHAN_WIDTH_4: + case NL80211_CHAN_WIDTH_8: + case NL80211_CHAN_WIDTH_16: + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: + WARN_ON_ONCE(1); + /* keep c->width */ + ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_VHT; + break; + } + + WARN_ON_ONCE(!cfg80211_chandef_valid(c)); + + return ret; +} + +/* + * Returns true if smps_mode_new is strictly more restrictive than + * smps_mode_old. 
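 * In other words, the restrictiveness ordering used below is
 * OFF < DYNAMIC < STATIC.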
+ */ +bool ieee80211_smps_is_restrictive(enum ieee80211_smps_mode smps_mode_old, + enum ieee80211_smps_mode smps_mode_new) +{ + if (WARN_ON_ONCE(smps_mode_old == IEEE80211_SMPS_AUTOMATIC || + smps_mode_new == IEEE80211_SMPS_AUTOMATIC)) + return false; + + switch (smps_mode_old) { + case IEEE80211_SMPS_STATIC: + return false; + case IEEE80211_SMPS_DYNAMIC: + return smps_mode_new == IEEE80211_SMPS_STATIC; + case IEEE80211_SMPS_OFF: + return smps_mode_new != IEEE80211_SMPS_OFF; + default: + WARN_ON(1); + } + + return false; +} + +int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata, + struct cfg80211_csa_settings *csa_settings) +{ + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt; + struct ieee80211_local *local = sdata->local; + int freq; + int hdr_len = offsetofend(struct ieee80211_mgmt, + u.action.u.chan_switch); + u8 *pos; + + if (sdata->vif.type != NL80211_IFTYPE_ADHOC && + sdata->vif.type != NL80211_IFTYPE_MESH_POINT) + return -EOPNOTSUPP; + + skb = dev_alloc_skb(local->tx_headroom + hdr_len + + 5 + /* channel switch announcement element */ + 3 + /* secondary channel offset element */ + 5 + /* wide bandwidth channel switch announcement */ + 8); /* mesh channel switch parameters element */ + if (!skb) + return -ENOMEM; + + skb_reserve(skb, local->tx_headroom); + mgmt = skb_put_zero(skb, hdr_len); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + + eth_broadcast_addr(mgmt->da); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + if (ieee80211_vif_is_mesh(&sdata->vif)) { + memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); + } else { + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN); + } + mgmt->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT; + mgmt->u.action.u.chan_switch.action_code = WLAN_ACTION_SPCT_CHL_SWITCH; + pos = skb_put(skb, 5); + *pos++ = WLAN_EID_CHANNEL_SWITCH; /* EID */ + *pos++ = 3; /* IE length */ + *pos++ = csa_settings->block_tx ? 1 : 0; /* CSA mode */ + freq = csa_settings->chandef.chan->center_freq; + *pos++ = ieee80211_frequency_to_channel(freq); /* channel */ + *pos++ = csa_settings->count; /* count */ + + if (csa_settings->chandef.width == NL80211_CHAN_WIDTH_40) { + enum nl80211_channel_type ch_type; + + skb_put(skb, 3); + *pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET; /* EID */ + *pos++ = 1; /* IE length */ + ch_type = cfg80211_get_chandef_type(&csa_settings->chandef); + if (ch_type == NL80211_CHAN_HT40PLUS) + *pos++ = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + else + *pos++ = IEEE80211_HT_PARAM_CHA_SEC_BELOW; + } + + if (ieee80211_vif_is_mesh(&sdata->vif)) { + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + skb_put(skb, 8); + *pos++ = WLAN_EID_CHAN_SWITCH_PARAM; /* EID */ + *pos++ = 6; /* IE length */ + *pos++ = sdata->u.mesh.mshcfg.dot11MeshTTL; /* Mesh TTL */ + *pos = 0x00; /* Mesh Flag: Tx Restrict, Initiator, Reason */ + *pos |= WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; + *pos++ |= csa_settings->block_tx ? 
+ WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00; + put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos); /* Reason Cd */ + pos += 2; + put_unaligned_le16(ifmsh->pre_value, pos);/* Precedence Value */ + pos += 2; + } + + if (csa_settings->chandef.width == NL80211_CHAN_WIDTH_80 || + csa_settings->chandef.width == NL80211_CHAN_WIDTH_80P80 || + csa_settings->chandef.width == NL80211_CHAN_WIDTH_160) { + skb_put(skb, 5); + ieee80211_ie_build_wide_bw_cs(pos, &csa_settings->chandef); + } + + ieee80211_tx_skb(sdata, skb); + return 0; +} + +static bool +ieee80211_extend_noa_desc(struct ieee80211_noa_data *data, u32 tsf, int i) +{ + s32 end = data->desc[i].start + data->desc[i].duration - (tsf + 1); + int skip; + + if (end > 0) + return false; + + /* One shot NOA */ + if (data->count[i] == 1) + return false; + + if (data->desc[i].interval == 0) + return false; + + /* End time is in the past, check for repetitions */ + skip = DIV_ROUND_UP(-end, data->desc[i].interval); + if (data->count[i] < 255) { + if (data->count[i] <= skip) { + data->count[i] = 0; + return false; + } + + data->count[i] -= skip; + } + + data->desc[i].start += skip * data->desc[i].interval; + + return true; +} + +static bool +ieee80211_extend_absent_time(struct ieee80211_noa_data *data, u32 tsf, + s32 *offset) +{ + bool ret = false; + int i; + + for (i = 0; i < IEEE80211_P2P_NOA_DESC_MAX; i++) { + s32 cur; + + if (!data->count[i]) + continue; + + if (ieee80211_extend_noa_desc(data, tsf + *offset, i)) + ret = true; + + cur = data->desc[i].start - tsf; + if (cur > *offset) + continue; + + cur = data->desc[i].start + data->desc[i].duration - tsf; + if (cur > *offset) + *offset = cur; + } + + return ret; +} + +static u32 +ieee80211_get_noa_absent_time(struct ieee80211_noa_data *data, u32 tsf) +{ + s32 offset = 0; + int tries = 0; + /* + * arbitrary limit, used to avoid infinite loops when combined NoA + * descriptors cover the full time period. 
+ */ + int max_tries = 5; + + ieee80211_extend_absent_time(data, tsf, &offset); + do { + if (!ieee80211_extend_absent_time(data, tsf, &offset)) + break; + + tries++; + } while (tries < max_tries); + + return offset; +} + +void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf) +{ + u32 next_offset = BIT(31) - 1; + int i; + + data->absent = 0; + data->has_next_tsf = false; + for (i = 0; i < IEEE80211_P2P_NOA_DESC_MAX; i++) { + s32 start; + + if (!data->count[i]) + continue; + + ieee80211_extend_noa_desc(data, tsf, i); + start = data->desc[i].start - tsf; + if (start <= 0) + data->absent |= BIT(i); + + if (next_offset > start) + next_offset = start; + + data->has_next_tsf = true; + } + + if (data->absent) + next_offset = ieee80211_get_noa_absent_time(data, tsf); + + data->next_tsf = tsf + next_offset; +} +EXPORT_SYMBOL(ieee80211_update_p2p_noa); + +int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr, + struct ieee80211_noa_data *data, u32 tsf) +{ + int ret = 0; + int i; + + memset(data, 0, sizeof(*data)); + + for (i = 0; i < IEEE80211_P2P_NOA_DESC_MAX; i++) { + const struct ieee80211_p2p_noa_desc *desc = &attr->desc[i]; + + if (!desc->count || !desc->duration) + continue; + + data->count[i] = desc->count; + data->desc[i].start = le32_to_cpu(desc->start_time); + data->desc[i].duration = le32_to_cpu(desc->duration); + data->desc[i].interval = le32_to_cpu(desc->interval); + + if (data->count[i] > 1 && + data->desc[i].interval < data->desc[i].duration) + continue; + + ieee80211_extend_noa_desc(data, tsf, i); + ret++; + } + + if (ret) + ieee80211_update_p2p_noa(data, tsf); + + return ret; +} +EXPORT_SYMBOL(ieee80211_parse_p2p_noa); + +void ieee80211_recalc_dtim(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + u64 tsf = drv_get_tsf(local, sdata); + u64 dtim_count = 0; + u16 beacon_int = sdata->vif.bss_conf.beacon_int * 1024; + u8 dtim_period = sdata->vif.bss_conf.dtim_period; + struct ps_data *ps; + u8 bcns_from_dtim; + + if (tsf == -1ULL || !beacon_int || !dtim_period) + return; + + if (sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + if (!sdata->bss) + return; + + ps = &sdata->bss->ps; + } else if (ieee80211_vif_is_mesh(&sdata->vif)) { + ps = &sdata->u.mesh.ps; + } else { + return; + } + + /* + * actually finds last dtim_count, mac80211 will update in + * __beacon_add_tim(). + * dtim_count = dtim_period - (tsf / bcn_int) % dtim_period + */ + do_div(tsf, beacon_int); + bcns_from_dtim = do_div(tsf, dtim_period); + /* just had a DTIM */ + if (!bcns_from_dtim) + dtim_count = 0; + else + dtim_count = dtim_period - bcns_from_dtim; + + ps->dtim_count = dtim_count; +} + +static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx) +{ + struct ieee80211_link_data *link; + u8 radar_detect = 0; + + lockdep_assert_held(&local->chanctx_mtx); + + if (WARN_ON(ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)) + return 0; + + list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) + if (link->reserved_radar_required) + radar_detect |= BIT(link->reserved_chandef.width); + + /* + * An in-place reservation context should not have any assigned vifs + * until it replaces the other context. 
+ WARN_ON(ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER && + !list_empty(&ctx->assigned_links)); + + list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list) { + if (!link->radar_required) + continue; + + radar_detect |= + BIT(link->conf->chandef.width); + } + + return radar_detect; +} + +int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, + const struct cfg80211_chan_def *chandef, + enum ieee80211_chanctx_mode chanmode, + u8 radar_detect) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_sub_if_data *sdata_iter; + enum nl80211_iftype iftype = sdata->wdev.iftype; + struct ieee80211_chanctx *ctx; + int total = 1; + struct iface_combination_params params = { + .radar_detect = radar_detect, + }; + + lockdep_assert_held(&local->chanctx_mtx); + + if (WARN_ON(hweight32(radar_detect) > 1)) + return -EINVAL; + + if (WARN_ON(chandef && chanmode == IEEE80211_CHANCTX_SHARED && + !chandef->chan)) + return -EINVAL; + + if (WARN_ON(iftype >= NUM_NL80211_IFTYPES)) + return -EINVAL; + + if (sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_MESH_POINT) { + /* + * always passing this is harmless, since it'll be the + * same value that cfg80211 finds if it finds the same + * interface ... and that's always allowed + */ + params.new_beacon_int = sdata->vif.bss_conf.beacon_int; + } + + /* Always allow software iftypes */ + if (cfg80211_iftype_allowed(local->hw.wiphy, iftype, 0, 1)) { + if (radar_detect) + return -EINVAL; + return 0; + } + + if (chandef) + params.num_different_channels = 1; + + if (iftype != NL80211_IFTYPE_UNSPECIFIED) + params.iftype_num[iftype] = 1; + + list_for_each_entry(ctx, &local->chanctx_list, list) { + if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) + continue; + params.radar_detect |= + ieee80211_chanctx_radar_detect(local, ctx); + if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) { + params.num_different_channels++; + continue; + } + if (chandef && chanmode == IEEE80211_CHANCTX_SHARED && + cfg80211_chandef_compatible(chandef, + &ctx->conf.def)) + continue; + params.num_different_channels++; + } + + list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) { + struct wireless_dev *wdev_iter; + + wdev_iter = &sdata_iter->wdev; + + if (sdata_iter == sdata || + !ieee80211_sdata_running(sdata_iter) || + cfg80211_iftype_allowed(local->hw.wiphy, + wdev_iter->iftype, 0, 1)) + continue; + + params.iftype_num[wdev_iter->iftype]++; + total++; + } + + if (total == 1 && !params.radar_detect) + return 0; + + return cfg80211_check_combinations(local->hw.wiphy, &params); +} + +static void +ieee80211_iter_max_chans(const struct ieee80211_iface_combination *c, + void *data) +{ + u32 *max_num_different_channels = data; + + *max_num_different_channels = max(*max_num_different_channels, + c->num_different_channels); +} + +int ieee80211_max_num_channels(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_chanctx *ctx; + u32 max_num_different_channels = 1; + int err; + struct iface_combination_params params = {0}; + + lockdep_assert_held(&local->chanctx_mtx); + + list_for_each_entry(ctx, &local->chanctx_list, list) { + if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) + continue; + + params.num_different_channels++; + + params.radar_detect |= + ieee80211_chanctx_radar_detect(local, ctx); + } + + list_for_each_entry_rcu(sdata, &local->interfaces, list) + params.iftype_num[sdata->wdev.iftype]++; + + err = cfg80211_iter_combinations(local->hw.wiphy, &params, +
ieee80211_iter_max_chans, + &max_num_different_channels); + if (err < 0) + return err; + + return max_num_different_channels; +} + +void ieee80211_add_s1g_capab_ie(struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta_s1g_cap *caps, + struct sk_buff *skb) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_s1g_cap s1g_capab; + u8 *pos; + int i; + + if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) + return; + + if (!caps->s1g) + return; + + memcpy(s1g_capab.capab_info, caps->cap, sizeof(caps->cap)); + memcpy(s1g_capab.supp_mcs_nss, caps->nss_mcs, sizeof(caps->nss_mcs)); + + /* override the capability info */ + for (i = 0; i < sizeof(ifmgd->s1g_capa.capab_info); i++) { + u8 mask = ifmgd->s1g_capa_mask.capab_info[i]; + + s1g_capab.capab_info[i] &= ~mask; + s1g_capab.capab_info[i] |= ifmgd->s1g_capa.capab_info[i] & mask; + } + + /* then MCS and NSS set */ + for (i = 0; i < sizeof(ifmgd->s1g_capa.supp_mcs_nss); i++) { + u8 mask = ifmgd->s1g_capa_mask.supp_mcs_nss[i]; + + s1g_capab.supp_mcs_nss[i] &= ~mask; + s1g_capab.supp_mcs_nss[i] |= + ifmgd->s1g_capa.supp_mcs_nss[i] & mask; + } + + pos = skb_put(skb, 2 + sizeof(s1g_capab)); + *pos++ = WLAN_EID_S1G_CAPABILITIES; + *pos++ = sizeof(s1g_capab); + + memcpy(pos, &s1g_capab, sizeof(s1g_capab)); +} + +void ieee80211_add_aid_request_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + u8 *pos = skb_put(skb, 3); + + *pos++ = WLAN_EID_AID_REQUEST; + *pos++ = 1; + *pos++ = 0; +} + +u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo) +{ + *buf++ = WLAN_EID_VENDOR_SPECIFIC; + *buf++ = 7; /* len */ + *buf++ = 0x00; /* Microsoft OUI 00:50:F2 */ + *buf++ = 0x50; + *buf++ = 0xf2; + *buf++ = 2; /* WME */ + *buf++ = 0; /* WME info */ + *buf++ = 1; /* WME ver */ + *buf++ = qosinfo; /* U-APSD no in use */ + + return buf; +} + +void ieee80211_txq_get_depth(struct ieee80211_txq *txq, + unsigned long *frame_cnt, + unsigned long *byte_cnt) +{ + struct txq_info *txqi = to_txq_info(txq); + u32 frag_cnt = 0, frag_bytes = 0; + struct sk_buff *skb; + + skb_queue_walk(&txqi->frags, skb) { + frag_cnt++; + frag_bytes += skb->len; + } + + if (frame_cnt) + *frame_cnt = txqi->tin.backlog_packets + frag_cnt; + + if (byte_cnt) + *byte_cnt = txqi->tin.backlog_bytes + frag_bytes; +} +EXPORT_SYMBOL(ieee80211_txq_get_depth); + +const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS] = { + IEEE80211_WMM_IE_STA_QOSINFO_AC_VO, + IEEE80211_WMM_IE_STA_QOSINFO_AC_VI, + IEEE80211_WMM_IE_STA_QOSINFO_AC_BE, + IEEE80211_WMM_IE_STA_QOSINFO_AC_BK +}; + +u16 ieee80211_encode_usf(int listen_interval) +{ + static const int listen_int_usf[] = { 1, 10, 1000, 10000 }; + u16 ui, usf = 0; + + /* find greatest USF */ + while (usf < IEEE80211_MAX_USF) { + if (listen_interval % listen_int_usf[usf + 1]) + break; + usf += 1; + } + ui = listen_interval / listen_int_usf[usf]; + + /* error if there is a remainder. 
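For example, listen_interval = 3000 encodes as USF = 2 (a unit of 1000 beacon intervals) and UI = 3.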
Should've been checked by user */ + WARN_ON_ONCE(ui > IEEE80211_MAX_UI); + listen_interval = FIELD_PREP(LISTEN_INT_USF, usf) | + FIELD_PREP(LISTEN_INT_UI, ui); + + return (u16) listen_interval; +} + +u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype) +{ + const struct ieee80211_sta_he_cap *he_cap; + const struct ieee80211_sta_eht_cap *eht_cap; + struct ieee80211_supported_band *sband; + bool is_ap; + u8 n; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return 0; + + he_cap = ieee80211_get_he_iftype_cap(sband, iftype); + eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype); + if (!he_cap || !eht_cap) + return 0; + + is_ap = iftype == NL80211_IFTYPE_AP || + iftype == NL80211_IFTYPE_P2P_GO; + + n = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem, + &eht_cap->eht_cap_elem, + is_ap); + return 2 + 1 + + sizeof(eht_cap->eht_cap_elem) + n + + ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0], + eht_cap->eht_cap_elem.phy_cap_info); + return 0; +} + +u8 *ieee80211_ie_build_eht_cap(u8 *pos, + const struct ieee80211_sta_he_cap *he_cap, + const struct ieee80211_sta_eht_cap *eht_cap, + u8 *end, + bool for_ap) +{ + u8 mcs_nss_len, ppet_len; + u8 ie_len; + u8 *orig_pos = pos; + + /* Make sure we have place for the IE */ + if (!he_cap || !eht_cap) + return orig_pos; + + mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem, + &eht_cap->eht_cap_elem, + for_ap); + ppet_len = ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0], + eht_cap->eht_cap_elem.phy_cap_info); + + ie_len = 2 + 1 + sizeof(eht_cap->eht_cap_elem) + mcs_nss_len + ppet_len; + if ((end - pos) < ie_len) + return orig_pos; + + *pos++ = WLAN_EID_EXTENSION; + *pos++ = ie_len - 2; + *pos++ = WLAN_EID_EXT_EHT_CAPABILITY; + + /* Fixed data */ + memcpy(pos, &eht_cap->eht_cap_elem, sizeof(eht_cap->eht_cap_elem)); + pos += sizeof(eht_cap->eht_cap_elem); + + memcpy(pos, &eht_cap->eht_mcs_nss_supp, mcs_nss_len); + pos += mcs_nss_len; + + if (ppet_len) { + memcpy(pos, &eht_cap->eht_ppe_thres, ppet_len); + pos += ppet_len; + } + + return pos; +} + +void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos, u8 frag_id) +{ + unsigned int elem_len; + + if (!len_pos) + return; + + elem_len = skb->data + skb->len - len_pos - 1; + + while (elem_len > 255) { + /* this one is 255 */ + *len_pos = 255; + /* remaining data gets smaller */ + elem_len -= 255; + /* make space for the fragment ID/len in SKB */ + skb_put(skb, 2); + /* shift back the remaining data to place fragment ID/len */ + memmove(len_pos + 255 + 3, len_pos + 255 + 1, elem_len); + /* place the fragment ID */ + len_pos += 255 + 1; + *len_pos = frag_id; + /* and point to fragment length to update later */ + len_pos++; + } + + *len_pos = elem_len; +} diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c new file mode 100644 index 0000000000..b3a5c3e96a --- /dev/null +++ b/net/mac80211/vht.c @@ -0,0 +1,793 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * VHT handling + * + * Portions of this file + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright (C) 2018 - 2023 Intel Corporation + */ + +#include <linux/ieee80211.h> +#include <linux/export.h> +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "rate.h" + + +static void __check_vhtcap_disable(struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta_vht_cap *vht_cap, + u32 flag) +{ + __le32 le_flag = cpu_to_le32(flag); + + if (sdata->u.mgd.vht_capa_mask.vht_cap_info & le_flag && + !(sdata->u.mgd.vht_capa.vht_cap_info & le_flag)) + vht_cap->cap &= ~flag; +} + +void 
ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta_vht_cap *vht_cap) +{ + int i; + u16 rxmcs_mask, rxmcs_cap, rxmcs_n, txmcs_mask, txmcs_cap, txmcs_n; + + if (!vht_cap->vht_supported) + return; + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return; + + __check_vhtcap_disable(sdata, vht_cap, + IEEE80211_VHT_CAP_RXLDPC); + __check_vhtcap_disable(sdata, vht_cap, + IEEE80211_VHT_CAP_SHORT_GI_80); + __check_vhtcap_disable(sdata, vht_cap, + IEEE80211_VHT_CAP_SHORT_GI_160); + __check_vhtcap_disable(sdata, vht_cap, + IEEE80211_VHT_CAP_TXSTBC); + __check_vhtcap_disable(sdata, vht_cap, + IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE); + __check_vhtcap_disable(sdata, vht_cap, + IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE); + __check_vhtcap_disable(sdata, vht_cap, + IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN); + __check_vhtcap_disable(sdata, vht_cap, + IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN); + + /* Allow user to decrease AMPDU length exponent */ + if (sdata->u.mgd.vht_capa_mask.vht_cap_info & + cpu_to_le32(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK)) { + u32 cap, n; + + n = le32_to_cpu(sdata->u.mgd.vht_capa.vht_cap_info) & + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; + n >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + cap = vht_cap->cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; + cap >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + + if (n < cap) { + vht_cap->cap &= + ~IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; + vht_cap->cap |= + n << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + } + } + + /* Allow the user to decrease MCSes */ + rxmcs_mask = + le16_to_cpu(sdata->u.mgd.vht_capa_mask.supp_mcs.rx_mcs_map); + rxmcs_n = le16_to_cpu(sdata->u.mgd.vht_capa.supp_mcs.rx_mcs_map); + rxmcs_n &= rxmcs_mask; + rxmcs_cap = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); + + txmcs_mask = + le16_to_cpu(sdata->u.mgd.vht_capa_mask.supp_mcs.tx_mcs_map); + txmcs_n = le16_to_cpu(sdata->u.mgd.vht_capa.supp_mcs.tx_mcs_map); + txmcs_n &= txmcs_mask; + txmcs_cap = le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); + for (i = 0; i < 8; i++) { + u8 m, n, c; + + m = (rxmcs_mask >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + n = (rxmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + c = (rxmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + + if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) || + n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) { + rxmcs_cap &= ~(3 << 2*i); + rxmcs_cap |= (rxmcs_n & (3 << 2*i)); + } + + m = (txmcs_mask >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + n = (txmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + c = (txmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + + if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) || + n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) { + txmcs_cap &= ~(3 << 2*i); + txmcs_cap |= (txmcs_n & (3 << 2*i)); + } + } + vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_cap); + vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_cap); +} + +void +ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const struct ieee80211_vht_cap *vht_cap_ie, + const struct ieee80211_vht_cap *vht_cap_ie2, + struct link_sta_info *link_sta) +{ + struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap; + struct ieee80211_sta_vht_cap own_cap; + u32 cap_info, i; + bool have_80mhz; + u32 mpdu_len; + + memset(vht_cap, 0, sizeof(*vht_cap)); + + if (!link_sta->pub->ht_cap.ht_supported) + return; + + if (!vht_cap_ie || !sband->vht_cap.vht_supported) + return; + + /* 
Allow VHT if at least one channel on the sband supports 80 MHz */ + have_80mhz = false; + for (i = 0; i < sband->n_channels; i++) { + if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED | + IEEE80211_CHAN_NO_80MHZ)) + continue; + + have_80mhz = true; + break; + } + + if (!have_80mhz) + return; + + /* + * A VHT STA must support 40 MHz, but if we verify that here + * then we break a few things - some APs (e.g. Netgear R6300v2 + * and others based on the BCM4360 chipset) will unset this + * capability bit when operating in 20 MHz. + */ + + vht_cap->vht_supported = true; + + own_cap = sband->vht_cap; + /* + * If user has specified capability overrides, take care + * of that if the station we're setting up is the AP that + * we advertised a restricted capability set to. Override + * our own capabilities and then use those below. + */ + if (sdata->vif.type == NL80211_IFTYPE_STATION && + !test_sta_flag(link_sta->sta, WLAN_STA_TDLS_PEER)) + ieee80211_apply_vhtcap_overrides(sdata, &own_cap); + + /* take some capabilities as-is */ + cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info); + vht_cap->cap = cap_info; + vht_cap->cap &= IEEE80211_VHT_CAP_RXLDPC | + IEEE80211_VHT_CAP_VHT_TXOP_PS | + IEEE80211_VHT_CAP_HTC_VHT | + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | + IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB | + IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB | + IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | + IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; + + vht_cap->cap |= min_t(u32, cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK, + own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK); + + /* and some based on our own capabilities */ + switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: + vht_cap->cap |= cap_info & + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; + break; + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: + vht_cap->cap |= cap_info & + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; + break; + default: + /* nothing */ + break; + } + + /* symmetric capabilities */ + vht_cap->cap |= cap_info & own_cap.cap & + (IEEE80211_VHT_CAP_SHORT_GI_80 | + IEEE80211_VHT_CAP_SHORT_GI_160); + + /* remaining ones */ + if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) + vht_cap->cap |= cap_info & + (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK); + + if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) + vht_cap->cap |= cap_info & + (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK); + + if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) + vht_cap->cap |= cap_info & + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + + if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) + vht_cap->cap |= cap_info & + IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE; + + if (own_cap.cap & IEEE80211_VHT_CAP_TXSTBC) + vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_RXSTBC_MASK; + + if (own_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) + vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_TXSTBC; + + /* Copy peer MCS info, the driver might need them. 
*/ + memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs, + sizeof(struct ieee80211_vht_mcs_info)); + + /* copy EXT_NSS_BW Support value or remove the capability */ + if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_VHT_EXT_NSS_BW)) + vht_cap->cap |= (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK); + else + vht_cap->vht_mcs.tx_highest &= + ~cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); + + /* but also restrict MCSes */ + for (i = 0; i < 8; i++) { + u16 own_rx, own_tx, peer_rx, peer_tx; + + own_rx = le16_to_cpu(own_cap.vht_mcs.rx_mcs_map); + own_rx = (own_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + + own_tx = le16_to_cpu(own_cap.vht_mcs.tx_mcs_map); + own_tx = (own_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + + peer_rx = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); + peer_rx = (peer_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + + peer_tx = le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); + peer_tx = (peer_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + + if (peer_tx != IEEE80211_VHT_MCS_NOT_SUPPORTED) { + if (own_rx == IEEE80211_VHT_MCS_NOT_SUPPORTED) + peer_tx = IEEE80211_VHT_MCS_NOT_SUPPORTED; + else if (own_rx < peer_tx) + peer_tx = own_rx; + } + + if (peer_rx != IEEE80211_VHT_MCS_NOT_SUPPORTED) { + if (own_tx == IEEE80211_VHT_MCS_NOT_SUPPORTED) + peer_rx = IEEE80211_VHT_MCS_NOT_SUPPORTED; + else if (own_tx < peer_rx) + peer_rx = own_tx; + } + + vht_cap->vht_mcs.rx_mcs_map &= + ~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2); + vht_cap->vht_mcs.rx_mcs_map |= cpu_to_le16(peer_rx << i * 2); + + vht_cap->vht_mcs.tx_mcs_map &= + ~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2); + vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2); + } + + /* + * This is a workaround for VHT-enabled STAs which break the spec + * and have the VHT-MCS Rx map filled in with value 3 for all eight + * spacial streams, an example is AR9462. + * + * As per spec, in section 22.1.1 Introduction to the VHT PHY + * A VHT STA shall support at least single spactial stream VHT-MCSs + * 0 to 7 (transmit and receive) in all supported channel widths. + */ + if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) { + vht_cap->vht_supported = false; + sdata_info(sdata, + "Ignoring VHT IE from %pM (link:%pM) due to invalid rx_mcs_map\n", + link_sta->sta->addr, link_sta->addr); + return; + } + + /* finally set up the bandwidth */ + switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: + link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160; + break; + default: + link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80; + + if (!(vht_cap->vht_mcs.tx_highest & + cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE))) + break; + + /* + * If this is non-zero, then it does support 160 MHz after all, + * in one form or the other. We don't distinguish here (or even + * above) between 160 and 80+80 yet. + */ + if (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) + link_sta->cur_max_bandwidth = + IEEE80211_STA_RX_BW_160; + } + + link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta); + + /* + * Work around the Cisco 9115 FW 17.3 bug by taking the min of + * both reported MPDU lengths. + */ + mpdu_len = vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK; + if (vht_cap_ie2) + mpdu_len = min_t(u32, mpdu_len, + le32_get_bits(vht_cap_ie2->vht_cap_info, + IEEE80211_VHT_CAP_MAX_MPDU_MASK)); + + /* + * FIXME - should the amsdu len be per link? store per link + * and maintain a minimum? 
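 * (The three VHT maximum MPDU length codes map directly onto the
 * 3895/7991/11454 octet A-MSDU limits applied below.)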
+ */ + switch (mpdu_len) { + case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: + link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454; + break; + case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991: + link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_7991; + break; + case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895: + default: + link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_3895; + break; + } + + ieee80211_sta_recalc_aggregates(&link_sta->sta->sta); +} + +/* FIXME: move this to some better location - parses HE/EHT now */ +enum ieee80211_sta_rx_bandwidth +ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta) +{ + unsigned int link_id = link_sta->link_id; + struct ieee80211_sub_if_data *sdata = link_sta->sta->sdata; + struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap; + struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap; + struct ieee80211_sta_eht_cap *eht_cap = &link_sta->pub->eht_cap; + u32 cap_width; + + if (he_cap->has_he) { + struct ieee80211_bss_conf *link_conf; + enum ieee80211_sta_rx_bandwidth ret; + u8 info; + + rcu_read_lock(); + link_conf = rcu_dereference(sdata->vif.link_conf[link_id]); + + if (eht_cap->has_eht && + link_conf->chandef.chan->band == NL80211_BAND_6GHZ) { + info = eht_cap->eht_cap_elem.phy_cap_info[0]; + + if (info & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) { + ret = IEEE80211_STA_RX_BW_320; + goto out; + } + } + + info = he_cap->he_cap_elem.phy_cap_info[0]; + + if (link_conf->chandef.chan->band == NL80211_BAND_2GHZ) { + if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G) + ret = IEEE80211_STA_RX_BW_40; + else + ret = IEEE80211_STA_RX_BW_20; + goto out; + } + + if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G || + info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) + ret = IEEE80211_STA_RX_BW_160; + else if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G) + ret = IEEE80211_STA_RX_BW_80; + else + ret = IEEE80211_STA_RX_BW_20; +out: + rcu_read_unlock(); + + return ret; + } + + if (!vht_cap->vht_supported) + return link_sta->pub->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? + IEEE80211_STA_RX_BW_40 : + IEEE80211_STA_RX_BW_20; + + cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; + + if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ || + cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) + return IEEE80211_STA_RX_BW_160; + + /* + * If this is non-zero, then it does support 160 MHz after all, + * in one form or the other. We don't distinguish here (or even + * above) between 160 and 80+80 yet. + */ + if (vht_cap->cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) + return IEEE80211_STA_RX_BW_160; + + return IEEE80211_STA_RX_BW_80; +} + +enum nl80211_chan_width +ieee80211_sta_cap_chan_bw(struct link_sta_info *link_sta) +{ + struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap; + u32 cap_width; + + if (!vht_cap->vht_supported) { + if (!link_sta->pub->ht_cap.ht_supported) + return NL80211_CHAN_WIDTH_20_NOHT; + + return link_sta->pub->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? 
+ NL80211_CHAN_WIDTH_40 : NL80211_CHAN_WIDTH_20; + } + + cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; + + if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ) + return NL80211_CHAN_WIDTH_160; + else if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) + return NL80211_CHAN_WIDTH_80P80; + + return NL80211_CHAN_WIDTH_80; +} + +enum nl80211_chan_width +ieee80211_sta_rx_bw_to_chan_width(struct link_sta_info *link_sta) +{ + enum ieee80211_sta_rx_bandwidth cur_bw = + link_sta->pub->bandwidth; + struct ieee80211_sta_vht_cap *vht_cap = + &link_sta->pub->vht_cap; + u32 cap_width; + + switch (cur_bw) { + case IEEE80211_STA_RX_BW_20: + if (!link_sta->pub->ht_cap.ht_supported) + return NL80211_CHAN_WIDTH_20_NOHT; + else + return NL80211_CHAN_WIDTH_20; + case IEEE80211_STA_RX_BW_40: + return NL80211_CHAN_WIDTH_40; + case IEEE80211_STA_RX_BW_80: + return NL80211_CHAN_WIDTH_80; + case IEEE80211_STA_RX_BW_160: + cap_width = + vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; + + if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ) + return NL80211_CHAN_WIDTH_160; + + return NL80211_CHAN_WIDTH_80P80; + default: + return NL80211_CHAN_WIDTH_20; + } +} + +enum ieee80211_sta_rx_bandwidth +ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width) +{ + switch (width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + return IEEE80211_STA_RX_BW_20; + case NL80211_CHAN_WIDTH_40: + return IEEE80211_STA_RX_BW_40; + case NL80211_CHAN_WIDTH_80: + return IEEE80211_STA_RX_BW_80; + case NL80211_CHAN_WIDTH_160: + case NL80211_CHAN_WIDTH_80P80: + return IEEE80211_STA_RX_BW_160; + case NL80211_CHAN_WIDTH_320: + return IEEE80211_STA_RX_BW_320; + default: + WARN_ON_ONCE(1); + return IEEE80211_STA_RX_BW_20; + } +} + +/* FIXME: rename/move - this deals with everything not just VHT */ +enum ieee80211_sta_rx_bandwidth +ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta) +{ + struct sta_info *sta = link_sta->sta; + struct ieee80211_bss_conf *link_conf; + enum nl80211_chan_width bss_width; + enum ieee80211_sta_rx_bandwidth bw; + + rcu_read_lock(); + link_conf = rcu_dereference(sta->sdata->vif.link_conf[link_sta->link_id]); + if (WARN_ON(!link_conf)) + bss_width = NL80211_CHAN_WIDTH_20_NOHT; + else + bss_width = link_conf->chandef.width; + rcu_read_unlock(); + + bw = ieee80211_sta_cap_rx_bw(link_sta); + bw = min(bw, link_sta->cur_max_bandwidth); + + /* Don't consider AP's bandwidth for TDLS peers, section 11.23.1 of + * IEEE80211-2016 specification makes higher bandwidth operation + * possible on the TDLS link if the peers have wider bandwidth + * capability. + * + * However, in this case, and only if the TDLS peer is authorized, + * limit to the tdls_chandef so that the configuration here isn't + * wider than what's actually requested on the channel context. 
+ */ + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && + test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) && + test_sta_flag(sta, WLAN_STA_AUTHORIZED) && + sta->tdls_chandef.chan) + bw = min(bw, ieee80211_chan_width_to_rx_bw(sta->tdls_chandef.width)); + else + bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width)); + + return bw; +} + +void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta) +{ + u8 ht_rx_nss = 0, vht_rx_nss = 0, he_rx_nss = 0, eht_rx_nss = 0, rx_nss; + bool support_160; + + /* if we received a notification already don't overwrite it */ + if (link_sta->pub->rx_nss) + return; + + if (link_sta->pub->eht_cap.has_eht) { + int i; + const u8 *rx_nss_mcs = (void *)&link_sta->pub->eht_cap.eht_mcs_nss_supp; + + /* get the max nss for EHT over all possible bandwidths and mcs */ + for (i = 0; i < sizeof(struct ieee80211_eht_mcs_nss_supp); i++) + eht_rx_nss = max_t(u8, eht_rx_nss, + u8_get_bits(rx_nss_mcs[i], + IEEE80211_EHT_MCS_NSS_RX)); + } + + if (link_sta->pub->he_cap.has_he) { + int i; + u8 rx_mcs_80 = 0, rx_mcs_160 = 0; + const struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap; + u16 mcs_160_map = + le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); + u16 mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); + + for (i = 7; i >= 0; i--) { + u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3; + + if (mcs_160 != IEEE80211_HE_MCS_NOT_SUPPORTED) { + rx_mcs_160 = i + 1; + break; + } + } + for (i = 7; i >= 0; i--) { + u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3; + + if (mcs_80 != IEEE80211_HE_MCS_NOT_SUPPORTED) { + rx_mcs_80 = i + 1; + break; + } + } + + support_160 = he_cap->he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; + + if (support_160) + he_rx_nss = min(rx_mcs_80, rx_mcs_160); + else + he_rx_nss = rx_mcs_80; + } + + if (link_sta->pub->ht_cap.ht_supported) { + if (link_sta->pub->ht_cap.mcs.rx_mask[0]) + ht_rx_nss++; + if (link_sta->pub->ht_cap.mcs.rx_mask[1]) + ht_rx_nss++; + if (link_sta->pub->ht_cap.mcs.rx_mask[2]) + ht_rx_nss++; + if (link_sta->pub->ht_cap.mcs.rx_mask[3]) + ht_rx_nss++; + /* FIXME: consider rx_highest? */ + } + + if (link_sta->pub->vht_cap.vht_supported) { + int i; + u16 rx_mcs_map; + + rx_mcs_map = le16_to_cpu(link_sta->pub->vht_cap.vht_mcs.rx_mcs_map); + + for (i = 7; i >= 0; i--) { + u8 mcs = (rx_mcs_map >> (2 * i)) & 3; + + if (mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) { + vht_rx_nss = i + 1; + break; + } + } + /* FIXME: consider rx_highest? */ + } + + rx_nss = max(vht_rx_nss, ht_rx_nss); + rx_nss = max(he_rx_nss, rx_nss); + rx_nss = max(eht_rx_nss, rx_nss); + link_sta->pub->rx_nss = max_t(u8, 1, rx_nss); +} + +u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, + struct link_sta_info *link_sta, + u8 opmode, enum nl80211_band band) +{ + enum ieee80211_sta_rx_bandwidth new_bw; + struct sta_opmode_info sta_opmode = {}; + u32 changed = 0; + u8 nss, cur_nss; + + /* ignore - no support for BF yet */ + if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF) + return 0; + + nss = opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK; + nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT; + nss += 1; + + if (link_sta->pub->rx_nss != nss) { + cur_nss = link_sta->pub->rx_nss; + /* Reset rx_nss and call ieee80211_sta_set_rx_nss() which + * will set the same to max nss value calculated based on capability. + */ + link_sta->pub->rx_nss = 0; + ieee80211_sta_set_rx_nss(link_sta); + /* Do not allow an nss change to rx_nss greater than max_nss + * negotiated and capped to APs capability during association. 
+ */ + if (nss <= link_sta->pub->rx_nss) { + link_sta->pub->rx_nss = nss; + sta_opmode.rx_nss = nss; + changed |= IEEE80211_RC_NSS_CHANGED; + sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED; + } else { + link_sta->pub->rx_nss = cur_nss; + pr_warn_ratelimited("Ignoring NSS change in VHT Operating Mode Notification from %pM with invalid nss %d", + link_sta->pub->addr, nss); + } + } + + switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) { + case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: + /* ignore IEEE80211_OPMODE_NOTIF_BW_160_80P80 must not be set */ + link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20; + break; + case IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: + /* ignore IEEE80211_OPMODE_NOTIF_BW_160_80P80 must not be set */ + link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_40; + break; + case IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: + if (opmode & IEEE80211_OPMODE_NOTIF_BW_160_80P80) + link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160; + else + link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80; + break; + case IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: + /* legacy only, no longer used by newer spec */ + link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160; + break; + } + + new_bw = ieee80211_sta_cur_vht_bw(link_sta); + if (new_bw != link_sta->pub->bandwidth) { + link_sta->pub->bandwidth = new_bw; + sta_opmode.bw = ieee80211_sta_rx_bw_to_chan_width(link_sta); + changed |= IEEE80211_RC_BW_CHANGED; + sta_opmode.changed |= STA_OPMODE_MAX_BW_CHANGED; + } + + if (sta_opmode.changed) + cfg80211_sta_opmode_change_notify(sdata->dev, link_sta->addr, + &sta_opmode, GFP_KERNEL); + + return changed; +} + +void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link, + struct ieee80211_mgmt *mgmt) +{ + struct ieee80211_bss_conf *link_conf = link->conf; + + if (!link_conf->mu_mimo_owner) + return; + + if (!memcmp(mgmt->u.action.u.vht_group_notif.position, + link_conf->mu_group.position, WLAN_USER_POSITION_LEN) && + !memcmp(mgmt->u.action.u.vht_group_notif.membership, + link_conf->mu_group.membership, WLAN_MEMBERSHIP_LEN)) + return; + + memcpy(link_conf->mu_group.membership, + mgmt->u.action.u.vht_group_notif.membership, + WLAN_MEMBERSHIP_LEN); + memcpy(link_conf->mu_group.position, + mgmt->u.action.u.vht_group_notif.position, + WLAN_USER_POSITION_LEN); + + ieee80211_link_info_change_notify(sdata, link, + BSS_CHANGED_MU_GROUPS); +} + +void ieee80211_update_mu_groups(struct ieee80211_vif *vif, unsigned int link_id, + const u8 *membership, const u8 *position) +{ + struct ieee80211_bss_conf *link_conf; + + rcu_read_lock(); + link_conf = rcu_dereference(vif->link_conf[link_id]); + + if (!WARN_ON_ONCE(!link_conf || !link_conf->mu_mimo_owner)) { + memcpy(link_conf->mu_group.membership, membership, + WLAN_MEMBERSHIP_LEN); + memcpy(link_conf->mu_group.position, position, + WLAN_USER_POSITION_LEN); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(ieee80211_update_mu_groups); + +void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, + struct link_sta_info *link_sta, + u8 opmode, enum nl80211_band band) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; + + u32 changed = __ieee80211_vht_handle_opmode(sdata, link_sta, + opmode, band); + + if (changed > 0) { + ieee80211_recalc_min_chandef(sdata, link_sta->link_id); + rate_control_rate_update(local, sband, link_sta->sta, + link_sta->link_id, changed); + } +} + +void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, + u16 
vht_mask[NL80211_VHT_NSS_MAX]) +{ + int i; + u16 mask, cap = le16_to_cpu(vht_cap); + + for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { + mask = (cap >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + switch (mask) { + case IEEE80211_VHT_MCS_SUPPORT_0_7: + vht_mask[i] = 0x00FF; + break; + case IEEE80211_VHT_MCS_SUPPORT_0_8: + vht_mask[i] = 0x01FF; + break; + case IEEE80211_VHT_MCS_SUPPORT_0_9: + vht_mask[i] = 0x03FF; + break; + case IEEE80211_VHT_MCS_NOT_SUPPORTED: + default: + vht_mask[i] = 0; + break; + } + } +} diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c new file mode 100644 index 0000000000..9a6e11d7b4 --- /dev/null +++ b/net/mac80211/wep.c @@ -0,0 +1,306 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Software WEP encryption implementation + * Copyright 2002, Jouni Malinen <jkmaline@cc.hut.fi> + * Copyright 2003, Instant802 Networks, Inc. + */ + +#include <linux/netdevice.h> +#include <linux/types.h> +#include <linux/random.h> +#include <linux/compiler.h> +#include <linux/crc32.h> +#include <linux/crypto.h> +#include <linux/err.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <asm/unaligned.h> + +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "wep.h" + + +void ieee80211_wep_init(struct ieee80211_local *local) +{ + /* start WEP IV from a random value */ + get_random_bytes(&local->wep_iv, IEEE80211_WEP_IV_LEN); +} + +static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen) +{ + /* + * Fluhrer, Mantin, and Shamir have reported weaknesses in the + * key scheduling algorithm of RC4. At least IVs (KeyByte + 3, + * 0xff, N) can be used to speedup attacks, so avoid using them. + */ + if ((iv & 0xff00) == 0xff00) { + u8 B = (iv >> 16) & 0xff; + if (B >= 3 && B < 3 + keylen) + return true; + } + return false; +} + + +static void ieee80211_wep_get_iv(struct ieee80211_local *local, + int keylen, int keyidx, u8 *iv) +{ + local->wep_iv++; + if (ieee80211_wep_weak_iv(local->wep_iv, keylen)) + local->wep_iv += 0x0100; + + if (!iv) + return; + + *iv++ = (local->wep_iv >> 16) & 0xff; + *iv++ = (local->wep_iv >> 8) & 0xff; + *iv++ = local->wep_iv & 0xff; + *iv++ = keyidx << 6; +} + + +static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local, + struct sk_buff *skb, + int keylen, int keyidx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + unsigned int hdrlen; + u8 *newhdr; + + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); + + if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN)) + return NULL; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + newhdr = skb_push(skb, IEEE80211_WEP_IV_LEN); + memmove(newhdr, newhdr + IEEE80211_WEP_IV_LEN, hdrlen); + + /* the HW only needs room for the IV, but not the actual IV */ + if (info->control.hw_key && + (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) + return newhdr + hdrlen; + + ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen); + return newhdr + hdrlen; +} + + +static void ieee80211_wep_remove_iv(struct ieee80211_local *local, + struct sk_buff *skb, + struct ieee80211_key *key) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + unsigned int hdrlen; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen); + skb_pull(skb, IEEE80211_WEP_IV_LEN); +} + + +/* Perform WEP encryption using given key. data buffer must have tailroom + * for 4-byte ICV. data_len must not include this ICV. 
Note: this function + * does _not_ add IV. data = RC4(data | CRC32(data)) */ +int ieee80211_wep_encrypt_data(struct arc4_ctx *ctx, u8 *rc4key, + size_t klen, u8 *data, size_t data_len) +{ + __le32 icv; + + icv = cpu_to_le32(~crc32_le(~0, data, data_len)); + put_unaligned(icv, (__le32 *)(data + data_len)); + + arc4_setkey(ctx, rc4key, klen); + arc4_crypt(ctx, data, data, data_len + IEEE80211_WEP_ICV_LEN); + memzero_explicit(ctx, sizeof(*ctx)); + + return 0; +} + + +/* Perform WEP encryption on given skb. 4 bytes of extra space (IV) in the + * beginning of the buffer 4 bytes of extra space (ICV) in the end of the + * buffer will be added. Both IV and ICV will be transmitted, so the + * payload length increases with 8 bytes. + * + * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data)) + */ +int ieee80211_wep_encrypt(struct ieee80211_local *local, + struct sk_buff *skb, + const u8 *key, int keylen, int keyidx) +{ + u8 *iv; + size_t len; + u8 rc4key[3 + WLAN_KEY_LEN_WEP104]; + + if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN)) + return -1; + + iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx); + if (!iv) + return -1; + + len = skb->len - (iv + IEEE80211_WEP_IV_LEN - skb->data); + + /* Prepend 24-bit IV to RC4 key */ + memcpy(rc4key, iv, 3); + + /* Copy rest of the WEP key (the secret part) */ + memcpy(rc4key + 3, key, keylen); + + /* Add room for ICV */ + skb_put(skb, IEEE80211_WEP_ICV_LEN); + + return ieee80211_wep_encrypt_data(&local->wep_tx_ctx, rc4key, keylen + 3, + iv + IEEE80211_WEP_IV_LEN, len); +} + + +/* Perform WEP decryption using given key. data buffer includes encrypted + * payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV. + * Return 0 on success and -1 on ICV mismatch. */ +int ieee80211_wep_decrypt_data(struct arc4_ctx *ctx, u8 *rc4key, + size_t klen, u8 *data, size_t data_len) +{ + __le32 crc; + + arc4_setkey(ctx, rc4key, klen); + arc4_crypt(ctx, data, data, data_len + IEEE80211_WEP_ICV_LEN); + memzero_explicit(ctx, sizeof(*ctx)); + + crc = cpu_to_le32(~crc32_le(~0, data, data_len)); + if (memcmp(&crc, data + data_len, IEEE80211_WEP_ICV_LEN) != 0) + /* ICV mismatch */ + return -1; + + return 0; +} + + +/* Perform WEP decryption on given skb. Buffer includes whole WEP part of + * the frame: IV (4 bytes), encrypted payload (including SNAP header), + * ICV (4 bytes). skb->len includes both IV and ICV. + * + * Returns 0 if frame was decrypted successfully and ICV was correct and -1 on + * failure. If frame is OK, IV and ICV will be removed, i.e., decrypted payload + * is moved to the beginning of the skb and skb length will be reduced. 
+ */ +static int ieee80211_wep_decrypt(struct ieee80211_local *local, + struct sk_buff *skb, + struct ieee80211_key *key) +{ + u32 klen; + u8 rc4key[3 + WLAN_KEY_LEN_WEP104]; + u8 keyidx; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + unsigned int hdrlen; + size_t len; + int ret = 0; + + if (!ieee80211_has_protected(hdr->frame_control)) + return -1; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + if (skb->len < hdrlen + IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN) + return -1; + + len = skb->len - hdrlen - IEEE80211_WEP_IV_LEN - IEEE80211_WEP_ICV_LEN; + + keyidx = skb->data[hdrlen + 3] >> 6; + + if (!key || keyidx != key->conf.keyidx) + return -1; + + klen = 3 + key->conf.keylen; + + /* Prepend 24-bit IV to RC4 key */ + memcpy(rc4key, skb->data + hdrlen, 3); + + /* Copy rest of the WEP key (the secret part) */ + memcpy(rc4key + 3, key->conf.key, key->conf.keylen); + + if (ieee80211_wep_decrypt_data(&local->wep_rx_ctx, rc4key, klen, + skb->data + hdrlen + + IEEE80211_WEP_IV_LEN, len)) + ret = -1; + + /* Trim ICV */ + skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN); + + /* Remove IV */ + memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen); + skb_pull(skb, IEEE80211_WEP_IV_LEN); + + return ret; +} + +ieee80211_rx_result +ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + __le16 fc = hdr->frame_control; + + if (!ieee80211_is_data(fc) && !ieee80211_is_auth(fc)) + return RX_CONTINUE; + + if (!(status->flag & RX_FLAG_DECRYPTED)) { + if (skb_linearize(rx->skb)) + return RX_DROP_UNUSABLE; + if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) + return RX_DROP_UNUSABLE; + } else if (!(status->flag & RX_FLAG_IV_STRIPPED)) { + if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) + + IEEE80211_WEP_IV_LEN)) + return RX_DROP_UNUSABLE; + ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); + /* remove ICV */ + if (!(status->flag & RX_FLAG_ICV_STRIPPED) && + pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN)) + return RX_DROP_UNUSABLE; + } + + return RX_CONTINUE; +} + +static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_key_conf *hw_key = info->control.hw_key; + + if (!hw_key) { + if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key, + tx->key->conf.keylen, + tx->key->conf.keyidx)) + return -1; + } else if ((hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) || + (hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) { + if (!ieee80211_wep_add_iv(tx->local, skb, + tx->key->conf.keylen, + tx->key->conf.keyidx)) + return -1; + } + + return 0; +} + +ieee80211_tx_result +ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb; + + ieee80211_tx_set_protected(tx); + + skb_queue_walk(&tx->skbs, skb) { + if (wep_encrypt_skb(tx, skb) < 0) { + I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); + return TX_DROP; + } + } + + return TX_CONTINUE; +} diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h new file mode 100644 index 0000000000..4ffe83554c --- /dev/null +++ b/net/mac80211/wep.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Software WEP encryption implementation + * Copyright 2002, Jouni Malinen <jkmaline@cc.hut.fi> + * Copyright 2003, Instant802 Networks, Inc. 
+ */ + +#ifndef WEP_H +#define WEP_H + +#include <linux/skbuff.h> +#include <linux/types.h> +#include "ieee80211_i.h" +#include "key.h" + +void ieee80211_wep_init(struct ieee80211_local *local); +int ieee80211_wep_encrypt_data(struct arc4_ctx *ctx, u8 *rc4key, + size_t klen, u8 *data, size_t data_len); +int ieee80211_wep_encrypt(struct ieee80211_local *local, + struct sk_buff *skb, + const u8 *key, int keylen, int keyidx); +int ieee80211_wep_decrypt_data(struct arc4_ctx *ctx, u8 *rc4key, + size_t klen, u8 *data, size_t data_len); + +ieee80211_rx_result +ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx); +ieee80211_tx_result +ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx); + +#endif /* WEP_H */ diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c new file mode 100644 index 0000000000..1601be5764 --- /dev/null +++ b/net/mac80211/wme.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2004, Instant802 Networks, Inc. + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2022 Intel Corporation + */ + +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/module.h> +#include <linux/if_arp.h> +#include <linux/types.h> +#include <net/ip.h> +#include <net/pkt_sched.h> + +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "wme.h" + +/* Default mapping in classifier to work with default + * queue setup. + */ +const int ieee802_1d_to_ac[8] = { + IEEE80211_AC_BE, + IEEE80211_AC_BK, + IEEE80211_AC_BK, + IEEE80211_AC_BE, + IEEE80211_AC_VI, + IEEE80211_AC_VI, + IEEE80211_AC_VO, + IEEE80211_AC_VO +}; + +static int wme_downgrade_ac(struct sk_buff *skb) +{ + switch (skb->priority) { + case 6: + case 7: + skb->priority = 5; /* VO -> VI */ + return 0; + case 4: + case 5: + skb->priority = 3; /* VI -> BE */ + return 0; + case 0: + case 3: + skb->priority = 2; /* BE -> BK */ + return 0; + default: + return -1; + } +} + +/** + * ieee80211_fix_reserved_tid - return the TID to use if this one is reserved + * @tid: the assumed-reserved TID + * + * Returns: the alternative TID to use, or 0 on error + */ +static inline u8 ieee80211_fix_reserved_tid(u8 tid) +{ + switch (tid) { + case 0: + return 3; + case 1: + return 2; + case 2: + return 1; + case 3: + return 0; + case 4: + return 5; + case 5: + return 4; + case 6: + return 7; + case 7: + return 6; + } + + return 0; +} + +static u16 ieee80211_downgrade_queue(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, struct sk_buff *skb) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + /* in case we are a client verify acm is not set for this ac */ + while (sdata->wmm_acm & BIT(skb->priority)) { + int ac = ieee802_1d_to_ac[skb->priority]; + + if (ifmgd->tx_tspec[ac].admitted_time && + skb->priority == ifmgd->tx_tspec[ac].up) + return ac; + + if (wme_downgrade_ac(skb)) { + /* + * This should not really happen. The AP has marked all + * lower ACs to require admission control which is not + * a reasonable configuration. Allow the frame to be + * transmitted using AC_BK as a workaround. 
+ */ + break; + } + } + + /* Check to see if this is a reserved TID */ + if (sta && sta->reserved_tid == skb->priority) + skb->priority = ieee80211_fix_reserved_tid(skb->priority); + + /* look up which queue to use for frames with this 1d tag */ + return ieee802_1d_to_ac[skb->priority]; +} + +/* Indicate which queue to use for this fully formed 802.11 frame */ +u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, + struct ieee80211_hdr *hdr) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u8 *p; + + /* Ensure hash is set prior to potential SW encryption */ + skb_get_hash(skb); + + if ((info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER) || + local->hw.queues < IEEE80211_NUM_ACS) + return 0; + + if (!ieee80211_is_data(hdr->frame_control)) { + skb->priority = 7; + return ieee802_1d_to_ac[skb->priority]; + } + if (!ieee80211_is_data_qos(hdr->frame_control)) { + skb->priority = 0; + return ieee802_1d_to_ac[skb->priority]; + } + + p = ieee80211_get_qos_ctl(hdr); + skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; + + return ieee80211_downgrade_queue(sdata, NULL, skb); +} + +u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, struct sk_buff *skb) +{ + const struct ethhdr *eth = (void *)skb->data; + struct mac80211_qos_map *qos_map; + bool qos; + + /* Ensure hash is set prior to potential SW encryption */ + skb_get_hash(skb); + + /* all mesh/ocb stations are required to support WME */ + if ((sdata->vif.type == NL80211_IFTYPE_MESH_POINT && + !is_multicast_ether_addr(eth->h_dest)) || + (sdata->vif.type == NL80211_IFTYPE_OCB && sta)) + qos = true; + else if (sta) + qos = sta->sta.wme; + else + qos = false; + + if (!qos) { + skb->priority = 0; /* required for correct WPA/11i MIC */ + return IEEE80211_AC_BE; + } + + if (skb->protocol == sdata->control_port_protocol) { + skb->priority = 7; + goto downgrade; + } + + /* use the data classifier to determine what 802.1d tag the + * data frame has */ + qos_map = rcu_dereference(sdata->qos_map); + skb->priority = cfg80211_classify8021d(skb, qos_map ? + &qos_map->qos_map : NULL); + + downgrade: + return ieee80211_downgrade_queue(sdata, sta, skb); +} + +/** + * ieee80211_set_qos_hdr - Fill in the QoS header if there is one. 
+ * + * @sdata: local subif + * @skb: packet to be updated + */ +void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; + u8 flags; + u8 *p; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return; + + p = ieee80211_get_qos_ctl(hdr); + + /* don't overwrite the QoS field of injected frames */ + if (info->flags & IEEE80211_TX_CTL_INJECTED) { + /* do take into account Ack policy of injected frames */ + if (*p & IEEE80211_QOS_CTL_ACK_POLICY_NOACK) + info->flags |= IEEE80211_TX_CTL_NO_ACK; + return; + } + + /* set up the first byte */ + + /* + * preserve everything but the TID and ACK policy + * (which we both write here) + */ + flags = *p & ~(IEEE80211_QOS_CTL_TID_MASK | + IEEE80211_QOS_CTL_ACK_POLICY_MASK); + + if (is_multicast_ether_addr(hdr->addr1) || + sdata->noack_map & BIT(tid)) { + flags |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK; + info->flags |= IEEE80211_TX_CTL_NO_ACK; + } + + *p = flags | tid; + + /* set up the second byte */ + p++; + + if (ieee80211_vif_is_mesh(&sdata->vif)) { + /* preserve RSPI and Mesh PS Level bit */ + *p &= ((IEEE80211_QOS_CTL_RSPI | + IEEE80211_QOS_CTL_MESH_PS_LEVEL) >> 8); + + /* Nulls don't have a mesh header (frame body) */ + if (!ieee80211_is_qos_nullfunc(hdr->frame_control)) + *p |= (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8); + } else { + *p = 0; + } +} diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h new file mode 100644 index 0000000000..81f0039527 --- /dev/null +++ b/net/mac80211/wme.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2004, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. + */ + +#ifndef _WME_H +#define _WME_H + +#include <linux/netdevice.h> +#include "ieee80211_i.h" + +u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, + struct ieee80211_hdr *hdr); +u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, struct sk_buff *skb); +void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); + +#endif /* _WME_H */ diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c new file mode 100644 index 0000000000..2d8e38b3bc --- /dev/null +++ b/net/mac80211/wpa.c @@ -0,0 +1,1118 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2002-2004, Instant802 Networks, Inc. 
+ * Copyright 2008, Jouni Malinen <j@w1.fi> + * Copyright (C) 2016-2017 Intel Deutschland GmbH + * Copyright (C) 2020-2022 Intel Corporation + */ + +#include <linux/netdevice.h> +#include <linux/types.h> +#include <linux/skbuff.h> +#include <linux/compiler.h> +#include <linux/ieee80211.h> +#include <linux/gfp.h> +#include <asm/unaligned.h> +#include <net/mac80211.h> +#include <crypto/aes.h> +#include <crypto/utils.h> + +#include "ieee80211_i.h" +#include "michael.h" +#include "tkip.h" +#include "aes_ccm.h" +#include "aes_cmac.h" +#include "aes_gmac.h" +#include "aes_gcm.h" +#include "wpa.h" + +ieee80211_tx_result +ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) +{ + u8 *data, *key, *mic; + size_t data_len; + unsigned int hdrlen; + struct ieee80211_hdr *hdr; + struct sk_buff *skb = tx->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int tail; + + hdr = (struct ieee80211_hdr *)skb->data; + if (!tx->key || tx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP || + skb->len < 24 || !ieee80211_is_data_present(hdr->frame_control)) + return TX_CONTINUE; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + if (skb->len < hdrlen) + return TX_DROP; + + data = skb->data + hdrlen; + data_len = skb->len - hdrlen; + + if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) { + /* Need to use software crypto for the test */ + info->control.hw_key = NULL; + } + + if (info->control.hw_key && + (info->flags & IEEE80211_TX_CTL_DONTFRAG || + ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG)) && + !(tx->key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | + IEEE80211_KEY_FLAG_PUT_MIC_SPACE))) { + /* hwaccel - with no need for SW-generated MMIC or MIC space */ + return TX_CONTINUE; + } + + tail = MICHAEL_MIC_LEN; + if (!info->control.hw_key) + tail += IEEE80211_TKIP_ICV_LEN; + + if (WARN(skb_tailroom(skb) < tail || + skb_headroom(skb) < IEEE80211_TKIP_IV_LEN, + "mmic: not enough head/tail (%d/%d,%d/%d)\n", + skb_headroom(skb), IEEE80211_TKIP_IV_LEN, + skb_tailroom(skb), tail)) + return TX_DROP; + + mic = skb_put(skb, MICHAEL_MIC_LEN); + + if (tx->key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) { + /* Zeroed MIC can help with debug */ + memset(mic, 0, MICHAEL_MIC_LEN); + return TX_CONTINUE; + } + + key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]; + michael_mic(key, hdr, data, data_len, mic); + if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) + mic[0]++; + + return TX_CONTINUE; +} + + +ieee80211_rx_result +ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) +{ + u8 *data, *key = NULL; + size_t data_len; + unsigned int hdrlen; + u8 mic[MICHAEL_MIC_LEN]; + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + /* + * it makes no sense to check for MIC errors on anything other + * than data frames. + */ + if (!ieee80211_is_data_present(hdr->frame_control)) + return RX_CONTINUE; + + /* + * No way to verify the MIC if the hardware stripped it or + * the IV with the key index. In this case we have solely rely + * on the driver to set RX_FLAG_MMIC_ERROR in the event of a + * MIC failure report. 
+ */ + if (status->flag & (RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED)) { + if (status->flag & RX_FLAG_MMIC_ERROR) + goto mic_fail_no_key; + + if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key && + rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP) + goto update_iv; + + return RX_CONTINUE; + } + + /* + * Some hardware seems to generate Michael MIC failure reports; even + * though, the frame was not encrypted with TKIP and therefore has no + * MIC. Ignore the flag them to avoid triggering countermeasures. + */ + if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP || + !(status->flag & RX_FLAG_DECRYPTED)) + return RX_CONTINUE; + + if (rx->sdata->vif.type == NL80211_IFTYPE_AP && rx->key->conf.keyidx) { + /* + * APs with pairwise keys should never receive Michael MIC + * errors for non-zero keyidx because these are reserved for + * group keys and only the AP is sending real multicast + * frames in the BSS. + */ + return RX_DROP_UNUSABLE; + } + + if (status->flag & RX_FLAG_MMIC_ERROR) + goto mic_fail; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + if (skb->len < hdrlen + MICHAEL_MIC_LEN) + return RX_DROP_UNUSABLE; + + if (skb_linearize(rx->skb)) + return RX_DROP_UNUSABLE; + hdr = (void *)skb->data; + + data = skb->data + hdrlen; + data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; + key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; + michael_mic(key, hdr, data, data_len, mic); + if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN)) + goto mic_fail; + + /* remove Michael MIC from payload */ + skb_trim(skb, skb->len - MICHAEL_MIC_LEN); + +update_iv: + /* update IV in key information to be able to detect replays */ + rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip.iv32; + rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip.iv16; + + return RX_CONTINUE; + +mic_fail: + rx->key->u.tkip.mic_failures++; + +mic_fail_no_key: + /* + * In some cases the key can be unset - e.g. a multicast packet, in + * a driver that supports HW encryption. Send up the key idx only if + * the key is set. + */ + cfg80211_michael_mic_failure(rx->sdata->dev, hdr->addr2, + is_multicast_ether_addr(hdr->addr1) ? + NL80211_KEYTYPE_GROUP : + NL80211_KEYTYPE_PAIRWISE, + rx->key ? 
rx->key->conf.keyidx : -1, + NULL, GFP_ATOMIC); + return RX_DROP_UNUSABLE; +} + +static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_key *key = tx->key; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + unsigned int hdrlen; + int len, tail; + u64 pn; + u8 *pos; + + if (info->control.hw_key && + !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) && + !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) { + /* hwaccel - with no need for software-generated IV */ + return 0; + } + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + len = skb->len - hdrlen; + + if (info->control.hw_key) + tail = 0; + else + tail = IEEE80211_TKIP_ICV_LEN; + + if (WARN_ON(skb_tailroom(skb) < tail || + skb_headroom(skb) < IEEE80211_TKIP_IV_LEN)) + return -1; + + pos = skb_push(skb, IEEE80211_TKIP_IV_LEN); + memmove(pos, pos + IEEE80211_TKIP_IV_LEN, hdrlen); + pos += hdrlen; + + /* the HW only needs room for the IV, but not the actual IV */ + if (info->control.hw_key && + (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) + return 0; + + /* Increase IV for the frame */ + pn = atomic64_inc_return(&key->conf.tx_pn); + pos = ieee80211_tkip_add_iv(pos, &key->conf, pn); + + /* hwaccel - with software IV */ + if (info->control.hw_key) + return 0; + + /* Add room for ICV */ + skb_put(skb, IEEE80211_TKIP_ICV_LEN); + + return ieee80211_tkip_encrypt_data(&tx->local->wep_tx_ctx, + key, skb, pos, len); +} + + +ieee80211_tx_result +ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb; + + ieee80211_tx_set_protected(tx); + + skb_queue_walk(&tx->skbs, skb) { + if (tkip_encrypt_skb(tx, skb) < 0) + return TX_DROP; + } + + return TX_CONTINUE; +} + + +ieee80211_rx_result +ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; + int hdrlen, res, hwaccel = 0; + struct ieee80211_key *key = rx->key; + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + + if (!ieee80211_is_data(hdr->frame_control)) + return RX_CONTINUE; + + if (!rx->sta || skb->len - hdrlen < 12) + return RX_DROP_UNUSABLE; + + /* it may be possible to optimize this a bit more */ + if (skb_linearize(rx->skb)) + return RX_DROP_UNUSABLE; + hdr = (void *)skb->data; + + /* + * Let TKIP code verify IV, but skip decryption. + * In the case where hardware checks the IV as well, + * we don't even get here, see ieee80211_rx_h_decrypt() + */ + if (status->flag & RX_FLAG_DECRYPTED) + hwaccel = 1; + + res = ieee80211_tkip_decrypt_data(&rx->local->wep_rx_ctx, + key, skb->data + hdrlen, + skb->len - hdrlen, rx->sta->sta.addr, + hdr->addr1, hwaccel, rx->security_idx, + &rx->tkip.iv32, + &rx->tkip.iv16); + if (res != TKIP_DECRYPT_OK) + return RX_DROP_UNUSABLE; + + /* Trim ICV */ + if (!(status->flag & RX_FLAG_ICV_STRIPPED)) + skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN); + + /* Remove IV */ + memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen); + skb_pull(skb, IEEE80211_TKIP_IV_LEN); + + return RX_CONTINUE; +} + +/* + * Calculate AAD for CCMP/GCMP, returning qos_tid since we + * need that in CCMP also for b_0. 
+ */ +static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + __le16 mask_fc; + int a4_included, mgmt; + u8 qos_tid; + u16 len_a = 22; + + /* + * Mask FC: zero subtype b4 b5 b6 (if not mgmt) + * Retry, PwrMgt, MoreData, Order (if Qos Data); set Protected + */ + mgmt = ieee80211_is_mgmt(hdr->frame_control); + mask_fc = hdr->frame_control; + mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | + IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA); + if (!mgmt) + mask_fc &= ~cpu_to_le16(0x0070); + mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); + + a4_included = ieee80211_has_a4(hdr->frame_control); + if (a4_included) + len_a += 6; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + qos_tid = ieee80211_get_tid(hdr); + mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_ORDER); + len_a += 2; + } else { + qos_tid = 0; + } + + /* AAD (extra authenticate-only data) / masked 802.11 header + * FC | A1 | A2 | A3 | SC | [A4] | [QC] */ + put_unaligned_be16(len_a, &aad[0]); + put_unaligned(mask_fc, (__le16 *)&aad[2]); + memcpy(&aad[4], &hdr->addrs, 3 * ETH_ALEN); + + /* Mask Seq#, leave Frag# */ + aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f; + aad[23] = 0; + + if (a4_included) { + memcpy(&aad[24], hdr->addr4, ETH_ALEN); + aad[30] = qos_tid; + aad[31] = 0; + } else { + memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN); + aad[24] = qos_tid; + } + + return qos_tid; +} + +static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + u8 qos_tid = ccmp_gcmp_aad(skb, aad); + + /* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC + * mode authentication are not allowed to collide, yet both are derived + * from this vector b_0. We only set L := 1 here to indicate that the + * data size can be represented in (L+1) bytes. The CCM layer will take + * care of storing the data length in the top (L+1) bytes and setting + * and clearing the other bits as is required to derive the two IVs. 
+ */ + b_0[0] = 0x1; + + /* Nonce: Nonce Flags | A2 | PN + * Nonce Flags: Priority (b0..b3) | Management (b4) | Reserved (b5..b7) + */ + b_0[1] = qos_tid | (ieee80211_is_mgmt(hdr->frame_control) << 4); + memcpy(&b_0[2], hdr->addr2, ETH_ALEN); + memcpy(&b_0[8], pn, IEEE80211_CCMP_PN_LEN); +} + +static inline void ccmp_pn2hdr(u8 *hdr, u8 *pn, int key_id) +{ + hdr[0] = pn[5]; + hdr[1] = pn[4]; + hdr[2] = 0; + hdr[3] = 0x20 | (key_id << 6); + hdr[4] = pn[3]; + hdr[5] = pn[2]; + hdr[6] = pn[1]; + hdr[7] = pn[0]; +} + + +static inline void ccmp_hdr2pn(u8 *pn, u8 *hdr) +{ + pn[0] = hdr[7]; + pn[1] = hdr[6]; + pn[2] = hdr[5]; + pn[3] = hdr[4]; + pn[4] = hdr[1]; + pn[5] = hdr[0]; +} + + +static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb, + unsigned int mic_len) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_key *key = tx->key; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int hdrlen, len, tail; + u8 *pos; + u8 pn[6]; + u64 pn64; + u8 aad[CCM_AAD_LEN]; + u8 b_0[AES_BLOCK_SIZE]; + + if (info->control.hw_key && + !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) && + !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && + !((info->control.hw_key->flags & + IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) && + ieee80211_is_mgmt(hdr->frame_control))) { + /* + * hwaccel has no need for preallocated room for CCMP + * header or MIC fields + */ + return 0; + } + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + len = skb->len - hdrlen; + + if (info->control.hw_key) + tail = 0; + else + tail = mic_len; + + if (WARN_ON(skb_tailroom(skb) < tail || + skb_headroom(skb) < IEEE80211_CCMP_HDR_LEN)) + return -1; + + pos = skb_push(skb, IEEE80211_CCMP_HDR_LEN); + memmove(pos, pos + IEEE80211_CCMP_HDR_LEN, hdrlen); + + /* the HW only needs room for the IV, but not the actual IV */ + if (info->control.hw_key && + (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) + return 0; + + pos += hdrlen; + + pn64 = atomic64_inc_return(&key->conf.tx_pn); + + pn[5] = pn64; + pn[4] = pn64 >> 8; + pn[3] = pn64 >> 16; + pn[2] = pn64 >> 24; + pn[1] = pn64 >> 32; + pn[0] = pn64 >> 40; + + ccmp_pn2hdr(pos, pn, key->conf.keyidx); + + /* hwaccel - with software CCMP header */ + if (info->control.hw_key) + return 0; + + pos += IEEE80211_CCMP_HDR_LEN; + ccmp_special_blocks(skb, pn, b_0, aad); + return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len, + skb_put(skb, mic_len)); +} + + +ieee80211_tx_result +ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx, + unsigned int mic_len) +{ + struct sk_buff *skb; + + ieee80211_tx_set_protected(tx); + + skb_queue_walk(&tx->skbs, skb) { + if (ccmp_encrypt_skb(tx, skb, mic_len) < 0) + return TX_DROP; + } + + return TX_CONTINUE; +} + + +ieee80211_rx_result +ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx, + unsigned int mic_len) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + int hdrlen; + struct ieee80211_key *key = rx->key; + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + u8 pn[IEEE80211_CCMP_PN_LEN]; + int data_len; + int queue; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + + if (!ieee80211_is_data(hdr->frame_control) && + !ieee80211_is_robust_mgmt_frame(skb)) + return RX_CONTINUE; + + if (status->flag & RX_FLAG_DECRYPTED) { + if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN)) + return RX_DROP_UNUSABLE; + if (status->flag & RX_FLAG_MIC_STRIPPED) + mic_len = 0; + } 
else { + if (skb_linearize(rx->skb)) + return RX_DROP_UNUSABLE; + } + + /* reload hdr - skb might have been reallocated */ + hdr = (void *)rx->skb->data; + + data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len; + if (!rx->sta || data_len < 0) + return RX_DROP_UNUSABLE; + + if (!(status->flag & RX_FLAG_PN_VALIDATED)) { + int res; + + ccmp_hdr2pn(pn, skb->data + hdrlen); + + queue = rx->security_idx; + + res = memcmp(pn, key->u.ccmp.rx_pn[queue], + IEEE80211_CCMP_PN_LEN); + if (res < 0 || + (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) { + key->u.ccmp.replays++; + return RX_DROP_U_REPLAY; + } + + if (!(status->flag & RX_FLAG_DECRYPTED)) { + u8 aad[2 * AES_BLOCK_SIZE]; + u8 b_0[AES_BLOCK_SIZE]; + /* hardware didn't decrypt/verify MIC */ + ccmp_special_blocks(skb, pn, b_0, aad); + + if (ieee80211_aes_ccm_decrypt( + key->u.ccmp.tfm, b_0, aad, + skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN, + data_len, + skb->data + skb->len - mic_len)) + return RX_DROP_U_MIC_FAIL; + } + + memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN); + if (unlikely(ieee80211_is_frag(hdr))) + memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN); + } + + /* Remove CCMP header and MIC */ + if (pskb_trim(skb, skb->len - mic_len)) + return RX_DROP_UNUSABLE; + memmove(skb->data + IEEE80211_CCMP_HDR_LEN, skb->data, hdrlen); + skb_pull(skb, IEEE80211_CCMP_HDR_LEN); + + return RX_CONTINUE; +} + +static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + + memcpy(j_0, hdr->addr2, ETH_ALEN); + memcpy(&j_0[ETH_ALEN], pn, IEEE80211_GCMP_PN_LEN); + j_0[13] = 0; + j_0[14] = 0; + j_0[AES_BLOCK_SIZE - 1] = 0x01; + + ccmp_gcmp_aad(skb, aad); +} + +static inline void gcmp_pn2hdr(u8 *hdr, const u8 *pn, int key_id) +{ + hdr[0] = pn[5]; + hdr[1] = pn[4]; + hdr[2] = 0; + hdr[3] = 0x20 | (key_id << 6); + hdr[4] = pn[3]; + hdr[5] = pn[2]; + hdr[6] = pn[1]; + hdr[7] = pn[0]; +} + +static inline void gcmp_hdr2pn(u8 *pn, const u8 *hdr) +{ + pn[0] = hdr[7]; + pn[1] = hdr[6]; + pn[2] = hdr[5]; + pn[3] = hdr[4]; + pn[4] = hdr[1]; + pn[5] = hdr[0]; +} + +static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_key *key = tx->key; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int hdrlen, len, tail; + u8 *pos; + u8 pn[6]; + u64 pn64; + u8 aad[GCM_AAD_LEN]; + u8 j_0[AES_BLOCK_SIZE]; + + if (info->control.hw_key && + !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) && + !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && + !((info->control.hw_key->flags & + IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) && + ieee80211_is_mgmt(hdr->frame_control))) { + /* hwaccel has no need for preallocated room for GCMP + * header or MIC fields + */ + return 0; + } + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + len = skb->len - hdrlen; + + if (info->control.hw_key) + tail = 0; + else + tail = IEEE80211_GCMP_MIC_LEN; + + if (WARN_ON(skb_tailroom(skb) < tail || + skb_headroom(skb) < IEEE80211_GCMP_HDR_LEN)) + return -1; + + pos = skb_push(skb, IEEE80211_GCMP_HDR_LEN); + memmove(pos, pos + IEEE80211_GCMP_HDR_LEN, hdrlen); + skb_set_network_header(skb, skb_network_offset(skb) + + IEEE80211_GCMP_HDR_LEN); + + /* the HW only needs room for the IV, but not the actual IV */ + if (info->control.hw_key && + (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) + return 0; + + pos += hdrlen; + + pn64 = 
atomic64_inc_return(&key->conf.tx_pn); + + pn[5] = pn64; + pn[4] = pn64 >> 8; + pn[3] = pn64 >> 16; + pn[2] = pn64 >> 24; + pn[1] = pn64 >> 32; + pn[0] = pn64 >> 40; + + gcmp_pn2hdr(pos, pn, key->conf.keyidx); + + /* hwaccel - with software GCMP header */ + if (info->control.hw_key) + return 0; + + pos += IEEE80211_GCMP_HDR_LEN; + gcmp_special_blocks(skb, pn, j_0, aad); + return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len, + skb_put(skb, IEEE80211_GCMP_MIC_LEN)); +} + +ieee80211_tx_result +ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb; + + ieee80211_tx_set_protected(tx); + + skb_queue_walk(&tx->skbs, skb) { + if (gcmp_encrypt_skb(tx, skb) < 0) + return TX_DROP; + } + + return TX_CONTINUE; +} + +ieee80211_rx_result +ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + int hdrlen; + struct ieee80211_key *key = rx->key; + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + u8 pn[IEEE80211_GCMP_PN_LEN]; + int data_len, queue, mic_len = IEEE80211_GCMP_MIC_LEN; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + + if (!ieee80211_is_data(hdr->frame_control) && + !ieee80211_is_robust_mgmt_frame(skb)) + return RX_CONTINUE; + + if (status->flag & RX_FLAG_DECRYPTED) { + if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN)) + return RX_DROP_UNUSABLE; + if (status->flag & RX_FLAG_MIC_STRIPPED) + mic_len = 0; + } else { + if (skb_linearize(rx->skb)) + return RX_DROP_UNUSABLE; + } + + /* reload hdr - skb might have been reallocated */ + hdr = (void *)rx->skb->data; + + data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len; + if (!rx->sta || data_len < 0) + return RX_DROP_UNUSABLE; + + if (!(status->flag & RX_FLAG_PN_VALIDATED)) { + int res; + + gcmp_hdr2pn(pn, skb->data + hdrlen); + + queue = rx->security_idx; + + res = memcmp(pn, key->u.gcmp.rx_pn[queue], + IEEE80211_GCMP_PN_LEN); + if (res < 0 || + (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) { + key->u.gcmp.replays++; + return RX_DROP_U_REPLAY; + } + + if (!(status->flag & RX_FLAG_DECRYPTED)) { + u8 aad[2 * AES_BLOCK_SIZE]; + u8 j_0[AES_BLOCK_SIZE]; + /* hardware didn't decrypt/verify MIC */ + gcmp_special_blocks(skb, pn, j_0, aad); + + if (ieee80211_aes_gcm_decrypt( + key->u.gcmp.tfm, j_0, aad, + skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN, + data_len, + skb->data + skb->len - + IEEE80211_GCMP_MIC_LEN)) + return RX_DROP_U_MIC_FAIL; + } + + memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN); + if (unlikely(ieee80211_is_frag(hdr))) + memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN); + } + + /* Remove GCMP header and MIC */ + if (pskb_trim(skb, skb->len - mic_len)) + return RX_DROP_UNUSABLE; + memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen); + skb_pull(skb, IEEE80211_GCMP_HDR_LEN); + + return RX_CONTINUE; +} + +static void bip_aad(struct sk_buff *skb, u8 *aad) +{ + __le16 mask_fc; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + + /* BIP AAD: FC(masked) || A1 || A2 || A3 */ + + /* FC type/subtype */ + /* Mask FC Retry, PwrMgt, MoreData flags to zero */ + mask_fc = hdr->frame_control; + mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM | + IEEE80211_FCTL_MOREDATA); + put_unaligned(mask_fc, (__le16 *) &aad[0]); + /* A1 || A2 || A3 */ + memcpy(aad + 2, &hdr->addrs, 3 * ETH_ALEN); +} + + +static inline void bip_ipn_set64(u8 *d, u64 pn) +{ + *d++ = pn; + *d++ = pn >> 8; + *d++ = pn >> 16; + *d++ 
= pn >> 24; + *d++ = pn >> 32; + *d = pn >> 40; +} + +static inline void bip_ipn_swap(u8 *d, const u8 *s) +{ + *d++ = s[5]; + *d++ = s[4]; + *d++ = s[3]; + *d++ = s[2]; + *d++ = s[1]; + *d = s[0]; +} + + +ieee80211_tx_result +ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb; + struct ieee80211_tx_info *info; + struct ieee80211_key *key = tx->key; + struct ieee80211_mmie *mmie; + u8 aad[20]; + u64 pn64; + + if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) + return TX_DROP; + + skb = skb_peek(&tx->skbs); + + info = IEEE80211_SKB_CB(skb); + + if (info->control.hw_key && + !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIE)) + return TX_CONTINUE; + + if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) + return TX_DROP; + + mmie = skb_put(skb, sizeof(*mmie)); + mmie->element_id = WLAN_EID_MMIE; + mmie->length = sizeof(*mmie) - 2; + mmie->key_id = cpu_to_le16(key->conf.keyidx); + + /* PN = PN + 1 */ + pn64 = atomic64_inc_return(&key->conf.tx_pn); + + bip_ipn_set64(mmie->sequence_number, pn64); + + if (info->control.hw_key) + return TX_CONTINUE; + + bip_aad(skb, aad); + + /* + * MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) + */ + ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad, + skb->data + 24, skb->len - 24, mmie->mic); + + return TX_CONTINUE; +} + +ieee80211_tx_result +ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb; + struct ieee80211_tx_info *info; + struct ieee80211_key *key = tx->key; + struct ieee80211_mmie_16 *mmie; + u8 aad[20]; + u64 pn64; + + if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) + return TX_DROP; + + skb = skb_peek(&tx->skbs); + + info = IEEE80211_SKB_CB(skb); + + if (info->control.hw_key) + return TX_CONTINUE; + + if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) + return TX_DROP; + + mmie = skb_put(skb, sizeof(*mmie)); + mmie->element_id = WLAN_EID_MMIE; + mmie->length = sizeof(*mmie) - 2; + mmie->key_id = cpu_to_le16(key->conf.keyidx); + + /* PN = PN + 1 */ + pn64 = atomic64_inc_return(&key->conf.tx_pn); + + bip_ipn_set64(mmie->sequence_number, pn64); + + bip_aad(skb, aad); + + /* MIC = AES-256-CMAC(IGTK, AAD || Management Frame Body || MMIE, 128) + */ + ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad, + skb->data + 24, skb->len - 24, mmie->mic); + + return TX_CONTINUE; +} + +ieee80211_rx_result +ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_key *key = rx->key; + struct ieee80211_mmie *mmie; + u8 aad[20], mic[8], ipn[6]; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + + if (!ieee80211_is_mgmt(hdr->frame_control)) + return RX_CONTINUE; + + /* management frames are already linear */ + + if (skb->len < 24 + sizeof(*mmie)) + return RX_DROP_UNUSABLE; + + mmie = (struct ieee80211_mmie *) + (skb->data + skb->len - sizeof(*mmie)); + if (mmie->element_id != WLAN_EID_MMIE || + mmie->length != sizeof(*mmie) - 2) + return RX_DROP_U_BAD_MMIE; /* Invalid MMIE */ + + bip_ipn_swap(ipn, mmie->sequence_number); + + if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) { + key->u.aes_cmac.replays++; + return RX_DROP_U_REPLAY; + } + + if (!(status->flag & RX_FLAG_DECRYPTED)) { + /* hardware didn't decrypt/verify MIC */ + bip_aad(skb, aad); + ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad, + skb->data + 24, skb->len - 24, mic); + if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { + key->u.aes_cmac.icverrors++; + return RX_DROP_U_MIC_FAIL; + } + } + + 
memcpy(key->u.aes_cmac.rx_pn, ipn, 6); + + /* Remove MMIE */ + skb_trim(skb, skb->len - sizeof(*mmie)); + + return RX_CONTINUE; +} + +ieee80211_rx_result +ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_key *key = rx->key; + struct ieee80211_mmie_16 *mmie; + u8 aad[20], mic[16], ipn[6]; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + if (!ieee80211_is_mgmt(hdr->frame_control)) + return RX_CONTINUE; + + /* management frames are already linear */ + + if (skb->len < 24 + sizeof(*mmie)) + return RX_DROP_UNUSABLE; + + mmie = (struct ieee80211_mmie_16 *) + (skb->data + skb->len - sizeof(*mmie)); + if (mmie->element_id != WLAN_EID_MMIE || + mmie->length != sizeof(*mmie) - 2) + return RX_DROP_UNUSABLE; /* Invalid MMIE */ + + bip_ipn_swap(ipn, mmie->sequence_number); + + if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) { + key->u.aes_cmac.replays++; + return RX_DROP_U_REPLAY; + } + + if (!(status->flag & RX_FLAG_DECRYPTED)) { + /* hardware didn't decrypt/verify MIC */ + bip_aad(skb, aad); + ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad, + skb->data + 24, skb->len - 24, mic); + if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { + key->u.aes_cmac.icverrors++; + return RX_DROP_U_MIC_FAIL; + } + } + + memcpy(key->u.aes_cmac.rx_pn, ipn, 6); + + /* Remove MMIE */ + skb_trim(skb, skb->len - sizeof(*mmie)); + + return RX_CONTINUE; +} + +ieee80211_tx_result +ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb; + struct ieee80211_tx_info *info; + struct ieee80211_key *key = tx->key; + struct ieee80211_mmie_16 *mmie; + struct ieee80211_hdr *hdr; + u8 aad[GMAC_AAD_LEN]; + u64 pn64; + u8 nonce[GMAC_NONCE_LEN]; + + if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) + return TX_DROP; + + skb = skb_peek(&tx->skbs); + + info = IEEE80211_SKB_CB(skb); + + if (info->control.hw_key) + return TX_CONTINUE; + + if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) + return TX_DROP; + + mmie = skb_put(skb, sizeof(*mmie)); + mmie->element_id = WLAN_EID_MMIE; + mmie->length = sizeof(*mmie) - 2; + mmie->key_id = cpu_to_le16(key->conf.keyidx); + + /* PN = PN + 1 */ + pn64 = atomic64_inc_return(&key->conf.tx_pn); + + bip_ipn_set64(mmie->sequence_number, pn64); + + bip_aad(skb, aad); + + hdr = (struct ieee80211_hdr *)skb->data; + memcpy(nonce, hdr->addr2, ETH_ALEN); + bip_ipn_swap(nonce + ETH_ALEN, mmie->sequence_number); + + /* MIC = AES-GMAC(IGTK, AAD || Management Frame Body || MMIE, 128) */ + if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, + skb->data + 24, skb->len - 24, mmie->mic) < 0) + return TX_DROP; + + return TX_CONTINUE; +} + +ieee80211_rx_result +ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_key *key = rx->key; + struct ieee80211_mmie_16 *mmie; + u8 aad[GMAC_AAD_LEN], *mic, ipn[6], nonce[GMAC_NONCE_LEN]; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + if (!ieee80211_is_mgmt(hdr->frame_control)) + return RX_CONTINUE; + + /* management frames are already linear */ + + if (skb->len < 24 + sizeof(*mmie)) + return RX_DROP_UNUSABLE; + + mmie = (struct ieee80211_mmie_16 *) + (skb->data + skb->len - sizeof(*mmie)); + if (mmie->element_id != WLAN_EID_MMIE || + mmie->length != sizeof(*mmie) - 2) + return RX_DROP_U_BAD_MMIE; /* Invalid MMIE */ + + bip_ipn_swap(ipn, 
mmie->sequence_number); + + if (memcmp(ipn, key->u.aes_gmac.rx_pn, 6) <= 0) { + key->u.aes_gmac.replays++; + return RX_DROP_U_REPLAY; + } + + if (!(status->flag & RX_FLAG_DECRYPTED)) { + /* hardware didn't decrypt/verify MIC */ + bip_aad(skb, aad); + + memcpy(nonce, hdr->addr2, ETH_ALEN); + memcpy(nonce + ETH_ALEN, ipn, 6); + + mic = kmalloc(GMAC_MIC_LEN, GFP_ATOMIC); + if (!mic) + return RX_DROP_UNUSABLE; + if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, + skb->data + 24, skb->len - 24, + mic) < 0 || + crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { + key->u.aes_gmac.icverrors++; + kfree(mic); + return RX_DROP_U_MIC_FAIL; + } + kfree(mic); + } + + memcpy(key->u.aes_gmac.rx_pn, ipn, 6); + + /* Remove MMIE */ + skb_trim(skb, skb->len - sizeof(*mmie)); + + return RX_CONTINUE; +} diff --git a/net/mac80211/wpa.h b/net/mac80211/wpa.h new file mode 100644 index 0000000000..a9a81abb54 --- /dev/null +++ b/net/mac80211/wpa.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2002-2004, Instant802 Networks, Inc. + * Copyright (C) 2022 Intel Corporation + */ + +#ifndef WPA_H +#define WPA_H + +#include <linux/skbuff.h> +#include <linux/types.h> +#include "ieee80211_i.h" + +ieee80211_tx_result +ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx); +ieee80211_rx_result +ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx); + +ieee80211_tx_result +ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx); +ieee80211_rx_result +ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx); + +ieee80211_tx_result +ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx, + unsigned int mic_len); +ieee80211_rx_result +ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx, + unsigned int mic_len); + +ieee80211_tx_result +ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx); +ieee80211_tx_result +ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx); +ieee80211_rx_result +ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx); +ieee80211_rx_result +ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx); +ieee80211_tx_result +ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx); +ieee80211_rx_result +ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx); + +ieee80211_tx_result +ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx); +ieee80211_rx_result +ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx); + +#endif /* WPA_H */ |
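
For reference, the CCMP header built by ccmp_pn2hdr() and parsed by ccmp_hdr2pn() above carries the 48-bit packet number (PN) split around a reserved byte and an "Ext IV"/key-id byte. The standalone userspace sketch below (an illustration only, not part of mac80211; the main() driver, test values and printed message are invented for the demo) reproduces that packing and checks that a PN round-trips through the 8-byte header:

/*
 * Illustration of the CCMP header layout: PN0/PN1, reserved, Ext IV + key id,
 * then PN2..PN5. Mirrors the ccmp_pn2hdr()/ccmp_hdr2pn() helpers above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

static void demo_pn2hdr(uint8_t *hdr, const uint8_t *pn, int key_id)
{
	hdr[0] = pn[5];			/* PN0 (least significant byte) */
	hdr[1] = pn[4];			/* PN1 */
	hdr[2] = 0;			/* reserved */
	hdr[3] = 0x20 | (key_id << 6);	/* Ext IV bit + 2-bit key id */
	hdr[4] = pn[3];			/* PN2 */
	hdr[5] = pn[2];			/* PN3 */
	hdr[6] = pn[1];			/* PN4 */
	hdr[7] = pn[0];			/* PN5 (most significant byte) */
}

static void demo_hdr2pn(uint8_t *pn, const uint8_t *hdr)
{
	pn[0] = hdr[7];
	pn[1] = hdr[6];
	pn[2] = hdr[5];
	pn[3] = hdr[4];
	pn[4] = hdr[1];
	pn[5] = hdr[0];
}

int main(void)
{
	/* pn[] holds the PN big-endian: pn[0] is the most significant byte */
	uint8_t pn[6] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x02 };
	uint8_t hdr[8], out[6];

	demo_pn2hdr(hdr, pn, 1);	/* key index 1 */
	demo_hdr2pn(out, hdr);

	assert(memcmp(pn, out, sizeof(pn)) == 0);
	assert(hdr[3] == (0x20 | (1 << 6)));
	printf("PN round-trips through the CCMP header, key id %d\n",
	       (hdr[3] >> 6) & 0x3);
	return 0;
}

The same big-endian/little-endian flip appears in gcmp_pn2hdr()/gcmp_hdr2pn() and in bip_ipn_swap() for BIP, which is why the receive paths can compare the recovered PN against the stored rx_pn with a plain memcmp() for replay detection.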