Diffstat (limited to 'drivers/net/wireless/ath/ath6kl')
31 files changed, 30877 insertions, 0 deletions
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig new file mode 100644 index 000000000..cd96cf8d9 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/Kconfig @@ -0,0 +1,66 @@ +# SPDX-License-Identifier: ISC +config ATH6KL + tristate "Atheros mobile chipsets support" + depends on CFG80211 + help + This module adds core support for wireless adapters based on + Atheros AR6003 and AR6004 chipsets. You still need separate + bus drivers for USB and SDIO to be able to use real devices. + + If you choose to build it as a module, it will be called + ath6kl_core. Please note that AR6002 and AR6001 are not + supported by this driver. + +config ATH6KL_SDIO + tristate "Atheros ath6kl SDIO support" + depends on ATH6KL + depends on MMC + help + This module adds support for wireless adapters based on + Atheros AR6003 and AR6004 chipsets running over SDIO. If you + choose to build it as a module, it will be called ath6kl_sdio. + Please note that AR6002 and AR6001 are not supported by this + driver. + +config ATH6KL_USB + tristate "Atheros ath6kl USB support" + depends on ATH6KL + depends on USB + help + This module adds support for wireless adapters based on + Atheros AR6004 chipset and chipsets based on it running over + USB. If you choose to build it as a module, it will be + called ath6kl_usb. + +config ATH6KL_DEBUG + bool "Atheros ath6kl debugging" + depends on ATH6KL + help + Enables ath6kl debug support, including debug messages + enabled with the debug_mask module parameter and the debugfs + interface. + + If unsure, say Y to make it easier to debug problems. + +config ATH6KL_TRACING + bool "Atheros ath6kl tracing support" + depends on ATH6KL + depends on EVENT_TRACING + help + Select this to make ath6kl use the tracing infrastructure which, + for example, can be enabled with the help of trace-cmd. All debug + messages and commands are delivered using individually + enablable trace points. + + If unsure, say Y to make it easier to debug problems. + +config ATH6KL_REGDOMAIN + bool "Atheros ath6kl regdomain support" + depends on ATH6KL + depends on CFG80211_CERTIFICATION_ONUS + help + Enabling this makes it possible to change the regdomain in + the firmware. This can only be enabled if regulatory requirements + are taken into account. + + If unsure, say N. diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile new file mode 100644 index 000000000..dc2b3b467 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/Makefile @@ -0,0 +1,49 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2004-2011 Atheros Communications Inc. +# Copyright (c) 2011-2012 Qualcomm Atheros, Inc. +# All rights reserved. +# +# +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
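+# +# Build note (illustrative): with a configured kernel tree, this +# directory can be built on its own with +# make M=drivers/net/wireless/ath/ath6kl modules +# yielding ath6kl_core.ko plus the ath6kl_sdio / ath6kl_usb bus +# modules selected in Kconfig.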
+# +# +# +# Author(s): ="Atheros" +#------------------------------------------------------------------------------ + +obj-$(CONFIG_ATH6KL) += ath6kl_core.o +ath6kl_core-y += debug.o +ath6kl_core-y += hif.o +ath6kl_core-y += htc_mbox.o +ath6kl_core-y += htc_pipe.o +ath6kl_core-y += bmi.o +ath6kl_core-y += cfg80211.o +ath6kl_core-y += init.o +ath6kl_core-y += main.o +ath6kl_core-y += txrx.o +ath6kl_core-y += wmi.o +ath6kl_core-y += core.o +ath6kl_core-y += recovery.o + +ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o +ath6kl_core-$(CONFIG_ATH6KL_TRACING) += trace.o + +obj-$(CONFIG_ATH6KL_SDIO) += ath6kl_sdio.o +ath6kl_sdio-y += sdio.o + +obj-$(CONFIG_ATH6KL_USB) += ath6kl_usb.o +ath6kl_usb-y += usb.o + +# for tracing framework to find trace.h +CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c new file mode 100644 index 000000000..af98e8711 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/bmi.c @@ -0,0 +1,548 @@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "core.h" +#include "hif-ops.h" +#include "target.h" +#include "debug.h" + +int ath6kl_bmi_done(struct ath6kl *ar) +{ + int ret; + u32 cid = BMI_DONE; + + if (ar->bmi.done_sent) { + ath6kl_dbg(ATH6KL_DBG_BMI, "bmi done skipped\n"); + return 0; + } + + ar->bmi.done_sent = true; + + ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid)); + if (ret) { + ath6kl_err("Unable to send bmi done: %d\n", ret); + return ret; + } + + return 0; +} + +int ath6kl_bmi_get_target_info(struct ath6kl *ar, + struct ath6kl_bmi_target_info *targ_info) +{ + int ret; + u32 cid = BMI_GET_TARGET_INFO; + + if (ar->bmi.done_sent) { + ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); + return -EACCES; + } + + ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid)); + if (ret) { + ath6kl_err("Unable to send get target info: %d\n", ret); + return ret; + } + + if (ar->hif_type == ATH6KL_HIF_TYPE_USB) { + ret = ath6kl_hif_bmi_read(ar, (u8 *)targ_info, + sizeof(*targ_info)); + } else { + ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version, + sizeof(targ_info->version)); + } + + if (ret) { + ath6kl_err("Unable to recv target info: %d\n", ret); + return ret; + } + + if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) { + /* Determine how many bytes are in the Target's targ_info */ + ret = ath6kl_hif_bmi_read(ar, + (u8 *)&targ_info->byte_count, + sizeof(targ_info->byte_count)); + if (ret) { + ath6kl_err("unable to read target info byte count: %d\n", + ret); + return ret; + } + + /* + * The target's targ_info doesn't match the host's targ_info. + * We need to do some backwards compatibility to make this work. 
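+ * + * In practice no translation is attempted: the check below simply + * warns and fails with -EINVAL whenever the reported byte_count + * differs from sizeof(*targ_info), i.e. the 12 bytes of the three + * __le32 fields of struct ath6kl_bmi_target_info.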
+ */ + if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) { + WARN_ON(1); + return -EINVAL; + } + + /* Read the remainder of the targ_info */ + ret = ath6kl_hif_bmi_read(ar, + ((u8 *)targ_info) + + sizeof(targ_info->byte_count), + sizeof(*targ_info) - + sizeof(targ_info->byte_count)); + + if (ret) { + ath6kl_err("Unable to read target info (%d bytes): %d\n", + targ_info->byte_count, ret); + return ret; + } + } + + ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n", + targ_info->version, targ_info->type); + + return 0; +} + +int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) +{ + u32 cid = BMI_READ_MEMORY; + int ret; + u32 offset; + u32 len_remain, rx_len; + u16 size; + + if (ar->bmi.done_sent) { + ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); + return -EACCES; + } + + size = ar->bmi.max_data_size + sizeof(cid) + sizeof(addr) + sizeof(len); + if (size > ar->bmi.max_cmd_size) { + WARN_ON(1); + return -EINVAL; + } + memset(ar->bmi.cmd_buf, 0, size); + + ath6kl_dbg(ATH6KL_DBG_BMI, + "bmi read memory: device: addr: 0x%x, len: %d\n", + addr, len); + + len_remain = len; + + while (len_remain) { + rx_len = (len_remain < ar->bmi.max_data_size) ? + len_remain : ar->bmi.max_data_size; + offset = 0; + memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); + offset += sizeof(cid); + memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); + offset += sizeof(addr); + memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len)); + offset += sizeof(len); + + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + if (ret) { + ath6kl_err("Unable to write to the device: %d\n", + ret); + return ret; + } + ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, rx_len); + if (ret) { + ath6kl_err("Unable to read from the device: %d\n", + ret); + return ret; + } + memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len); + len_remain -= rx_len; addr += rx_len; + } + + return 0; +} + +int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) +{ + u32 cid = BMI_WRITE_MEMORY; + int ret; + u32 offset; + u32 len_remain, tx_len; + const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len); + u8 aligned_buf[400]; + u8 *src; + + if (ar->bmi.done_sent) { + ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); + return -EACCES; + } + + if ((ar->bmi.max_data_size + header) > ar->bmi.max_cmd_size) { + WARN_ON(1); + return -EINVAL; + } + + if (WARN_ON(ar->bmi.max_data_size > sizeof(aligned_buf))) + return -E2BIG; + + memset(ar->bmi.cmd_buf, 0, ar->bmi.max_data_size + header); + + ath6kl_dbg(ATH6KL_DBG_BMI, + "bmi write memory: addr: 0x%x, len: %d\n", addr, len); + + len_remain = len; + while (len_remain) { + src = &buf[len - len_remain]; + + if (len_remain < (ar->bmi.max_data_size - header)) { + if (len_remain & 3) { + /* align it with 4 bytes */ + len_remain = len_remain + + (4 - (len_remain & 3)); + memcpy(aligned_buf, src, len_remain); + src = aligned_buf; + } + tx_len = len_remain; + } else { + tx_len = (ar->bmi.max_data_size - header); + } + + offset = 0; + memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); + offset += sizeof(cid); + memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); + offset += sizeof(addr); + memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len)); + offset += sizeof(tx_len); + memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len); + offset += tx_len; + + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + if (ret) { + ath6kl_err("Unable to write to the device: %d\n", + ret); + return ret; + } + len_remain -= 
tx_len; addr += tx_len; + } + + return 0; +} + +int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param) +{ + u32 cid = BMI_EXECUTE; + int ret; + u32 offset; + u16 size; + + if (ar->bmi.done_sent) { + ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); + return -EACCES; + } + + size = sizeof(cid) + sizeof(addr) + sizeof(*param); + if (size > ar->bmi.max_cmd_size) { + WARN_ON(1); + return -EINVAL; + } + memset(ar->bmi.cmd_buf, 0, size); + + ath6kl_dbg(ATH6KL_DBG_BMI, "bmi execute: addr: 0x%x, param: %d)\n", + addr, *param); + + offset = 0; + memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); + offset += sizeof(cid); + memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); + offset += sizeof(addr); + memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param)); + offset += sizeof(*param); + + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + if (ret) { + ath6kl_err("Unable to write to the device: %d\n", ret); + return ret; + } + + ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param)); + if (ret) { + ath6kl_err("Unable to read from the device: %d\n", ret); + return ret; + } + + memcpy(param, ar->bmi.cmd_buf, sizeof(*param)); + + return 0; +} + +int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr) +{ + u32 cid = BMI_SET_APP_START; + int ret; + u32 offset; + u16 size; + + if (ar->bmi.done_sent) { + ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); + return -EACCES; + } + + size = sizeof(cid) + sizeof(addr); + if (size > ar->bmi.max_cmd_size) { + WARN_ON(1); + return -EINVAL; + } + memset(ar->bmi.cmd_buf, 0, size); + + ath6kl_dbg(ATH6KL_DBG_BMI, "bmi set app start: addr: 0x%x\n", addr); + + offset = 0; + memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); + offset += sizeof(cid); + memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); + offset += sizeof(addr); + + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + if (ret) { + ath6kl_err("Unable to write to the device: %d\n", ret); + return ret; + } + + return 0; +} + +int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param) +{ + u32 cid = BMI_READ_SOC_REGISTER; + int ret; + u32 offset; + u16 size; + + if (ar->bmi.done_sent) { + ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); + return -EACCES; + } + + size = sizeof(cid) + sizeof(addr); + if (size > ar->bmi.max_cmd_size) { + WARN_ON(1); + return -EINVAL; + } + memset(ar->bmi.cmd_buf, 0, size); + + ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read SOC reg: addr: 0x%x\n", addr); + + offset = 0; + memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); + offset += sizeof(cid); + memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); + offset += sizeof(addr); + + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + if (ret) { + ath6kl_err("Unable to write to the device: %d\n", ret); + return ret; + } + + ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param)); + if (ret) { + ath6kl_err("Unable to read from the device: %d\n", ret); + return ret; + } + memcpy(param, ar->bmi.cmd_buf, sizeof(*param)); + + return 0; +} + +int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param) +{ + u32 cid = BMI_WRITE_SOC_REGISTER; + int ret; + u32 offset; + u16 size; + + if (ar->bmi.done_sent) { + ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); + return -EACCES; + } + + size = sizeof(cid) + sizeof(addr) + sizeof(param); + if (size > ar->bmi.max_cmd_size) { + WARN_ON(1); + return -EINVAL; + } + memset(ar->bmi.cmd_buf, 0, size); + + ath6kl_dbg(ATH6KL_DBG_BMI, + "bmi write SOC reg: addr: 0x%x, param: %d\n", 
+ addr, param); + + offset = 0; + memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); + offset += sizeof(cid); + memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); + offset += sizeof(addr); + memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param)); + offset += sizeof(param); + + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + if (ret) { + ath6kl_err("Unable to write to the device: %d\n", ret); + return ret; + } + + return 0; +} + +int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len) +{ + u32 cid = BMI_LZ_DATA; + int ret; + u32 offset; + u32 len_remain, tx_len; + const u32 header = sizeof(cid) + sizeof(len); + u16 size; + + if (ar->bmi.done_sent) { + ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); + return -EACCES; + } + + size = ar->bmi.max_data_size + header; + if (size > ar->bmi.max_cmd_size) { + WARN_ON(1); + return -EINVAL; + } + memset(ar->bmi.cmd_buf, 0, size); + + ath6kl_dbg(ATH6KL_DBG_BMI, "bmi send LZ data: len: %d)\n", + len); + + len_remain = len; + while (len_remain) { + tx_len = (len_remain < (ar->bmi.max_data_size - header)) ? + len_remain : (ar->bmi.max_data_size - header); + + offset = 0; + memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); + offset += sizeof(cid); + memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len)); + offset += sizeof(tx_len); + memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain], + tx_len); + offset += tx_len; + + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + if (ret) { + ath6kl_err("Unable to write to the device: %d\n", + ret); + return ret; + } + + len_remain -= tx_len; + } + + return 0; +} + +int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr) +{ + u32 cid = BMI_LZ_STREAM_START; + int ret; + u32 offset; + u16 size; + + if (ar->bmi.done_sent) { + ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); + return -EACCES; + } + + size = sizeof(cid) + sizeof(addr); + if (size > ar->bmi.max_cmd_size) { + WARN_ON(1); + return -EINVAL; + } + memset(ar->bmi.cmd_buf, 0, size); + + ath6kl_dbg(ATH6KL_DBG_BMI, + "bmi LZ stream start: addr: 0x%x)\n", + addr); + + offset = 0; + memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); + offset += sizeof(cid); + memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); + offset += sizeof(addr); + + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + if (ret) { + ath6kl_err("Unable to start LZ stream to the device: %d\n", + ret); + return ret; + } + + return 0; +} + +int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) +{ + int ret; + u32 last_word = 0; + u32 last_word_offset = len & ~0x3; + u32 unaligned_bytes = len & 0x3; + + ret = ath6kl_bmi_lz_stream_start(ar, addr); + if (ret) + return ret; + + if (unaligned_bytes) { + /* copy the last word into a zero padded buffer */ + memcpy(&last_word, &buf[last_word_offset], unaligned_bytes); + } + + ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset); + if (ret) + return ret; + + if (unaligned_bytes) + ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4); + + if (!ret) { + /* Close compressed stream and open a new (fake) one. + * This serves mainly to flush Target caches.
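+ * + * By this point any unaligned tail of the image has already been + * sent above as a single zero padded 4-byte word, so the stream + * always ends on a 4-byte boundary.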
*/ + ret = ath6kl_bmi_lz_stream_start(ar, 0x00); + } + return ret; +} + +void ath6kl_bmi_reset(struct ath6kl *ar) +{ + ar->bmi.done_sent = false; +} + +int ath6kl_bmi_init(struct ath6kl *ar) +{ + if (WARN_ON(ar->bmi.max_data_size == 0)) + return -EINVAL; + + /* cmd + addr + len + data_size */ + ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3); + + ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_KERNEL); + if (!ar->bmi.cmd_buf) + return -ENOMEM; + + return 0; +} + +void ath6kl_bmi_cleanup(struct ath6kl *ar) +{ + kfree(ar->bmi.cmd_buf); + ar->bmi.cmd_buf = NULL; +} diff --git a/drivers/net/wireless/ath/ath6kl/bmi.h b/drivers/net/wireless/ath/ath6kl/bmi.h new file mode 100644 index 000000000..397a52f26 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/bmi.h @@ -0,0 +1,271 @@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef BMI_H +#define BMI_H + +/* + * Bootloader Messaging Interface (BMI) + * + * BMI is a very simple messaging interface used during initialization + * to read memory, write memory, execute code, and to define an + * application entry PC. + * + * It is used to download an application to ATH6KL, to provide + * patches to code that is already resident on ATH6KL, and generally + * to examine and modify state. The Host has an opportunity to use + * BMI only once during bootup. Once the Host issues a BMI_DONE + * command, this opportunity ends. + * + * The Host writes BMI requests to mailbox0, and reads BMI responses + * from mailbox0. BMI requests all begin with a command + * (see below for specific commands), and are followed by + * command-specific data. + * + * Flow control: + * The Host can only issue a command once the Target gives it a + * "BMI Command Credit", using ATH6KL Counter #4. As soon as the + * Target has completed a command, it issues another BMI Command + * Credit (so the Host can issue the next command). + * + * BMI handles all required Target-side cache flushing. 
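+ * + * For illustration (derived from the per-command formats below), a + * BMI_READ_MEMORY exchange consists of the Host writing three + * 32-bit words -- the command id, the address and the length -- + * to mailbox0 and then reading back "length" bytes of data.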
+ */ + +/* BMI Commands */ + +#define BMI_NO_COMMAND 0 + +#define BMI_DONE 1 +/* + * Semantics: Host is done using BMI + * Request format: + * u32 command (BMI_DONE) + * Response format: none + */ + +#define BMI_READ_MEMORY 2 +/* + * Semantics: Host reads ATH6KL memory + * Request format: + * u32 command (BMI_READ_MEMORY) + * u32 address + * u32 length, at most BMI_DATASZ_MAX + * Response format: + * u8 data[length] + */ + +#define BMI_WRITE_MEMORY 3 +/* + * Semantics: Host writes ATH6KL memory + * Request format: + * u32 command (BMI_WRITE_MEMORY) + * u32 address + * u32 length, at most BMI_DATASZ_MAX + * u8 data[length] + * Response format: none + */ + +#define BMI_EXECUTE 4 +/* + * Semantics: Causes ATH6KL to execute code + * Request format: + * u32 command (BMI_EXECUTE) + * u32 address + * u32 parameter + * Response format: + * u32 return value + */ + +#define BMI_SET_APP_START 5 +/* + * Semantics: Set Target application starting address + * Request format: + * u32 command (BMI_SET_APP_START) + * u32 address + * Response format: none + */ + +#define BMI_READ_SOC_REGISTER 6 +/* + * Semantics: Read a 32-bit Target SOC register. + * Request format: + * u32 command (BMI_READ_REGISTER) + * u32 address + * Response format: + * u32 value + */ + +#define BMI_WRITE_SOC_REGISTER 7 +/* + * Semantics: Write a 32-bit Target SOC register. + * Request format: + * u32 command (BMI_WRITE_REGISTER) + * u32 address + * u32 value + * + * Response format: none + */ + +#define BMI_GET_TARGET_ID 8 +#define BMI_GET_TARGET_INFO 8 +/* + * Semantics: Fetch the 4-byte Target information + * Request format: + * u32 command (BMI_GET_TARGET_ID/INFO) + * Response format1 (old firmware): + * u32 TargetVersionID + * Response format2 (newer firmware): + * u32 TARGET_VERSION_SENTINAL + * struct bmi_target_info; + */ + +#define TARGET_VERSION_SENTINAL 0xffffffff +#define TARGET_TYPE_AR6003 3 +#define TARGET_TYPE_AR6004 5 +#define BMI_ROMPATCH_INSTALL 9 +/* + * Semantics: Install a ROM Patch. + * Request format: + * u32 command (BMI_ROMPATCH_INSTALL) + * u32 Target ROM Address + * u32 Target RAM Address or Value (depending on Target Type) + * u32 Size, in bytes + * u32 Activate? 1-->activate; + * 0-->install but do not activate + * Response format: + * u32 PatchID + */ + +#define BMI_ROMPATCH_UNINSTALL 10 +/* + * Semantics: Uninstall a previously-installed ROM Patch, + * automatically deactivating, if necessary. + * Request format: + * u32 command (BMI_ROMPATCH_UNINSTALL) + * u32 PatchID + * + * Response format: none + */ + +#define BMI_ROMPATCH_ACTIVATE 11 +/* + * Semantics: Activate a list of previously-installed ROM Patches. + * Request format: + * u32 command (BMI_ROMPATCH_ACTIVATE) + * u32 rompatch_count + * u32 PatchID[rompatch_count] + * + * Response format: none + */ + +#define BMI_ROMPATCH_DEACTIVATE 12 +/* + * Semantics: Deactivate a list of active ROM Patches. + * Request format: + * u32 command (BMI_ROMPATCH_DEACTIVATE) + * u32 rompatch_count + * u32 PatchID[rompatch_count] + * + * Response format: none + */ + + +#define BMI_LZ_STREAM_START 13 +/* + * Semantics: Begin an LZ-compressed stream of input + * which is to be uncompressed by the Target to an + * output buffer at address. The output buffer must + * be sufficiently large to hold the uncompressed + * output from the compressed input stream. This BMI + * command should be followed by a series of 1 or more + * BMI_LZ_DATA commands. + * u32 command (BMI_LZ_STREAM_START) + * u32 address + * Note: Not supported on all versions of ROM firmware. 
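+ * + * ath6kl_bmi_fast_download() in bmi.c is the typical user: it + * starts a stream at the firmware load address, pushes the image + * in BMI_LZ_DATA chunks, and finally starts a dummy stream at + * address 0 to flush the Target caches.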
+ */ + +#define BMI_LZ_DATA 14 +/* + * Semantics: Host writes ATH6KL memory with LZ-compressed + * data which is uncompressed by the Target. This command + * must be preceded by a BMI_LZ_STREAM_START command. A series + * of BMI_LZ_DATA commands are considered part of a single + * input stream until another BMI_LZ_STREAM_START is issued. + * Request format: + * u32 command (BMI_LZ_DATA) + * u32 length (of compressed data), + * at most BMI_DATASZ_MAX + * u8 CompressedData[length] + * Response format: none + * Note: Not supported on all versions of ROM firmware. + */ + +#define BMI_COMMUNICATION_TIMEOUT 1000 /* in msec */ + +struct ath6kl; +struct ath6kl_bmi_target_info { + __le32 byte_count; /* size of this structure */ + __le32 version; /* target version id */ + __le32 type; /* target type */ +} __packed; + +#define ath6kl_bmi_write_hi32(ar, item, val) \ + ({ \ + u32 addr; \ + __le32 v; \ + \ + addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item)); \ + v = cpu_to_le32(val); \ + ath6kl_bmi_write(ar, addr, (u8 *) &v, sizeof(v)); \ + }) + +#define ath6kl_bmi_read_hi32(ar, item, val) \ + ({ \ + u32 addr, *check_type = val; \ + __le32 tmp; \ + int ret; \ + \ + (void) (check_type == val); \ + addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item)); \ + ret = ath6kl_bmi_read(ar, addr, (u8 *) &tmp, 4); \ + if (!ret) \ + *val = le32_to_cpu(tmp); \ + ret; \ + }) + +int ath6kl_bmi_init(struct ath6kl *ar); +void ath6kl_bmi_cleanup(struct ath6kl *ar); +void ath6kl_bmi_reset(struct ath6kl *ar); + +int ath6kl_bmi_done(struct ath6kl *ar); +int ath6kl_bmi_get_target_info(struct ath6kl *ar, + struct ath6kl_bmi_target_info *targ_info); +int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len); +int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len); +int ath6kl_bmi_execute(struct ath6kl *ar, + u32 addr, u32 *param); +int ath6kl_bmi_set_app_start(struct ath6kl *ar, + u32 addr); +int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param); +int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param); +int ath6kl_bmi_lz_data(struct ath6kl *ar, + u8 *buf, u32 len); +int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, + u32 addr); +int ath6kl_bmi_fast_download(struct ath6kl *ar, + u32 addr, u8 *buf, u32 len); +#endif diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c new file mode 100644 index 000000000..a20e0aeae --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -0,0 +1,4040 @@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/moduleparam.h> +#include <linux/inetdevice.h> +#include <linux/export.h> +#include <linux/sched/signal.h> + +#include "core.h" +#include "cfg80211.h" +#include "debug.h" +#include "hif-ops.h" +#include "testmode.h" + +#define RATETAB_ENT(_rate, _rateid, _flags) { \ + .bitrate = (_rate), \ + .flags = (_flags), \ + .hw_value = (_rateid), \ +} + +#define CHAN2G(_channel, _freq, _flags) { \ + .band = NL80211_BAND_2GHZ, \ + .hw_value = (_channel), \ + .center_freq = (_freq), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define CHAN5G(_channel, _flags) { \ + .band = NL80211_BAND_5GHZ, \ + .hw_value = (_channel), \ + .center_freq = 5000 + (5 * (_channel)), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define DEFAULT_BG_SCAN_PERIOD 60 + +struct ath6kl_cfg80211_match_probe_ssid { + struct cfg80211_ssid ssid; + u8 flag; +}; + +static struct ieee80211_rate ath6kl_rates[] = { + RATETAB_ENT(10, 0x1, 0), + RATETAB_ENT(20, 0x2, 0), + RATETAB_ENT(55, 0x4, 0), + RATETAB_ENT(110, 0x8, 0), + RATETAB_ENT(60, 0x10, 0), + RATETAB_ENT(90, 0x20, 0), + RATETAB_ENT(120, 0x40, 0), + RATETAB_ENT(180, 0x80, 0), + RATETAB_ENT(240, 0x100, 0), + RATETAB_ENT(360, 0x200, 0), + RATETAB_ENT(480, 0x400, 0), + RATETAB_ENT(540, 0x800, 0), +}; + +#define ath6kl_a_rates (ath6kl_rates + 4) +#define ath6kl_a_rates_size 8 +#define ath6kl_g_rates (ath6kl_rates + 0) +#define ath6kl_g_rates_size 12 + +#define ath6kl_g_htcap IEEE80211_HT_CAP_SGI_20 +#define ath6kl_a_htcap (IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \ + IEEE80211_HT_CAP_SGI_20 | \ + IEEE80211_HT_CAP_SGI_40) + +static struct ieee80211_channel ath6kl_2ghz_channels[] = { + CHAN2G(1, 2412, 0), + CHAN2G(2, 2417, 0), + CHAN2G(3, 2422, 0), + CHAN2G(4, 2427, 0), + CHAN2G(5, 2432, 0), + CHAN2G(6, 2437, 0), + CHAN2G(7, 2442, 0), + CHAN2G(8, 2447, 0), + CHAN2G(9, 2452, 0), + CHAN2G(10, 2457, 0), + CHAN2G(11, 2462, 0), + CHAN2G(12, 2467, 0), + CHAN2G(13, 2472, 0), + CHAN2G(14, 2484, 0), +}; + +static struct ieee80211_channel ath6kl_5ghz_a_channels[] = { + CHAN5G(36, 0), CHAN5G(40, 0), + CHAN5G(44, 0), CHAN5G(48, 0), + CHAN5G(52, 0), CHAN5G(56, 0), + CHAN5G(60, 0), CHAN5G(64, 0), + CHAN5G(100, 0), CHAN5G(104, 0), + CHAN5G(108, 0), CHAN5G(112, 0), + CHAN5G(116, 0), CHAN5G(120, 0), + CHAN5G(124, 0), CHAN5G(128, 0), + CHAN5G(132, 0), CHAN5G(136, 0), + CHAN5G(140, 0), CHAN5G(149, 0), + CHAN5G(153, 0), CHAN5G(157, 0), + CHAN5G(161, 0), CHAN5G(165, 0), + CHAN5G(184, 0), CHAN5G(188, 0), + CHAN5G(192, 0), CHAN5G(196, 0), + CHAN5G(200, 0), CHAN5G(204, 0), + CHAN5G(208, 0), CHAN5G(212, 0), + CHAN5G(216, 0), +}; + +static struct ieee80211_supported_band ath6kl_band_2ghz = { + .n_channels = ARRAY_SIZE(ath6kl_2ghz_channels), + .channels = ath6kl_2ghz_channels, + .n_bitrates = ath6kl_g_rates_size, + .bitrates = ath6kl_g_rates, + .ht_cap.cap = ath6kl_g_htcap, + .ht_cap.ht_supported = true, +}; + +static struct ieee80211_supported_band ath6kl_band_5ghz = { + .n_channels = ARRAY_SIZE(ath6kl_5ghz_a_channels), + .channels = ath6kl_5ghz_a_channels, + .n_bitrates = ath6kl_a_rates_size, + .bitrates = ath6kl_a_rates, + .ht_cap.cap = ath6kl_a_htcap, + .ht_cap.ht_supported = true, +}; + +#define CCKM_KRK_CIPHER_SUITE 0x004096ff /* use for KRK */ + +/* returns true if scheduled scan was stopped */ +static bool __ath6kl_cfg80211_sscan_stop(struct ath6kl_vif *vif) +{ + struct ath6kl *ar = vif->ar; + + if (!test_and_clear_bit(SCHED_SCANNING, &vif->flags)) + return false; + + 
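/* stop the sched scan timer before disabling the scan in firmware */ +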
del_timer_sync(&vif->sched_scan_timer); + + if (ar->state == ATH6KL_STATE_RECOVERY) + return true; + + ath6kl_wmi_enable_sched_scan_cmd(ar->wmi, vif->fw_vif_idx, false); + + return true; +} + +static void ath6kl_cfg80211_sscan_disable(struct ath6kl_vif *vif) +{ + struct ath6kl *ar = vif->ar; + bool stopped; + + stopped = __ath6kl_cfg80211_sscan_stop(vif); + + if (!stopped) + return; + + cfg80211_sched_scan_stopped(ar->wiphy, 0); +} + +static int ath6kl_set_wpa_version(struct ath6kl_vif *vif, + enum nl80211_wpa_versions wpa_version) +{ + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version); + + if (!wpa_version) { + vif->auth_mode = NONE_AUTH; + } else if (wpa_version & NL80211_WPA_VERSION_2) { + vif->auth_mode = WPA2_AUTH; + } else if (wpa_version & NL80211_WPA_VERSION_1) { + vif->auth_mode = WPA_AUTH; + } else { + ath6kl_err("%s: %u not supported\n", __func__, wpa_version); + return -ENOTSUPP; + } + + return 0; +} + +static int ath6kl_set_auth_type(struct ath6kl_vif *vif, + enum nl80211_auth_type auth_type) +{ + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type); + + switch (auth_type) { + case NL80211_AUTHTYPE_OPEN_SYSTEM: + vif->dot11_auth_mode = OPEN_AUTH; + break; + case NL80211_AUTHTYPE_SHARED_KEY: + vif->dot11_auth_mode = SHARED_AUTH; + break; + case NL80211_AUTHTYPE_NETWORK_EAP: + vif->dot11_auth_mode = LEAP_AUTH; + break; + + case NL80211_AUTHTYPE_AUTOMATIC: + vif->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH; + break; + + default: + ath6kl_err("%s: 0x%x not supported\n", __func__, auth_type); + return -ENOTSUPP; + } + + return 0; +} + +static int ath6kl_set_cipher(struct ath6kl_vif *vif, u32 cipher, bool ucast) +{ + u8 *ar_cipher = ucast ? &vif->prwise_crypto : &vif->grp_crypto; + u8 *ar_cipher_len = ucast ? &vif->prwise_crypto_len : + &vif->grp_crypto_len; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n", + __func__, cipher, ucast); + + switch (cipher) { + case 0: + /* our own hack to use value 0 as no crypto used */ + *ar_cipher = NONE_CRYPT; + *ar_cipher_len = 0; + break; + case WLAN_CIPHER_SUITE_WEP40: + *ar_cipher = WEP_CRYPT; + *ar_cipher_len = 5; + break; + case WLAN_CIPHER_SUITE_WEP104: + *ar_cipher = WEP_CRYPT; + *ar_cipher_len = 13; + break; + case WLAN_CIPHER_SUITE_TKIP: + *ar_cipher = TKIP_CRYPT; + *ar_cipher_len = 0; + break; + case WLAN_CIPHER_SUITE_CCMP: + *ar_cipher = AES_CRYPT; + *ar_cipher_len = 0; + break; + case WLAN_CIPHER_SUITE_SMS4: + *ar_cipher = WAPI_CRYPT; + *ar_cipher_len = 0; + break; + default: + ath6kl_err("cipher 0x%x not supported\n", cipher); + return -ENOTSUPP; + } + + return 0; +} + +static void ath6kl_set_key_mgmt(struct ath6kl_vif *vif, u32 key_mgmt) +{ + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt); + + if (key_mgmt == WLAN_AKM_SUITE_PSK) { + if (vif->auth_mode == WPA_AUTH) + vif->auth_mode = WPA_PSK_AUTH; + else if (vif->auth_mode == WPA2_AUTH) + vif->auth_mode = WPA2_PSK_AUTH; + } else if (key_mgmt == 0x00409600) { + if (vif->auth_mode == WPA_AUTH) + vif->auth_mode = WPA_AUTH_CCKM; + else if (vif->auth_mode == WPA2_AUTH) + vif->auth_mode = WPA2_AUTH_CCKM; + } else if (key_mgmt != WLAN_AKM_SUITE_8021X) { + vif->auth_mode = NONE_AUTH; + } +} + +static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif) +{ + struct ath6kl *ar = vif->ar; + + if (!test_bit(WMI_READY, &ar->flag)) { + ath6kl_err("wmi is not ready\n"); + return false; + } + + if (!test_bit(WLAN_ENABLED, &vif->flags)) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "wlan disabled\n"); + return false; + } + + return true; +} + 
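+/* + * IE classifiers used below when filtering the assoc request IEs: + * WPA is the vendor specific element with OUI 00:50:f2 and type 1, + * WPS is the same OUI with type 4, and RSN is element ID 48 + * (WLAN_EID_RSN). + */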
+static bool ath6kl_is_wpa_ie(const u8 *pos) +{ + return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 && + pos[2] == 0x00 && pos[3] == 0x50 && + pos[4] == 0xf2 && pos[5] == 0x01; +} + +static bool ath6kl_is_rsn_ie(const u8 *pos) +{ + return pos[0] == WLAN_EID_RSN; +} + +static bool ath6kl_is_wps_ie(const u8 *pos) +{ + return (pos[0] == WLAN_EID_VENDOR_SPECIFIC && + pos[1] >= 4 && + pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2 && + pos[5] == 0x04); +} + +static int ath6kl_set_assoc_req_ies(struct ath6kl_vif *vif, const u8 *ies, + size_t ies_len) +{ + struct ath6kl *ar = vif->ar; + const u8 *pos; + u8 *buf = NULL; + size_t len = 0; + int ret; + + /* + * Clear previously set flag + */ + + ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG; + + /* + * Filter out RSN/WPA IE(s) + */ + + if (ies && ies_len) { + buf = kmalloc(ies_len, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + pos = ies; + + while (pos + 1 < ies + ies_len) { + if (pos + 2 + pos[1] > ies + ies_len) + break; + if (!(ath6kl_is_wpa_ie(pos) || ath6kl_is_rsn_ie(pos))) { + memcpy(buf + len, pos, 2 + pos[1]); + len += 2 + pos[1]; + } + + if (ath6kl_is_wps_ie(pos)) + ar->connect_ctrl_flags |= CONNECT_WPS_FLAG; + + pos += 2 + pos[1]; + } + } + + ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, + WMI_FRAME_ASSOC_REQ, buf, len); + kfree(buf); + return ret; +} + +static int ath6kl_nliftype_to_drv_iftype(enum nl80211_iftype type, u8 *nw_type) +{ + switch (type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + *nw_type = INFRA_NETWORK; + break; + case NL80211_IFTYPE_ADHOC: + *nw_type = ADHOC_NETWORK; + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + *nw_type = AP_NETWORK; + break; + default: + ath6kl_err("invalid interface type %u\n", type); + return -ENOTSUPP; + } + + return 0; +} + +static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type, + u8 *if_idx, u8 *nw_type) +{ + int i; + + if (ath6kl_nliftype_to_drv_iftype(type, nw_type)) + return false; + + if (ar->ibss_if_active || ((type == NL80211_IFTYPE_ADHOC) && + ar->num_vif)) + return false; + + if (type == NL80211_IFTYPE_STATION || + type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) { + for (i = 0; i < ar->vif_max; i++) { + if ((ar->avail_idx_map) & BIT(i)) { + *if_idx = i; + return true; + } + } + } + + if (type == NL80211_IFTYPE_P2P_CLIENT || + type == NL80211_IFTYPE_P2P_GO) { + for (i = ar->max_norm_iface; i < ar->vif_max; i++) { + if ((ar->avail_idx_map) & BIT(i)) { + *if_idx = i; + return true; + } + } + } + + return false; +} + +static bool ath6kl_is_tx_pending(struct ath6kl *ar) +{ + return ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0; +} + +static void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, + bool enable) +{ + int err; + + if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag))) + return; + + if (vif->nw_type != INFRA_NETWORK) + return; + + if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE, + vif->ar->fw_capabilities)) + return; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n", + enable ? "enable" : "disable"); + + err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi, + vif->fw_vif_idx, enable); + if (err) + ath6kl_err("failed to %s enhanced bmiss detection: %d\n", + enable ? "enable" : "disable", err); +} + +static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_connect_params *sme) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); + int status; + u8 nw_subtype = (ar->p2p) ? 
SUBTYPE_P2PDEV : SUBTYPE_NONE; + u16 interval; + + ath6kl_cfg80211_sscan_disable(vif); + + vif->sme_state = SME_CONNECTING; + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) { + ath6kl_err("destroy in progress\n"); + return -EBUSY; + } + + if (test_bit(SKIP_SCAN, &ar->flag) && + ((sme->channel && sme->channel->center_freq == 0) || + (sme->bssid && is_zero_ether_addr(sme->bssid)))) { + ath6kl_err("SkipScan: channel or bssid invalid\n"); + return -EINVAL; + } + + if (down_interruptible(&ar->sem)) { + ath6kl_err("busy, couldn't get access\n"); + return -ERESTARTSYS; + } + + if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) { + ath6kl_err("busy, destroy in progress\n"); + up(&ar->sem); + return -EBUSY; + } + + if (ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)]) { + /* + * sleep until the command queue drains + */ + wait_event_interruptible_timeout(ar->event_wq, + ath6kl_is_tx_pending(ar), + WMI_TIMEOUT); + if (signal_pending(current)) { + ath6kl_err("cmd queue drain timeout\n"); + up(&ar->sem); + return -EINTR; + } + } + + status = ath6kl_set_assoc_req_ies(vif, sme->ie, sme->ie_len); + if (status) { + up(&ar->sem); + return status; + } + + if (sme->ie == NULL || sme->ie_len == 0) + ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG; + + if (test_bit(CONNECTED, &vif->flags) && + vif->ssid_len == sme->ssid_len && + !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) { + vif->reconnect_flag = true; + status = ath6kl_wmi_reconnect_cmd(ar->wmi, vif->fw_vif_idx, + vif->req_bssid, + vif->ch_hint); + + up(&ar->sem); + if (status) { + ath6kl_err("wmi_reconnect_cmd failed\n"); + return -EIO; + } + return 0; + } else if (vif->ssid_len == sme->ssid_len && + !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) { + ath6kl_disconnect(vif); + } + + memset(vif->ssid, 0, sizeof(vif->ssid)); + vif->ssid_len = sme->ssid_len; + memcpy(vif->ssid, sme->ssid, sme->ssid_len); + + if (sme->channel) + vif->ch_hint = sme->channel->center_freq; + + memset(vif->req_bssid, 0, sizeof(vif->req_bssid)); + if (sme->bssid && !is_broadcast_ether_addr(sme->bssid)) + memcpy(vif->req_bssid, sme->bssid, sizeof(vif->req_bssid)); + + ath6kl_set_wpa_version(vif, sme->crypto.wpa_versions); + + status = ath6kl_set_auth_type(vif, sme->auth_type); + if (status) { + up(&ar->sem); + return status; + } + + if (sme->crypto.n_ciphers_pairwise) + ath6kl_set_cipher(vif, sme->crypto.ciphers_pairwise[0], true); + else + ath6kl_set_cipher(vif, 0, true); + + ath6kl_set_cipher(vif, sme->crypto.cipher_group, false); + + if (sme->crypto.n_akm_suites) + ath6kl_set_key_mgmt(vif, sme->crypto.akm_suites[0]); + + if ((sme->key_len) && + (vif->auth_mode == NONE_AUTH) && + (vif->prwise_crypto == WEP_CRYPT)) { + struct ath6kl_key *key = NULL; + + if (sme->key_idx > WMI_MAX_KEY_INDEX) { + ath6kl_err("key index %d out of bounds\n", + sme->key_idx); + up(&ar->sem); + return -ENOENT; + } + + key = &vif->keys[sme->key_idx]; + key->key_len = sme->key_len; + memcpy(key->key, sme->key, key->key_len); + key->cipher = vif->prwise_crypto; + vif->def_txkey_index = sme->key_idx; + + ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, sme->key_idx, + vif->prwise_crypto, + GROUP_USAGE | TX_USAGE, + key->key_len, + NULL, 0, + key->key, KEY_OP_INIT_VAL, NULL, + NO_SYNC_WMIFLAG); + } + + if (!ar->usr_bss_filter) { + clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); + if (ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, + ALL_BSS_FILTER, 0) != 0) { + ath6kl_err("couldn't set bss filtering\n"); + up(&ar->sem); + return -EIO; + } + } + + vif->nw_type = 
vif->next_mode; + + /* enable enhanced bmiss detection if applicable */ + ath6kl_cfg80211_sta_bmiss_enhance(vif, true); + + if (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) + nw_subtype = SUBTYPE_P2PCLIENT; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: connect called with authmode %d dot11 auth %d" + " PW crypto %d PW crypto len %d GRP crypto %d" + " GRP crypto len %d channel hint %u\n", + __func__, + vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto, + vif->prwise_crypto_len, vif->grp_crypto, + vif->grp_crypto_len, vif->ch_hint); + + vif->reconnect_flag = 0; + + if (vif->nw_type == INFRA_NETWORK) { + interval = max_t(u16, vif->listen_intvl_t, + ATH6KL_MAX_WOW_LISTEN_INTL); + status = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, + interval, + 0); + if (status) { + ath6kl_err("couldn't set listen interval\n"); + up(&ar->sem); + return status; + } + } + + status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type, + vif->dot11_auth_mode, vif->auth_mode, + vif->prwise_crypto, + vif->prwise_crypto_len, + vif->grp_crypto, vif->grp_crypto_len, + vif->ssid_len, vif->ssid, + vif->req_bssid, vif->ch_hint, + ar->connect_ctrl_flags, nw_subtype); + + if (sme->bg_scan_period == 0) { + /* disable background scan if period is 0 */ + sme->bg_scan_period = 0xffff; + } else if (sme->bg_scan_period == -1) { + /* configure default value if not specified */ + sme->bg_scan_period = DEFAULT_BG_SCAN_PERIOD; + } + + ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0, 0, + sme->bg_scan_period, 0, 0, 0, 3, 0, 0, 0); + + up(&ar->sem); + + if (status == -EINVAL) { + memset(vif->ssid, 0, sizeof(vif->ssid)); + vif->ssid_len = 0; + ath6kl_err("invalid request\n"); + return -ENOENT; + } else if (status) { + ath6kl_err("ath6kl_wmi_connect_cmd failed\n"); + return -EIO; + } + + if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) && + ((vif->auth_mode == WPA_PSK_AUTH) || + (vif->auth_mode == WPA2_PSK_AUTH))) { + mod_timer(&vif->disconnect_timer, + jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL)); + } + + ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD; + set_bit(CONNECT_PEND, &vif->flags); + + return 0; +} + +static struct cfg80211_bss * +ath6kl_add_bss_if_needed(struct ath6kl_vif *vif, + enum network_type nw_type, + const u8 *bssid, + struct ieee80211_channel *chan, + const u8 *beacon_ie, + size_t beacon_ie_len) +{ + struct ath6kl *ar = vif->ar; + struct cfg80211_bss *bss; + u16 cap_val; + enum ieee80211_bss_type bss_type; + u8 *ie; + + if (nw_type & ADHOC_NETWORK) { + cap_val = WLAN_CAPABILITY_IBSS; + bss_type = IEEE80211_BSS_TYPE_IBSS; + } else { + cap_val = WLAN_CAPABILITY_ESS; + bss_type = IEEE80211_BSS_TYPE_ESS; + } + + bss = cfg80211_get_bss(ar->wiphy, chan, bssid, + vif->ssid, vif->ssid_len, + bss_type, IEEE80211_PRIVACY_ANY); + if (bss == NULL) { + /* + * Since cfg80211 may not yet know about the BSS, + * generate a partial entry until the first BSS info + * event becomes available. + * + * Prepend SSID element since it is not included in the Beacon + * IEs from the target.
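+ * + * The synthesized buffer below is laid out as + * [WLAN_EID_SSID][ssid_len][ssid bytes][beacon IEs from target].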
+ */ + ie = kmalloc(2 + vif->ssid_len + beacon_ie_len, GFP_KERNEL); + if (ie == NULL) + return NULL; + ie[0] = WLAN_EID_SSID; + ie[1] = vif->ssid_len; + memcpy(ie + 2, vif->ssid, vif->ssid_len); + memcpy(ie + 2 + vif->ssid_len, beacon_ie, beacon_ie_len); + bss = cfg80211_inform_bss(ar->wiphy, chan, + CFG80211_BSS_FTYPE_UNKNOWN, + bssid, 0, cap_val, 100, + ie, 2 + vif->ssid_len + beacon_ie_len, + 0, GFP_KERNEL); + if (bss) + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "added bss %pM to cfg80211\n", bssid); + kfree(ie); + } else { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n"); + } + + return bss; +} + +void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel, + u8 *bssid, u16 listen_intvl, + u16 beacon_intvl, + enum network_type nw_type, + u8 beacon_ie_len, u8 assoc_req_len, + u8 assoc_resp_len, u8 *assoc_info) +{ + struct ieee80211_channel *chan; + struct ath6kl *ar = vif->ar; + struct cfg80211_bss *bss; + + /* capinfo + listen interval */ + u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16); + + /* capinfo + status code + associd */ + u8 assoc_resp_ie_offset = sizeof(u16) + sizeof(u16) + sizeof(u16); + + u8 *assoc_req_ie = assoc_info + beacon_ie_len + assoc_req_ie_offset; + u8 *assoc_resp_ie = assoc_info + beacon_ie_len + assoc_req_len + + assoc_resp_ie_offset; + + assoc_req_len -= assoc_req_ie_offset; + assoc_resp_len -= assoc_resp_ie_offset; + + /* + * Store Beacon interval here; DTIM period will be available only once + * a Beacon frame from the AP is seen. + */ + vif->assoc_bss_beacon_int = beacon_intvl; + clear_bit(DTIM_PERIOD_AVAIL, &vif->flags); + + if (nw_type & ADHOC_NETWORK) { + if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: ath6k not in ibss mode\n", __func__); + return; + } + } + + if (nw_type & INFRA_NETWORK) { + if (vif->wdev.iftype != NL80211_IFTYPE_STATION && + vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: ath6k not in station mode\n", __func__); + return; + } + } + + chan = ieee80211_get_channel(ar->wiphy, (int) channel); + + bss = ath6kl_add_bss_if_needed(vif, nw_type, bssid, chan, + assoc_info, beacon_ie_len); + if (!bss) { + ath6kl_err("could not add cfg80211 bss entry\n"); + return; + } + + if (nw_type & ADHOC_NETWORK) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n", + nw_type & ADHOC_CREATOR ? 
"creator" : "joiner"); + cfg80211_ibss_joined(vif->ndev, bssid, chan, GFP_KERNEL); + cfg80211_put_bss(ar->wiphy, bss); + return; + } + + if (vif->sme_state == SME_CONNECTING) { + /* inform connect result to cfg80211 */ + vif->sme_state = SME_CONNECTED; + cfg80211_connect_result(vif->ndev, bssid, + assoc_req_ie, assoc_req_len, + assoc_resp_ie, assoc_resp_len, + WLAN_STATUS_SUCCESS, GFP_KERNEL); + cfg80211_put_bss(ar->wiphy, bss); + } else if (vif->sme_state == SME_CONNECTED) { + struct cfg80211_roam_info roam_info = { + .links[0].bss = bss, + .req_ie = assoc_req_ie, + .req_ie_len = assoc_req_len, + .resp_ie = assoc_resp_ie, + .resp_ie_len = assoc_resp_len, + }; + /* inform roam event to cfg80211 */ + cfg80211_roamed(vif->ndev, &roam_info, GFP_KERNEL); + } +} + +static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy, + struct net_device *dev, u16 reason_code) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__, + reason_code); + + ath6kl_cfg80211_sscan_disable(vif); + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) { + ath6kl_err("busy, destroy in progress\n"); + return -EBUSY; + } + + if (down_interruptible(&ar->sem)) { + ath6kl_err("busy, couldn't get access\n"); + return -ERESTARTSYS; + } + + vif->reconnect_flag = 0; + ath6kl_disconnect(vif); + memset(vif->ssid, 0, sizeof(vif->ssid)); + vif->ssid_len = 0; + + if (!test_bit(SKIP_SCAN, &ar->flag)) + memset(vif->req_bssid, 0, sizeof(vif->req_bssid)); + + up(&ar->sem); + + return 0; +} + +void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason, + u8 *bssid, u8 assoc_resp_len, + u8 *assoc_info, u16 proto_reason) +{ + struct ath6kl *ar = vif->ar; + + if (vif->scan_req) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + + cfg80211_scan_done(vif->scan_req, &info); + vif->scan_req = NULL; + } + + if (vif->nw_type & ADHOC_NETWORK) { + if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: ath6k not in ibss mode\n", __func__); + return; + } + + if (vif->nw_type & INFRA_NETWORK) { + if (vif->wdev.iftype != NL80211_IFTYPE_STATION && + vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: ath6k not in station mode\n", __func__); + return; + } + } + + clear_bit(CONNECT_PEND, &vif->flags); + + if (vif->sme_state == SME_CONNECTING) { + cfg80211_connect_result(vif->ndev, + bssid, NULL, 0, + NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + GFP_KERNEL); + } else if (vif->sme_state == SME_CONNECTED) { + cfg80211_disconnected(vif->ndev, proto_reason, + NULL, 0, false, GFP_KERNEL); + } + + vif->sme_state = SME_DISCONNECTED; + + /* + * Send a disconnect command to target when a disconnect event is + * received with reason code other than 3 (DISCONNECT_CMD - disconnect + * request from host) to make the firmware stop trying to connect even + * after giving disconnect event. There will be one more disconnect + * event for this disconnect command with reason code DISCONNECT_CMD + * which won't be notified to cfg80211. 
+ */ + if (reason != DISCONNECT_CMD) + ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx); +} + +static int ath6kl_set_probed_ssids(struct ath6kl *ar, + struct ath6kl_vif *vif, + struct cfg80211_ssid *ssids, int n_ssids, + struct cfg80211_match_set *match_set, + int n_match_ssid) +{ + u8 i, j, index_to_add, ssid_found = false; + struct ath6kl_cfg80211_match_probe_ssid ssid_list[MAX_PROBED_SSIDS]; + + memset(ssid_list, 0, sizeof(ssid_list)); + + if (n_ssids > MAX_PROBED_SSIDS || + n_match_ssid > MAX_PROBED_SSIDS) + return -EINVAL; + + for (i = 0; i < n_ssids; i++) { + memcpy(ssid_list[i].ssid.ssid, + ssids[i].ssid, + ssids[i].ssid_len); + ssid_list[i].ssid.ssid_len = ssids[i].ssid_len; + + if (ssids[i].ssid_len) + ssid_list[i].flag = SPECIFIC_SSID_FLAG; + else + ssid_list[i].flag = ANY_SSID_FLAG; + + if (ar->wiphy->max_match_sets != 0 && n_match_ssid == 0) + ssid_list[i].flag |= MATCH_SSID_FLAG; + } + + index_to_add = i; + + for (i = 0; i < n_match_ssid; i++) { + ssid_found = false; + + for (j = 0; j < n_ssids; j++) { + if ((match_set[i].ssid.ssid_len == + ssid_list[j].ssid.ssid_len) && + (!memcmp(ssid_list[j].ssid.ssid, + match_set[i].ssid.ssid, + match_set[i].ssid.ssid_len))) { + ssid_list[j].flag |= MATCH_SSID_FLAG; + ssid_found = true; + break; + } + } + + if (ssid_found) + continue; + + if (index_to_add >= MAX_PROBED_SSIDS) + continue; + + ssid_list[index_to_add].ssid.ssid_len = + match_set[i].ssid.ssid_len; + memcpy(ssid_list[index_to_add].ssid.ssid, + match_set[i].ssid.ssid, + match_set[i].ssid.ssid_len); + ssid_list[index_to_add].flag |= MATCH_SSID_FLAG; + index_to_add++; + } + + for (i = 0; i < index_to_add; i++) { + ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i, + ssid_list[i].flag, + ssid_list[i].ssid.ssid_len, + ssid_list[i].ssid.ssid); + } + + /* Make sure no old entries are left behind */ + for (i = index_to_add; i < MAX_PROBED_SSIDS; i++) { + ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i, + DISABLE_SSID_FLAG, 0, NULL); + } + + return 0; +} + +static int ath6kl_cfg80211_scan(struct wiphy *wiphy, + struct cfg80211_scan_request *request) +{ + struct ath6kl_vif *vif = ath6kl_vif_from_wdev(request->wdev); + struct ath6kl *ar = ath6kl_priv(vif->ndev); + s8 n_channels = 0; + u16 *channels = NULL; + int ret = 0; + u32 force_fg_scan = 0; + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + ath6kl_cfg80211_sscan_disable(vif); + + if (!ar->usr_bss_filter) { + clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); + ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, + ALL_BSS_FILTER, 0); + if (ret) { + ath6kl_err("couldn't set bss filtering\n"); + return ret; + } + } + + ret = ath6kl_set_probed_ssids(ar, vif, request->ssids, + request->n_ssids, NULL, 0); + if (ret < 0) + return ret; + + /* this also clears IE in fw if it's not set */ + ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, + WMI_FRAME_PROBE_REQ, + request->ie, request->ie_len); + if (ret) { + ath6kl_err("failed to set Probe Request appie for scan\n"); + return ret; + } + + /* + * Scan only the requested channels if the request specifies a set of + * channels. If the list is longer than the target supports, do not + * configure the list and instead, scan all available channels. 
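+ * + * (A failed channel list allocation below takes the same path: + * n_channels is reset to 0, which tells the target to scan all + * the channels it supports.)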
+ */ + if (request->n_channels > 0 && + request->n_channels <= WMI_MAX_CHANNELS) { + u8 i; + + n_channels = request->n_channels; + + channels = kcalloc(n_channels, sizeof(u16), GFP_KERNEL); + if (channels == NULL) { + ath6kl_warn("failed to set scan channels, scan all channels"); + n_channels = 0; + } + + for (i = 0; i < n_channels; i++) + channels[i] = request->channels[i]->center_freq; + } + + if (test_bit(CONNECTED, &vif->flags)) + force_fg_scan = 1; + + vif->scan_req = request; + + ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx, + WMI_LONG_SCAN, force_fg_scan, + false, 0, + ATH6KL_FG_SCAN_INTERVAL, + n_channels, channels, + request->no_cck, + request->rates); + if (ret) { + ath6kl_err("failed to start scan: %d\n", ret); + vif->scan_req = NULL; + } + + kfree(channels); + + return ret; +} + +void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted) +{ + struct ath6kl *ar = vif->ar; + struct cfg80211_scan_info info = { + .aborted = aborted, + }; + int i; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status%s\n", __func__, + aborted ? " aborted" : ""); + + if (!vif->scan_req) + return; + + if (aborted) + goto out; + + if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) { + for (i = 0; i < vif->scan_req->n_ssids; i++) { + ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, + i, DISABLE_SSID_FLAG, + 0, NULL); + } + } + +out: + cfg80211_scan_done(vif->scan_req, &info); + vif->scan_req = NULL; +} + +void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq, + enum wmi_phy_mode mode) +{ + struct cfg80211_chan_def chandef; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "channel switch notify nw_type %d freq %d mode %d\n", + vif->nw_type, freq, mode); + + cfg80211_chandef_create(&chandef, + ieee80211_get_channel(vif->ar->wiphy, freq), + (mode == WMI_11G_HT20 && + ath6kl_band_2ghz.ht_cap.ht_supported) ? 
+ NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT); + + mutex_lock(&vif->wdev.mtx); + cfg80211_ch_switch_notify(vif->ndev, &chandef, 0); + mutex_unlock(&vif->wdev.mtx); +} + +static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, + int link_id, u8 key_index, bool pairwise, + const u8 *mac_addr, + struct key_params *params) +{ + struct ath6kl *ar = ath6kl_priv(ndev); + struct ath6kl_vif *vif = netdev_priv(ndev); + struct ath6kl_key *key = NULL; + int seq_len; + u8 key_usage; + u8 key_type; + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + if (params->cipher == CCKM_KRK_CIPHER_SUITE) { + if (params->key_len != WMI_KRK_LEN) + return -EINVAL; + return ath6kl_wmi_add_krk_cmd(ar->wmi, vif->fw_vif_idx, + params->key); + } + + if (key_index > WMI_MAX_KEY_INDEX) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: key index %d out of bounds\n", __func__, + key_index); + return -ENOENT; + } + + key = &vif->keys[key_index]; + memset(key, 0, sizeof(struct ath6kl_key)); + + if (pairwise) + key_usage = PAIRWISE_USAGE; + else + key_usage = GROUP_USAGE; + + seq_len = params->seq_len; + if (params->cipher == WLAN_CIPHER_SUITE_SMS4 && + seq_len > ATH6KL_KEY_SEQ_LEN) { + /* Only first half of the WPI PN is configured */ + seq_len = ATH6KL_KEY_SEQ_LEN; + } + if (params->key_len > WLAN_MAX_KEY_LEN || + seq_len > sizeof(key->seq)) + return -EINVAL; + + key->key_len = params->key_len; + memcpy(key->key, params->key, key->key_len); + key->seq_len = seq_len; + memcpy(key->seq, params->seq, key->seq_len); + key->cipher = params->cipher; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + key_type = WEP_CRYPT; + break; + + case WLAN_CIPHER_SUITE_TKIP: + key_type = TKIP_CRYPT; + break; + + case WLAN_CIPHER_SUITE_CCMP: + key_type = AES_CRYPT; + break; + case WLAN_CIPHER_SUITE_SMS4: + key_type = WAPI_CRYPT; + break; + + default: + return -ENOTSUPP; + } + + if (((vif->auth_mode == WPA_PSK_AUTH) || + (vif->auth_mode == WPA2_PSK_AUTH)) && + (key_usage & GROUP_USAGE)) + del_timer(&vif->disconnect_timer); + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n", + __func__, key_index, key->key_len, key_type, + key_usage, key->seq_len); + + if (vif->nw_type == AP_NETWORK && !pairwise && + (key_type == TKIP_CRYPT || key_type == AES_CRYPT || + key_type == WAPI_CRYPT)) { + ar->ap_mode_bkey.valid = true; + ar->ap_mode_bkey.key_index = key_index; + ar->ap_mode_bkey.key_type = key_type; + ar->ap_mode_bkey.key_len = key->key_len; + memcpy(ar->ap_mode_bkey.key, key->key, key->key_len); + if (!test_bit(CONNECTED, &vif->flags)) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "Delay initial group key configuration until AP mode has been started\n"); + /* + * The key will be set in ath6kl_connect_ap_mode() once + * the connected event is received from the target. + */ + return 0; + } + } + + if (vif->next_mode == AP_NETWORK && key_type == WEP_CRYPT && + !test_bit(CONNECTED, &vif->flags)) { + /* + * Store the key locally so that it can be re-configured after + * the AP mode has properly started + * (ath6kl_install_statioc_wep_keys). 
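+ * The entry saved in vif->wep_key_list below is replayed to the + * target once the AP mode connected event arrives.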
+ */ + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "Delay WEP key configuration until AP mode has been started\n"); + vif->wep_key_list[key_index].key_len = key->key_len; + memcpy(vif->wep_key_list[key_index].key, key->key, + key->key_len); + return 0; + } + + return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, key_index, + key_type, key_usage, key->key_len, + key->seq, key->seq_len, key->key, + KEY_OP_INIT_VAL, + (u8 *) mac_addr, SYNC_BOTH_WMIFLAG); +} + +static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, + int link_id, u8 key_index, bool pairwise, + const u8 *mac_addr) +{ + struct ath6kl *ar = ath6kl_priv(ndev); + struct ath6kl_vif *vif = netdev_priv(ndev); + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index); + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + if (key_index > WMI_MAX_KEY_INDEX) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: key index %d out of bounds\n", __func__, + key_index); + return -ENOENT; + } + + if (!vif->keys[key_index].key_len) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: index %d is empty\n", __func__, key_index); + return 0; + } + + vif->keys[key_index].key_len = 0; + + return ath6kl_wmi_deletekey_cmd(ar->wmi, vif->fw_vif_idx, key_index); +} + +static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, + int link_id, u8 key_index, bool pairwise, + const u8 *mac_addr, void *cookie, + void (*callback) (void *cookie, + struct key_params *)) +{ + struct ath6kl_vif *vif = netdev_priv(ndev); + struct ath6kl_key *key = NULL; + struct key_params params; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index); + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + if (key_index > WMI_MAX_KEY_INDEX) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: key index %d out of bounds\n", __func__, + key_index); + return -ENOENT; + } + + key = &vif->keys[key_index]; + memset(&params, 0, sizeof(params)); + params.cipher = key->cipher; + params.key_len = key->key_len; + params.seq_len = key->seq_len; + params.seq = key->seq; + params.key = key->key; + + callback(cookie, &params); + + return key->key_len ?
0 : -ENOENT; +} + +static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy, + struct net_device *ndev, int link_id, + u8 key_index, bool unicast, + bool multicast) +{ + struct ath6kl *ar = ath6kl_priv(ndev); + struct ath6kl_vif *vif = netdev_priv(ndev); + struct ath6kl_key *key = NULL; + u8 key_usage; + enum ath6kl_crypto_type key_type = NONE_CRYPT; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index); + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + if (key_index > WMI_MAX_KEY_INDEX) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: key index %d out of bounds\n", + __func__, key_index); + return -ENOENT; + } + + if (!vif->keys[key_index].key_len) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n", + __func__, key_index); + return -EINVAL; + } + + vif->def_txkey_index = key_index; + key = &vif->keys[vif->def_txkey_index]; + key_usage = GROUP_USAGE; + if (vif->prwise_crypto == WEP_CRYPT) + key_usage |= TX_USAGE; + if (unicast) + key_type = vif->prwise_crypto; + if (multicast) + key_type = vif->grp_crypto; + + if (vif->next_mode == AP_NETWORK && !test_bit(CONNECTED, &vif->flags)) + return 0; /* Delay until AP mode has been started */ + + return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, + vif->def_txkey_index, + key_type, key_usage, + key->key_len, key->seq, key->seq_len, + key->key, + KEY_OP_INIT_VAL, NULL, + SYNC_BOTH_WMIFLAG); +} + +void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, + bool ismcast) +{ + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast); + + cfg80211_michael_mic_failure(vif->ndev, vif->bssid, + (ismcast ? NL80211_KEYTYPE_GROUP : + NL80211_KEYTYPE_PAIRWISE), keyid, NULL, + GFP_KERNEL); +} + +static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) +{ + struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); + struct ath6kl_vif *vif; + int ret; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__, + changed); + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + if (changed & WIPHY_PARAM_RTS_THRESHOLD) { + ret = ath6kl_wmi_set_rts_cmd(ar->wmi, wiphy->rts_threshold); + if (ret != 0) { + ath6kl_err("ath6kl_wmi_set_rts_cmd failed\n"); + return -EIO; + } + } + + return 0; +} + +static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy, + struct wireless_dev *wdev, + enum nl80211_tx_power_setting type, + int mbm) +{ + struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); + struct ath6kl_vif *vif; + int dbm = MBM_TO_DBM(mbm); + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__, + type, dbm); + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + switch (type) { + case NL80211_TX_POWER_AUTOMATIC: + return 0; + case NL80211_TX_POWER_LIMITED: + ar->tx_pwr = dbm; + break; + default: + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x not supported\n", + __func__, type); + return -EOPNOTSUPP; + } + + ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, dbm); + + return 0; +} + +static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, + struct wireless_dev *wdev, + int *dbm) +{ + struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); + struct ath6kl_vif *vif; + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + if (test_bit(CONNECTED, &vif->flags)) { + ar->tx_pwr = 255; + + if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) { + 
ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n"); + return -EIO; + } + + wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 255, + 5 * HZ); + + if (signal_pending(current)) { + ath6kl_err("target did not respond\n"); + return -EINTR; + } + } + + *dbm = ar->tx_pwr; + return 0; +} + +static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy, + struct net_device *dev, + bool pmgmt, int timeout) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct wmi_power_mode_cmd mode; + struct ath6kl_vif *vif = netdev_priv(dev); + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n", + __func__, pmgmt, timeout); + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + if (pmgmt) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__); + mode.pwr_mode = REC_POWER; + } else { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__); + mode.pwr_mode = MAX_PERF_POWER; + } + + if (ath6kl_wmi_powermode_cmd(ar->wmi, vif->fw_vif_idx, + mode.pwr_mode) != 0) { + ath6kl_err("wmi_powermode_cmd failed\n"); + return -EIO; + } + + return 0; +} + +static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy, + const char *name, + unsigned char name_assign_type, + enum nl80211_iftype type, + struct vif_params *params) +{ + struct ath6kl *ar = wiphy_priv(wiphy); + struct wireless_dev *wdev; + u8 if_idx, nw_type; + + if (ar->num_vif == ar->vif_max) { + ath6kl_err("Reached maximum number of supported vif\n"); + return ERR_PTR(-EINVAL); + } + + if (!ath6kl_is_valid_iftype(ar, type, &if_idx, &nw_type)) { + ath6kl_err("Not a supported interface type\n"); + return ERR_PTR(-EINVAL); + } + + wdev = ath6kl_interface_add(ar, name, name_assign_type, type, if_idx, nw_type); + if (!wdev) + return ERR_PTR(-ENOMEM); + + ar->num_vif++; + + return wdev; +} + +static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct ath6kl *ar = wiphy_priv(wiphy); + struct ath6kl_vif *vif = netdev_priv(wdev->netdev); + + spin_lock_bh(&ar->list_lock); + list_del(&vif->list); + spin_unlock_bh(&ar->list_lock); + + ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag)); + + rtnl_lock(); + ath6kl_cfg80211_vif_cleanup(vif); + rtnl_unlock(); + + return 0; +} + +static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy, + struct net_device *ndev, + enum nl80211_iftype type, + struct vif_params *params) +{ + struct ath6kl_vif *vif = netdev_priv(ndev); + int i; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type); + + /* + * Don't bring up p2p on an interface which is not initialized + * for p2p operation where fw does not have capability to switch + * dynamically between non-p2p and p2p type interface. 
+ */ + if (!test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, + vif->ar->fw_capabilities) && + (type == NL80211_IFTYPE_P2P_CLIENT || + type == NL80211_IFTYPE_P2P_GO)) { + if (vif->ar->vif_max == 1) { + if (vif->fw_vif_idx != 0) + return -EINVAL; + else + goto set_iface_type; + } + + for (i = vif->ar->max_norm_iface; i < vif->ar->vif_max; i++) { + if (i == vif->fw_vif_idx) + break; + } + + if (i == vif->ar->vif_max) { + ath6kl_err("Invalid interface to bring up P2P\n"); + return -EINVAL; + } + } + + /* need to clean up enhanced bmiss detection fw state */ + ath6kl_cfg80211_sta_bmiss_enhance(vif, false); + +set_iface_type: + switch (type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + vif->next_mode = INFRA_NETWORK; + break; + case NL80211_IFTYPE_ADHOC: + vif->next_mode = ADHOC_NETWORK; + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + vif->next_mode = AP_NETWORK; + break; + default: + ath6kl_err("invalid interface type %u\n", type); + return -EOPNOTSUPP; + } + + vif->wdev.iftype = type; + + return 0; +} + +static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_ibss_params *ibss_param) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); + int status; + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + vif->ssid_len = ibss_param->ssid_len; + memcpy(vif->ssid, ibss_param->ssid, vif->ssid_len); + + if (ibss_param->chandef.chan) + vif->ch_hint = ibss_param->chandef.chan->center_freq; + + if (ibss_param->channel_fixed) { + /* + * TODO: channel_fixed: The channel should be fixed, do not + * search for IBSSs to join on other channels. Target + * firmware does not support this feature, needs to be + * updated. + */ + return -EOPNOTSUPP; + } + + memset(vif->req_bssid, 0, sizeof(vif->req_bssid)); + if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid)) + memcpy(vif->req_bssid, ibss_param->bssid, + sizeof(vif->req_bssid)); + + ath6kl_set_wpa_version(vif, 0); + + status = ath6kl_set_auth_type(vif, NL80211_AUTHTYPE_OPEN_SYSTEM); + if (status) + return status; + + if (ibss_param->privacy) { + ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, true); + ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, false); + } else { + ath6kl_set_cipher(vif, 0, true); + ath6kl_set_cipher(vif, 0, false); + } + + vif->nw_type = vif->next_mode; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: connect called with authmode %d dot11 auth %d" + " PW crypto %d PW crypto len %d GRP crypto %d" + " GRP crypto len %d channel hint %u\n", + __func__, + vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto, + vif->prwise_crypto_len, vif->grp_crypto, + vif->grp_crypto_len, vif->ch_hint); + + status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type, + vif->dot11_auth_mode, vif->auth_mode, + vif->prwise_crypto, + vif->prwise_crypto_len, + vif->grp_crypto, vif->grp_crypto_len, + vif->ssid_len, vif->ssid, + vif->req_bssid, vif->ch_hint, + ar->connect_ctrl_flags, SUBTYPE_NONE); + set_bit(CONNECT_PEND, &vif->flags); + + return 0; +} + +static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy, + struct net_device *dev) +{ + struct ath6kl_vif *vif = netdev_priv(dev); + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + ath6kl_disconnect(vif); + memset(vif->ssid, 0, sizeof(vif->ssid)); + vif->ssid_len = 0; + + return 0; +} + +static const u32 cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + CCKM_KRK_CIPHER_SUITE, + 
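/* CCKM KRK is a vendor cipher (Cisco CCKM Key Request Key); it is + * intercepted in ath6kl_cfg80211_add_key() and programmed through + * ath6kl_wmi_add_krk_cmd() rather than the normal key path. */ +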
WLAN_CIPHER_SUITE_SMS4, +}; + +static bool is_rate_legacy(s32 rate) +{ + static const s32 legacy[] = { 1000, 2000, 5500, 11000, + 6000, 9000, 12000, 18000, 24000, + 36000, 48000, 54000 + }; + u8 i; + + for (i = 0; i < ARRAY_SIZE(legacy); i++) + if (rate == legacy[i]) + return true; + + return false; +} + +static bool is_rate_ht20(s32 rate, u8 *mcs, bool *sgi) +{ + static const s32 ht20[] = { 6500, 13000, 19500, 26000, 39000, + 52000, 58500, 65000, 72200 + }; + u8 i; + + for (i = 0; i < ARRAY_SIZE(ht20); i++) { + if (rate == ht20[i]) { + if (i == ARRAY_SIZE(ht20) - 1) + /* last rate uses sgi */ + *sgi = true; + else + *sgi = false; + + *mcs = i; + return true; + } + } + return false; +} + +static bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi) +{ + static const s32 ht40[] = { 13500, 27000, 40500, 54000, + 81000, 108000, 121500, 135000, + 150000 + }; + u8 i; + + for (i = 0; i < ARRAY_SIZE(ht40); i++) { + if (rate == ht40[i]) { + if (i == ARRAY_SIZE(ht40) - 1) + /* last rate uses sgi */ + *sgi = true; + else + *sgi = false; + + *mcs = i; + return true; + } + } + + return false; +} + +static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, + const u8 *mac, struct station_info *sinfo) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); + long left; + bool sgi; + s32 rate; + int ret; + u8 mcs; + + if (memcmp(mac, vif->bssid, ETH_ALEN) != 0) + return -ENOENT; + + if (down_interruptible(&ar->sem)) + return -EBUSY; + + set_bit(STATS_UPDATE_PEND, &vif->flags); + + ret = ath6kl_wmi_get_stats_cmd(ar->wmi, vif->fw_vif_idx); + + if (ret != 0) { + up(&ar->sem); + return -EIO; + } + + left = wait_event_interruptible_timeout(ar->event_wq, + !test_bit(STATS_UPDATE_PEND, + &vif->flags), + WMI_TIMEOUT); + + up(&ar->sem); + + if (left == 0) + return -ETIMEDOUT; + else if (left < 0) + return left; + + if (vif->target_stats.rx_byte) { + sinfo->rx_bytes = vif->target_stats.rx_byte; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); + sinfo->rx_packets = vif->target_stats.rx_pkt; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); + } + + if (vif->target_stats.tx_byte) { + sinfo->tx_bytes = vif->target_stats.tx_byte; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); + sinfo->tx_packets = vif->target_stats.tx_pkt; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); + } + + sinfo->signal = vif->target_stats.cs_rssi; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); + + rate = vif->target_stats.tx_ucast_rate; + + if (is_rate_legacy(rate)) { + sinfo->txrate.legacy = rate / 100; + } else if (is_rate_ht20(rate, &mcs, &sgi)) { + if (sgi) { + sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + sinfo->txrate.mcs = mcs - 1; + } else { + sinfo->txrate.mcs = mcs; + } + + sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS; + sinfo->txrate.bw = RATE_INFO_BW_20; + } else if (is_rate_ht40(rate, &mcs, &sgi)) { + if (sgi) { + sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + sinfo->txrate.mcs = mcs - 1; + } else { + sinfo->txrate.mcs = mcs; + } + + sinfo->txrate.bw = RATE_INFO_BW_40; + sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS; + } else { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "invalid rate from stats: %d\n", rate); + ath6kl_debug_war(ar, ATH6KL_WAR_INVALID_RATE); + return 0; + } + + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + + if (test_bit(CONNECTED, &vif->flags) && + test_bit(DTIM_PERIOD_AVAIL, &vif->flags) && + vif->nw_type == INFRA_NETWORK) { + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM); + sinfo->bss_param.flags = 0; + 
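/* The DTIM period and beacon interval below are the values cached by + * the driver from the association with the current BSS. */ +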
sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period; + sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int; + } + + return 0; +} + +static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_pmksa *pmksa) +{ + struct ath6kl *ar = ath6kl_priv(netdev); + struct ath6kl_vif *vif = netdev_priv(netdev); + + return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid, + pmksa->pmkid, true); +} + +static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_pmksa *pmksa) +{ + struct ath6kl *ar = ath6kl_priv(netdev); + struct ath6kl_vif *vif = netdev_priv(netdev); + + return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid, + pmksa->pmkid, false); +} + +static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev) +{ + struct ath6kl *ar = ath6kl_priv(netdev); + struct ath6kl_vif *vif = netdev_priv(netdev); + + if (test_bit(CONNECTED, &vif->flags)) + return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, + vif->bssid, NULL, false); + return 0; +} + +static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif, + struct cfg80211_wowlan *wow, u32 *filter) +{ + int ret, pos; + u8 mask[WOW_PATTERN_SIZE]; + u16 i; + + /* Configure the patterns that we received from the user. */ + for (i = 0; i < wow->n_patterns; i++) { + /* + * Convert the given nl80211 specific mask value to the equivalent + * driver specific mask value and send it to the chip along + * with the patterns. For example, if the mask value defined in + * struct cfg80211_wowlan is 0xA (binary 1010, where bit 0 + * covers the first pattern byte), then the equivalent driver + * specific mask value is "0x00 0xFF 0x00 0xFF". + */ + memset(&mask, 0, sizeof(mask)); + for (pos = 0; pos < wow->patterns[i].pattern_len; pos++) { + if (wow->patterns[i].mask[pos / 8] & (0x1 << (pos % 8))) + mask[pos] = 0xFF; + } + /* + * Note: Pattern's offset is not passed as part of the wowlan + * parameter from the CFG layer. So it's always passed as ZERO + * to the firmware. This means a given WOW pattern is always + * matched starting from the first byte of the received packet + * in the firmware.
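+ * Equivalently, the loop above computes: mask[pos] = + * (wow->patterns[i].mask[pos / 8] & BIT(pos % 8)) ? 0xFF : 0x00, + * i.e. one driver mask byte per nl80211 mask bit.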
+ */ + ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, + vif->fw_vif_idx, WOW_LIST_ID, + wow->patterns[i].pattern_len, + 0 /* pattern offset */, + wow->patterns[i].pattern, mask); + if (ret) + return ret; + } + + if (wow->disconnect) + *filter |= WOW_FILTER_OPTION_NWK_DISASSOC; + + if (wow->magic_pkt) + *filter |= WOW_FILTER_OPTION_MAGIC_PACKET; + + if (wow->gtk_rekey_failure) + *filter |= WOW_FILTER_OPTION_GTK_ERROR; + + if (wow->eap_identity_req) + *filter |= WOW_FILTER_OPTION_EAP_REQ; + + if (wow->four_way_handshake) + *filter |= WOW_FILTER_OPTION_8021X_4WAYHS; + + return 0; +} + +static int ath6kl_wow_ap(struct ath6kl *ar, struct ath6kl_vif *vif) +{ + static const u8 unicst_pattern[] = { 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08 }; + static const u8 unicst_mask[] = { 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x7f }; + u8 unicst_offset = 0; + static const u8 arp_pattern[] = { 0x08, 0x06 }; + static const u8 arp_mask[] = { 0xff, 0xff }; + u8 arp_offset = 20; + static const u8 discvr_pattern[] = { 0xe0, 0x00, 0x00, 0xf8 }; + static const u8 discvr_mask[] = { 0xf0, 0x00, 0x00, 0xf8 }; + u8 discvr_offset = 38; + static const u8 dhcp_pattern[] = { 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x43 /* port 67 */ }; + static const u8 dhcp_mask[] = { 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff /* port 67 */ }; + u8 dhcp_offset = 0; + int ret; + + /* Setup unicast IP, EAPOL-like and ARP pkt pattern */ + ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, + vif->fw_vif_idx, WOW_LIST_ID, + sizeof(unicst_pattern), unicst_offset, + unicst_pattern, unicst_mask); + if (ret) { + ath6kl_err("failed to add WOW unicast IP pattern\n"); + return ret; + } + + /* Setup all ARP pkt pattern */ + ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, + vif->fw_vif_idx, WOW_LIST_ID, + sizeof(arp_pattern), arp_offset, + arp_pattern, arp_mask); + if (ret) { + ath6kl_err("failed to add WOW ARP pattern\n"); + return ret; + } + + /* + * Setup multicast pattern for mDNS 224.0.0.251, + * SSDP 239.255.255.250 and LLMNR 224.0.0.252 + */ + ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, + vif->fw_vif_idx, WOW_LIST_ID, + sizeof(discvr_pattern), discvr_offset, + discvr_pattern, discvr_mask); + if (ret) { + ath6kl_err("failed to add WOW mDNS/SSDP/LLMNR pattern\n"); + return ret; + } + + /* Setup all DHCP broadcast pkt pattern */ + ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, + vif->fw_vif_idx, WOW_LIST_ID, + sizeof(dhcp_pattern), dhcp_offset, + dhcp_pattern, dhcp_mask); + if (ret) { + ath6kl_err("failed to add WOW DHCP broadcast pattern\n"); + return ret; + } + + return 0; +} + +static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif) +{ + struct net_device *ndev = vif->ndev; + static const u8 discvr_pattern[] = { 0xe0, 0x00, 0x00, 0xf8 }; + static const u8 discvr_mask[] = { 0xf0, 0x00, 0x00, 0xf8 }; + u8 discvr_offset = 38; + u8 mac_mask[ETH_ALEN]; + int ret; + + /* Setup unicast pkt pattern */ 
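+ /* + * i.e. wake on any frame whose Ethernet destination equals our own + * MAC address: pattern = ndev->dev_addr with an all-0xff mask over + * the first ETH_ALEN bytes at offset 0. + */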
+ eth_broadcast_addr(mac_mask); + ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, + vif->fw_vif_idx, WOW_LIST_ID, + ETH_ALEN, 0, ndev->dev_addr, + mac_mask); + if (ret) { + ath6kl_err("failed to add WOW unicast pattern\n"); + return ret; + } + + /* + * Setup multicast pattern for mDNS 224.0.0.251, + * SSDP 239.255.255.250 and LLMNR 224.0.0.252 + */ + if ((ndev->flags & IFF_ALLMULTI) || + (ndev->flags & IFF_MULTICAST && netdev_mc_count(ndev) > 0)) { + ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, + vif->fw_vif_idx, WOW_LIST_ID, + sizeof(discvr_pattern), discvr_offset, + discvr_pattern, discvr_mask); + if (ret) { + ath6kl_err("failed to add WOW mDNS/SSDP/LLMNR pattern\n"); + return ret; + } + } + + return 0; +} + +static int is_hsleep_mode_processed(struct ath6kl_vif *vif) +{ + return test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags); +} + +static bool is_ctrl_ep_empty(struct ath6kl *ar) +{ + return !ar->tx_pending[ar->ctrl_ep]; +} + +static int ath6kl_cfg80211_host_sleep(struct ath6kl *ar, struct ath6kl_vif *vif) +{ + int ret, left; + + clear_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags); + + ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, + ATH6KL_HOST_MODE_ASLEEP); + if (ret) + return ret; + + left = wait_event_interruptible_timeout(ar->event_wq, + is_hsleep_mode_processed(vif), + WMI_TIMEOUT); + if (left == 0) { + ath6kl_warn("timeout, didn't get host sleep cmd processed event\n"); + ret = -ETIMEDOUT; + } else if (left < 0) { + ath6kl_warn("error while waiting for host sleep cmd processed event %d\n", + left); + ret = left; + } + + if (ar->tx_pending[ar->ctrl_ep]) { + left = wait_event_interruptible_timeout(ar->event_wq, + is_ctrl_ep_empty(ar), + WMI_TIMEOUT); + if (left == 0) { + ath6kl_warn("clear wmi ctrl data timeout\n"); + ret = -ETIMEDOUT; + } else if (left < 0) { + ath6kl_warn("clear wmi ctrl data failed: %d\n", left); + ret = left; + } + } + + return ret; +} + +static int ath6kl_wow_suspend_vif(struct ath6kl_vif *vif, + struct cfg80211_wowlan *wow, u32 *filter) +{ + struct ath6kl *ar = vif->ar; + struct in_device *in_dev; + struct in_ifaddr *ifa; + int ret; + u16 i, bmiss_time; + __be32 ips[MAX_IP_ADDRS]; + u8 index = 0; + + if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags) && + test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER, + ar->fw_capabilities)) { + ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, + vif->fw_vif_idx, false); + if (ret) + return ret; + } + + /* Clear existing WOW patterns */ + for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++) + ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx, + WOW_LIST_ID, i); + + /* + * Skip the default WOW pattern configuration + * if the driver receives any WOW patterns from + * the user.
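+ * The precedence below is: user-supplied patterns first, then the + * AP-mode defaults, then the STA-mode defaults.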
+ */ + if (wow) + ret = ath6kl_wow_usr(ar, vif, wow, filter); + else if (vif->nw_type == AP_NETWORK) + ret = ath6kl_wow_ap(ar, vif); + else + ret = ath6kl_wow_sta(ar, vif); + + if (ret) + return ret; + + netif_stop_queue(vif->ndev); + + if (vif->nw_type != AP_NETWORK) { + ret = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, + ATH6KL_MAX_WOW_LISTEN_INTL, + 0); + if (ret) + return ret; + + /* Use 15 times the listen interval as the bmiss time */ + bmiss_time = ATH6KL_MAX_WOW_LISTEN_INTL * 15; + if (bmiss_time > ATH6KL_MAX_BMISS_TIME) + bmiss_time = ATH6KL_MAX_BMISS_TIME; + + ret = ath6kl_wmi_bmisstime_cmd(ar->wmi, vif->fw_vif_idx, + bmiss_time, 0); + if (ret) + return ret; + + ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, + 0xFFFF, 0, 0xFFFF, 0, 0, 0, + 0, 0, 0, 0); + if (ret) + return ret; + } + + /* Setup own IP addr for ARP agent. */ + in_dev = __in_dev_get_rtnl(vif->ndev); + if (!in_dev) + return 0; + + ifa = rtnl_dereference(in_dev->ifa_list); + memset(&ips, 0, sizeof(ips)); + + /* Configure IP addr only if IP address count < MAX_IP_ADDRS */ + while (index < MAX_IP_ADDRS && ifa) { + ips[index] = ifa->ifa_local; + ifa = rtnl_dereference(ifa->ifa_next); + index++; + } + + if (ifa) { + ath6kl_err("total IP addr count exceeds fw limit\n"); + return -EINVAL; + } + + ret = ath6kl_wmi_set_ip_cmd(ar->wmi, vif->fw_vif_idx, ips[0], ips[1]); + if (ret) { + ath6kl_err("failed to set up IP for ARP agent\n"); + return ret; + } + + return ret; +} + +static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) +{ + struct ath6kl_vif *first_vif, *vif; + int ret = 0; + u32 filter = 0; + bool connected = false; + + /* enter / leave wow suspend on first vif always */ + first_vif = ath6kl_vif_first(ar); + if (WARN_ON(!first_vif) || + !ath6kl_cfg80211_ready(first_vif)) + return -EIO; + + if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST)) + return -EINVAL; + + /* install filters for each connected vif */ + spin_lock_bh(&ar->list_lock); + list_for_each_entry(vif, &ar->vif_list, list) { + if (!test_bit(CONNECTED, &vif->flags) || + !ath6kl_cfg80211_ready(vif)) + continue; + connected = true; + + ret = ath6kl_wow_suspend_vif(vif, wow, &filter); + if (ret) + break; + } + spin_unlock_bh(&ar->list_lock); + + if (!connected) + return -ENOTCONN; + else if (ret) + return ret; + + ar->state = ATH6KL_STATE_SUSPENDING; + + ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, first_vif->fw_vif_idx, + ATH6KL_WOW_MODE_ENABLE, + filter, + WOW_HOST_REQ_DELAY); + if (ret) + return ret; + + return ath6kl_cfg80211_host_sleep(ar, first_vif); +} + +static int ath6kl_wow_resume_vif(struct ath6kl_vif *vif) +{ + struct ath6kl *ar = vif->ar; + int ret; + + if (vif->nw_type != AP_NETWORK) { + ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, + 0, 0, 0, 0, 0, 0, 3, 0, 0, 0); + if (ret) + return ret; + + ret = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, + vif->listen_intvl_t, 0); + if (ret) + return ret; + + ret = ath6kl_wmi_bmisstime_cmd(ar->wmi, vif->fw_vif_idx, + vif->bmiss_time_t, 0); + if (ret) + return ret; + } + + if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags) && + test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER, + ar->fw_capabilities)) { + ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, + vif->fw_vif_idx, true); + if (ret) + return ret; + } + + netif_wake_queue(vif->ndev); + + return 0; +} + +static int ath6kl_wow_resume(struct ath6kl *ar) +{ + struct ath6kl_vif *vif; + int ret; + + vif = ath6kl_vif_first(ar); + if (WARN_ON(!vif) || + !ath6kl_cfg80211_ready(vif)) + return
-EIO; + + ar->state = ATH6KL_STATE_RESUMING; + + ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, + ATH6KL_HOST_MODE_AWAKE); + if (ret) { + ath6kl_warn("Failed to configure host sleep mode for wow resume: %d\n", + ret); + goto cleanup; + } + + spin_lock_bh(&ar->list_lock); + list_for_each_entry(vif, &ar->vif_list, list) { + if (!test_bit(CONNECTED, &vif->flags) || + !ath6kl_cfg80211_ready(vif)) + continue; + ret = ath6kl_wow_resume_vif(vif); + if (ret) + break; + } + spin_unlock_bh(&ar->list_lock); + + if (ret) + goto cleanup; + + ar->state = ATH6KL_STATE_ON; + return 0; + +cleanup: + ar->state = ATH6KL_STATE_WOW; + return ret; +} + +static int ath6kl_cfg80211_deepsleep_suspend(struct ath6kl *ar) +{ + struct ath6kl_vif *vif; + int ret; + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + if (!test_bit(WMI_READY, &ar->flag)) { + ath6kl_err("deepsleep failed as wmi is not ready\n"); + return -EIO; + } + + ath6kl_cfg80211_stop_all(ar); + + /* Save the current power mode before enabling power save */ + ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode; + + ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER); + if (ret) + return ret; + + /* Disable WOW mode */ + ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx, + ATH6KL_WOW_MODE_DISABLE, + 0, 0); + if (ret) + return ret; + + /* Flush all non control pkts in TX path */ + ath6kl_tx_data_cleanup(ar); + + ret = ath6kl_cfg80211_host_sleep(ar, vif); + if (ret) + return ret; + + return 0; +} + +static int ath6kl_cfg80211_deepsleep_resume(struct ath6kl *ar) +{ + struct ath6kl_vif *vif; + int ret; + + vif = ath6kl_vif_first(ar); + + if (!vif) + return -EIO; + + if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) { + ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, + ar->wmi->saved_pwr_mode); + if (ret) + return ret; + } + + ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, + ATH6KL_HOST_MODE_AWAKE); + if (ret) + return ret; + + ar->state = ATH6KL_STATE_ON; + + /* Reset scan parameter to default values */ + ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, + 0, 0, 0, 0, 0, 0, 3, 0, 0, 0); + if (ret) + return ret; + + return 0; +} + +int ath6kl_cfg80211_suspend(struct ath6kl *ar, + enum ath6kl_cfg_suspend_mode mode, + struct cfg80211_wowlan *wow) +{ + struct ath6kl_vif *vif; + enum ath6kl_state prev_state; + int ret; + + switch (mode) { + case ATH6KL_CFG_SUSPEND_WOW: + + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode suspend\n"); + + /* Flush all non control pkts in TX path */ + ath6kl_tx_data_cleanup(ar); + + prev_state = ar->state; + + ret = ath6kl_wow_suspend(ar, wow); + if (ret) { + ar->state = prev_state; + return ret; + } + + ar->state = ATH6KL_STATE_WOW; + break; + + case ATH6KL_CFG_SUSPEND_DEEPSLEEP: + + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep suspend\n"); + + ret = ath6kl_cfg80211_deepsleep_suspend(ar); + if (ret) { + ath6kl_err("deepsleep suspend failed: %d\n", ret); + return ret; + } + + ar->state = ATH6KL_STATE_DEEPSLEEP; + + break; + + case ATH6KL_CFG_SUSPEND_CUTPOWER: + + ath6kl_cfg80211_stop_all(ar); + + if (ar->state == ATH6KL_STATE_OFF) { + ath6kl_dbg(ATH6KL_DBG_SUSPEND, + "suspend hw off, no action for cutpower\n"); + break; + } + + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "suspend cutting power\n"); + + ret = ath6kl_init_hw_stop(ar); + if (ret) { + ath6kl_warn("failed to stop hw during suspend: %d\n", + ret); + } + + ar->state = ATH6KL_STATE_CUTPOWER; + + break; + + default: + break; + } + + list_for_each_entry(vif, &ar->vif_list, list) + ath6kl_cfg80211_scan_complete_event(vif, true); + + return 0; 
+} +EXPORT_SYMBOL(ath6kl_cfg80211_suspend); + +int ath6kl_cfg80211_resume(struct ath6kl *ar) +{ + int ret; + + switch (ar->state) { + case ATH6KL_STATE_WOW: + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode resume\n"); + + ret = ath6kl_wow_resume(ar); + if (ret) { + ath6kl_warn("wow mode resume failed: %d\n", ret); + return ret; + } + + break; + + case ATH6KL_STATE_DEEPSLEEP: + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep resume\n"); + + ret = ath6kl_cfg80211_deepsleep_resume(ar); + if (ret) { + ath6kl_warn("deep sleep resume failed: %d\n", ret); + return ret; + } + break; + + case ATH6KL_STATE_CUTPOWER: + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "resume restoring power\n"); + + ret = ath6kl_init_hw_start(ar); + if (ret) { + ath6kl_warn("Failed to boot hw in resume: %d\n", ret); + return ret; + } + break; + + default: + break; + } + + return 0; +} +EXPORT_SYMBOL(ath6kl_cfg80211_resume); + +#ifdef CONFIG_PM + +/* hif layer decides what suspend mode to use */ +static int __ath6kl_cfg80211_suspend(struct wiphy *wiphy, + struct cfg80211_wowlan *wow) +{ + struct ath6kl *ar = wiphy_priv(wiphy); + + ath6kl_recovery_suspend(ar); + + return ath6kl_hif_suspend(ar, wow); +} + +static int __ath6kl_cfg80211_resume(struct wiphy *wiphy) +{ + struct ath6kl *ar = wiphy_priv(wiphy); + int err; + + err = ath6kl_hif_resume(ar); + if (err) + return err; + + ath6kl_recovery_resume(ar); + + return 0; +} + +/* + * FIXME: WOW suspend mode is selected if the host sdio controller supports + * both sdio irq wake up and keep power. The target pulls the sdio data line + * to wake up the host when a WOW pattern matches. This causes the sdio irq + * handler to be called on the host side, which internally hits ath6kl's RX + * path. + * + * Since the sdio interrupt is not disabled, the RX path executes even before + * the host executes the actual resume operation from the PM module. + * + * In the current scenario, WOW resume should happen before we start + * processing any data from the target. So it's required to perform WOW + * resume in the RX path. Ideally we should perform WOW resume only in the + * actual platform resume path. This area needs a bit of rework to avoid WOW + * resume in the RX path. + * + * ath6kl_check_wow_status() is called from ath6kl_rx(). + */ +void ath6kl_check_wow_status(struct ath6kl *ar) +{ + if (ar->state == ATH6KL_STATE_SUSPENDING) + return; + + if (ar->state == ATH6KL_STATE_WOW) + ath6kl_cfg80211_resume(ar); +} + +#else + +void ath6kl_check_wow_status(struct ath6kl *ar) +{ +} +#endif + +static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum nl80211_band band, + bool ht_enable) +{ + struct ath6kl_htcap *htcap = &vif->htcap[band]; + + if (htcap->ht_enable == ht_enable) + return 0; + + if (ht_enable) { + /* Set default ht capabilities */ + htcap->ht_enable = true; + htcap->cap_info = (band == NL80211_BAND_2GHZ) ?
+ ath6kl_g_htcap : ath6kl_a_htcap; + htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K; + } else /* Disable ht */ + memset(htcap, 0, sizeof(*htcap)); + + return ath6kl_wmi_set_htcap_cmd(vif->ar->wmi, vif->fw_vif_idx, + band, htcap); +} + +static int ath6kl_restore_htcap(struct ath6kl_vif *vif) +{ + struct wiphy *wiphy = vif->ar->wiphy; + int band, ret = 0; + + for (band = 0; band < NUM_NL80211_BANDS; band++) { + if (!wiphy->bands[band]) + continue; + + ret = ath6kl_set_htcap(vif, band, + wiphy->bands[band]->ht_cap.ht_supported); + if (ret) + return ret; + } + + return ret; +} + +static bool ath6kl_is_p2p_ie(const u8 *pos) +{ + return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 && + pos[2] == 0x50 && pos[3] == 0x6f && + pos[4] == 0x9a && pos[5] == 0x09; +} + +static int ath6kl_set_ap_probe_resp_ies(struct ath6kl_vif *vif, + const u8 *ies, size_t ies_len) +{ + struct ath6kl *ar = vif->ar; + const u8 *pos; + u8 *buf = NULL; + size_t len = 0; + int ret; + + /* + * Filter out P2P IE(s) since they will be included depending on + * the Probe Request frame in ath6kl_send_go_probe_resp(). + */ + + if (ies && ies_len) { + buf = kmalloc(ies_len, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + pos = ies; + while (pos + 1 < ies + ies_len) { + if (pos + 2 + pos[1] > ies + ies_len) + break; + if (!ath6kl_is_p2p_ie(pos)) { + memcpy(buf + len, pos, 2 + pos[1]); + len += 2 + pos[1]; + } + pos += 2 + pos[1]; + } + } + + ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, + WMI_FRAME_PROBE_RESP, buf, len); + kfree(buf); + return ret; +} + +static int ath6kl_set_ies(struct ath6kl_vif *vif, + struct cfg80211_beacon_data *info) +{ + struct ath6kl *ar = vif->ar; + int res; + + /* this also clears IE in fw if it's not set */ + res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, + WMI_FRAME_BEACON, + info->beacon_ies, + info->beacon_ies_len); + if (res) + return res; + + /* this also clears IE in fw if it's not set */ + res = ath6kl_set_ap_probe_resp_ies(vif, info->proberesp_ies, + info->proberesp_ies_len); + if (res) + return res; + + /* this also clears IE in fw if it's not set */ + res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, + WMI_FRAME_ASSOC_RESP, + info->assocresp_ies, + info->assocresp_ies_len); + if (res) + return res; + + return 0; +} + +static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon, + u8 *rsn_capab) +{ + const u8 *rsn_ie; + size_t rsn_ie_len; + u16 cnt; + + if (!beacon->tail) + return -EINVAL; + + rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, beacon->tail, beacon->tail_len); + if (!rsn_ie) + return -EINVAL; + + rsn_ie_len = *(rsn_ie + 1); + /* skip element id and length */ + rsn_ie += 2; + + /* skip version */ + if (rsn_ie_len < 2) + return -EINVAL; + rsn_ie += 2; + rsn_ie_len -= 2; + + /* skip group cipher suite */ + if (rsn_ie_len < 4) + return 0; + rsn_ie += 4; + rsn_ie_len -= 4; + + /* skip pairwise cipher suite */ + if (rsn_ie_len < 2) + return 0; + cnt = get_unaligned_le16(rsn_ie); + rsn_ie += (2 + cnt * 4); + rsn_ie_len -= (2 + cnt * 4); + + /* skip akm suite */ + if (rsn_ie_len < 2) + return 0; + cnt = get_unaligned_le16(rsn_ie); + rsn_ie += (2 + cnt * 4); + rsn_ie_len -= (2 + cnt * 4); + + if (rsn_ie_len < 2) + return 0; + + memcpy(rsn_capab, rsn_ie, 2); + + return 0; +} + +static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ap_settings *info) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); + struct ieee80211_mgmt *mgmt; + bool hidden = false; + u8 *ies; + struct 
wmi_connect_cmd p; + int res; + int i, ret; + u16 rsn_capab = 0; + int inactivity_timeout = 0; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s:\n", __func__); + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + if (vif->next_mode != AP_NETWORK) + return -EOPNOTSUPP; + + res = ath6kl_set_ies(vif, &info->beacon); + + ar->ap_mode_bkey.valid = false; + + ret = ath6kl_wmi_ap_set_beacon_intvl_cmd(ar->wmi, vif->fw_vif_idx, + info->beacon_interval); + + if (ret) + ath6kl_warn("Failed to set beacon interval: %d\n", ret); + + ret = ath6kl_wmi_ap_set_dtim_cmd(ar->wmi, vif->fw_vif_idx, + info->dtim_period); + + /* ignore error, just print a warning and continue normally */ + if (ret) + ath6kl_warn("Failed to set dtim_period in beacon: %d\n", ret); + + if (info->beacon.head == NULL) + return -EINVAL; + mgmt = (struct ieee80211_mgmt *) info->beacon.head; + ies = mgmt->u.beacon.variable; + if (ies > info->beacon.head + info->beacon.head_len) + return -EINVAL; + + if (info->ssid == NULL) + return -EINVAL; + memcpy(vif->ssid, info->ssid, info->ssid_len); + vif->ssid_len = info->ssid_len; + if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE) + hidden = true; + + res = ath6kl_wmi_ap_hidden_ssid(ar->wmi, vif->fw_vif_idx, hidden); + if (res) + return res; + + ret = ath6kl_set_auth_type(vif, info->auth_type); + if (ret) + return ret; + + memset(&p, 0, sizeof(p)); + + for (i = 0; i < info->crypto.n_akm_suites; i++) { + switch (info->crypto.akm_suites[i]) { + case WLAN_AKM_SUITE_8021X: + if (info->crypto.wpa_versions & NL80211_WPA_VERSION_1) + p.auth_mode |= WPA_AUTH; + if (info->crypto.wpa_versions & NL80211_WPA_VERSION_2) + p.auth_mode |= WPA2_AUTH; + break; + case WLAN_AKM_SUITE_PSK: + if (info->crypto.wpa_versions & NL80211_WPA_VERSION_1) + p.auth_mode |= WPA_PSK_AUTH; + if (info->crypto.wpa_versions & NL80211_WPA_VERSION_2) + p.auth_mode |= WPA2_PSK_AUTH; + break; + } + } + if (p.auth_mode == 0) + p.auth_mode = NONE_AUTH; + vif->auth_mode = p.auth_mode; + + for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) { + switch (info->crypto.ciphers_pairwise[i]) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + p.prwise_crypto_type |= WEP_CRYPT; + break; + case WLAN_CIPHER_SUITE_TKIP: + p.prwise_crypto_type |= TKIP_CRYPT; + break; + case WLAN_CIPHER_SUITE_CCMP: + p.prwise_crypto_type |= AES_CRYPT; + break; + case WLAN_CIPHER_SUITE_SMS4: + p.prwise_crypto_type |= WAPI_CRYPT; + break; + } + } + if (p.prwise_crypto_type == 0) { + p.prwise_crypto_type = NONE_CRYPT; + ath6kl_set_cipher(vif, 0, true); + } else if (info->crypto.n_ciphers_pairwise == 1) { + ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true); + } + + switch (info->crypto.cipher_group) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + p.grp_crypto_type = WEP_CRYPT; + break; + case WLAN_CIPHER_SUITE_TKIP: + p.grp_crypto_type = TKIP_CRYPT; + break; + case WLAN_CIPHER_SUITE_CCMP: + p.grp_crypto_type = AES_CRYPT; + break; + case WLAN_CIPHER_SUITE_SMS4: + p.grp_crypto_type = WAPI_CRYPT; + break; + default: + p.grp_crypto_type = NONE_CRYPT; + break; + } + ath6kl_set_cipher(vif, info->crypto.cipher_group, false); + + p.nw_type = AP_NETWORK; + vif->nw_type = vif->next_mode; + + p.ssid_len = vif->ssid_len; + memcpy(p.ssid, vif->ssid, vif->ssid_len); + p.dot11_auth_mode = vif->dot11_auth_mode; + p.ch = cpu_to_le16(info->chandef.chan->center_freq); + + /* Enable uAPSD support by default */ + res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true); + if (res < 0) + return res; + + if (vif->wdev.iftype == 
NL80211_IFTYPE_P2P_GO) { + p.nw_subtype = SUBTYPE_P2PGO; + } else { + /* + * Due to firmware limitation, it is not possible to + * do P2P mgmt operations in AP mode + */ + p.nw_subtype = SUBTYPE_NONE; + } + + if (info->inactivity_timeout) { + inactivity_timeout = info->inactivity_timeout; + + if (test_bit(ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS, + ar->fw_capabilities)) + inactivity_timeout = DIV_ROUND_UP(inactivity_timeout, + 60); + + res = ath6kl_wmi_set_inact_period(ar->wmi, vif->fw_vif_idx, + inactivity_timeout); + if (res < 0) + return res; + } + + if (ath6kl_set_htcap(vif, info->chandef.chan->band, + cfg80211_get_chandef_type(&info->chandef) + != NL80211_CHAN_NO_HT)) + return -EIO; + + /* + * Get the PTKSA replay counter in the RSN IE. Supplicant + * will use the RSN IE in M3 message and firmware has to + * advertise the same in beacon/probe response. Send + * the complete RSN IE capability field to firmware + */ + if (!ath6kl_get_rsn_capab(&info->beacon, (u8 *) &rsn_capab) && + test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, + ar->fw_capabilities)) { + res = ath6kl_wmi_set_ie_cmd(ar->wmi, vif->fw_vif_idx, + WLAN_EID_RSN, WMI_RSN_IE_CAPB, + (const u8 *) &rsn_capab, + sizeof(rsn_capab)); + vif->rsn_capab = rsn_capab; + if (res < 0) + return res; + } + + memcpy(&vif->profile, &p, sizeof(p)); + res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p); + if (res < 0) + return res; + + return 0; +} + +static int ath6kl_change_beacon(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_beacon_data *beacon) +{ + struct ath6kl_vif *vif = netdev_priv(dev); + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + if (vif->next_mode != AP_NETWORK) + return -EOPNOTSUPP; + + return ath6kl_set_ies(vif, beacon); +} + +static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev, + unsigned int link_id) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); + + if (vif->nw_type != AP_NETWORK) + return -EOPNOTSUPP; + if (!test_bit(CONNECTED, &vif->flags)) + return -ENOTCONN; + + ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx); + clear_bit(CONNECTED, &vif->flags); + netif_carrier_off(vif->ndev); + + /* Restore ht setting in firmware */ + return ath6kl_restore_htcap(vif); +} + +static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + +static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev, + struct station_del_parameters *params) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); + const u8 *addr = params->mac ? 
params->mac : bcast_addr; + + return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, WMI_AP_DEAUTH, + addr, WLAN_REASON_PREV_AUTH_NOT_VALID); +} + +static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev, + const u8 *mac, + struct station_parameters *params) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); + int err; + + if (vif->nw_type != AP_NETWORK) + return -EOPNOTSUPP; + + err = cfg80211_check_station_change(wiphy, params, + CFG80211_STA_AP_MLME_CLIENT); + if (err) + return err; + + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED)) + return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, + WMI_AP_MLME_AUTHORIZE, mac, 0); + return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, + WMI_AP_MLME_UNAUTHORIZE, mac, 0); +} + +static int ath6kl_remain_on_channel(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct ieee80211_channel *chan, + unsigned int duration, + u64 *cookie) +{ + struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev); + struct ath6kl *ar = ath6kl_priv(vif->ndev); + u32 id; + + /* TODO: if already pending or ongoing remain-on-channel, + * return -EBUSY */ + id = ++vif->last_roc_id; + if (id == 0) { + /* Do not use 0 as the cookie value */ + id = ++vif->last_roc_id; + } + *cookie = id; + + return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx, + chan->center_freq, duration); +} + +static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy, + struct wireless_dev *wdev, + u64 cookie) +{ + struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev); + struct ath6kl *ar = ath6kl_priv(vif->ndev); + + if (cookie != vif->last_roc_id) + return -ENOENT; + vif->last_cancel_roc_id = cookie; + + return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx); +} + +static int ath6kl_send_go_probe_resp(struct ath6kl_vif *vif, + const u8 *buf, size_t len, + unsigned int freq) +{ + struct ath6kl *ar = vif->ar; + const u8 *pos; + u8 *p2p; + int p2p_len; + int ret; + const struct ieee80211_mgmt *mgmt; + + mgmt = (const struct ieee80211_mgmt *) buf; + + /* Include P2P IE(s) from the frame generated in user space. 
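Only WFA P2P vendor IEs (OUI 50:6f:9a, type 0x09, see ath6kl_is_p2p_ie()) are copied out below; the target fills in the generic IEs itself.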
*/ + + p2p = kmalloc(len, GFP_KERNEL); + if (p2p == NULL) + return -ENOMEM; + p2p_len = 0; + + pos = mgmt->u.probe_resp.variable; + while (pos + 1 < buf + len) { + if (pos + 2 + pos[1] > buf + len) + break; + if (ath6kl_is_p2p_ie(pos)) { + memcpy(p2p + p2p_len, pos, 2 + pos[1]); + p2p_len += 2 + pos[1]; + } + pos += 2 + pos[1]; + } + + ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, vif->fw_vif_idx, freq, + mgmt->da, p2p, p2p_len); + kfree(p2p); + return ret; +} + +static bool ath6kl_mgmt_powersave_ap(struct ath6kl_vif *vif, + u32 id, + u32 freq, + u32 wait, + const u8 *buf, + size_t len, + bool *more_data, + bool no_cck) +{ + struct ieee80211_mgmt *mgmt; + struct ath6kl_sta *conn; + bool is_psq_empty = false; + struct ath6kl_mgmt_buff *mgmt_buf; + size_t mgmt_buf_size; + struct ath6kl *ar = vif->ar; + + mgmt = (struct ieee80211_mgmt *) buf; + if (is_multicast_ether_addr(mgmt->da)) + return false; + + conn = ath6kl_find_sta(vif, mgmt->da); + if (!conn) + return false; + + if (conn->sta_flags & STA_PS_SLEEP) { + if (!(conn->sta_flags & STA_PS_POLLED)) { + /* Queue the frames if the STA is sleeping */ + mgmt_buf_size = len + sizeof(struct ath6kl_mgmt_buff); + mgmt_buf = kmalloc(mgmt_buf_size, GFP_KERNEL); + if (!mgmt_buf) + return false; + + INIT_LIST_HEAD(&mgmt_buf->list); + mgmt_buf->id = id; + mgmt_buf->freq = freq; + mgmt_buf->wait = wait; + mgmt_buf->len = len; + mgmt_buf->no_cck = no_cck; + memcpy(mgmt_buf->buf, buf, len); + spin_lock_bh(&conn->psq_lock); + is_psq_empty = skb_queue_empty(&conn->psq) && + (conn->mgmt_psq_len == 0); + list_add_tail(&mgmt_buf->list, &conn->mgmt_psq); + conn->mgmt_psq_len++; + spin_unlock_bh(&conn->psq_lock); + + /* + * If this is the first pkt getting queued + * for this STA, update the PVB for this + * STA. + */ + if (is_psq_empty) + ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, + conn->aid, 1); + return true; + } + + /* + * This tx is because of a PsPoll. + * Determine if MoreData bit has to be set. 
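+ * MoreData is set when the STA's power-save queues (the data psq or + * mgmt_psq) still hold frames after this one, so the STA knows to + * keep polling.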
+ */ + spin_lock_bh(&conn->psq_lock); + if (!skb_queue_empty(&conn->psq) || (conn->mgmt_psq_len != 0)) + *more_data = true; + spin_unlock_bh(&conn->psq_lock); + } + + return false; +} + +/* Check if SSID length is greater than DIRECT- */ +static bool ath6kl_is_p2p_go_ssid(const u8 *buf, size_t len) +{ + const struct ieee80211_mgmt *mgmt; + mgmt = (const struct ieee80211_mgmt *) buf; + + /* variable[1] contains the SSID tag length */ + if (buf + len >= &mgmt->u.probe_resp.variable[1] && + (mgmt->u.probe_resp.variable[1] > P2P_WILDCARD_SSID_LEN)) { + return true; + } + + return false; +} + +static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_mgmt_tx_params *params, u64 *cookie) +{ + struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev); + struct ath6kl *ar = ath6kl_priv(vif->ndev); + struct ieee80211_channel *chan = params->chan; + const u8 *buf = params->buf; + size_t len = params->len; + unsigned int wait = params->wait; + bool no_cck = params->no_cck; + u32 id, freq; + const struct ieee80211_mgmt *mgmt; + bool more_data, queued; + + /* default to the current channel, but use the one specified as argument + * if any + */ + freq = vif->ch_hint; + if (chan) + freq = chan->center_freq; + + /* never send freq zero to the firmware */ + if (WARN_ON(freq == 0)) + return -EINVAL; + + mgmt = (const struct ieee80211_mgmt *) buf; + if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) && + ieee80211_is_probe_resp(mgmt->frame_control) && + ath6kl_is_p2p_go_ssid(buf, len)) { + /* + * Send Probe Response frame in GO mode using a separate WMI + * command to allow the target to fill in the generic IEs. + */ + *cookie = 0; /* TX status not supported */ + return ath6kl_send_go_probe_resp(vif, buf, len, freq); + } + + id = vif->send_action_id++; + if (id == 0) { + /* + * 0 is a reserved value in the WMI command and shall not be + * used for the command. + */ + id = vif->send_action_id++; + } + + *cookie = id; + + /* AP mode Power saving processing */ + if (vif->nw_type == AP_NETWORK) { + queued = ath6kl_mgmt_powersave_ap(vif, id, freq, wait, buf, len, + &more_data, no_cck); + if (queued) + return 0; + } + + return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, freq, + wait, buf, len, no_cck); +} + +static int ath6kl_get_antenna(struct wiphy *wiphy, + u32 *tx_ant, u32 *rx_ant) +{ + struct ath6kl *ar = wiphy_priv(wiphy); + *tx_ant = ar->hw.tx_ant; + *rx_ant = ar->hw.rx_ant; + return 0; +} + +static void ath6kl_update_mgmt_frame_registrations(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct mgmt_frame_regs *upd) +{ + struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev); + + /* + * FIXME: send WMI_PROBE_REQ_REPORT_CMD here instead of hardcoding + * the reporting in the target all the time, this callback + * *is* allowed to sleep after all. + */ + vif->probe_req_report = + upd->interface_stypes & BIT(IEEE80211_STYPE_PROBE_REQ >> 4); +} + +static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_sched_scan_request *request) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); + u16 interval; + int ret, rssi_thold; + int n_match_sets = request->n_match_sets; + + /* + * If there's a matchset w/o an SSID, then assume it's just for + * the RSSI (nothing else is currently supported) and ignore it. + * The device only supports a global RSSI filter that we set below. 
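+ * (cfg80211 expresses a bare RSSI filter as a matchset with an empty + * SSID; it is folded into the global min_rssi_thold handling below.)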
+ */ + if (n_match_sets == 1 && !request->match_sets[0].ssid.ssid_len) + n_match_sets = 0; + + if (ar->state != ATH6KL_STATE_ON) + return -EIO; + + if (vif->sme_state != SME_DISCONNECTED) + return -EBUSY; + + ath6kl_cfg80211_scan_complete_event(vif, true); + + ret = ath6kl_set_probed_ssids(ar, vif, request->ssids, + request->n_ssids, + request->match_sets, + n_match_sets); + if (ret < 0) + return ret; + + if (!n_match_sets) { + ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, + ALL_BSS_FILTER, 0); + if (ret < 0) + return ret; + } else { + ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, + MATCHED_SSID_FILTER, 0); + if (ret < 0) + return ret; + } + + if (test_bit(ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD, + ar->fw_capabilities)) { + if (request->min_rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF) + rssi_thold = 0; + else if (request->min_rssi_thold < -127) + rssi_thold = -127; + else + rssi_thold = request->min_rssi_thold; + + ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx, + rssi_thold); + if (ret) { + ath6kl_err("failed to set RSSI threshold for scan\n"); + return ret; + } + } + + /* fw uses seconds, also make sure that it's >0 */ + interval = max_t(u16, 1, request->scan_plans[0].interval); + + ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, + interval, interval, + vif->bg_scan_period, 0, 0, 0, 3, 0, 0, 0); + + /* this also clears IE in fw if it's not set */ + ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, + WMI_FRAME_PROBE_REQ, + request->ie, request->ie_len); + if (ret) { + ath6kl_warn("Failed to set probe request IE for scheduled scan: %d\n", + ret); + return ret; + } + + ret = ath6kl_wmi_enable_sched_scan_cmd(ar->wmi, vif->fw_vif_idx, true); + if (ret) + return ret; + + set_bit(SCHED_SCANNING, &vif->flags); + + return 0; +} + +static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy, + struct net_device *dev, u64 reqid) +{ + struct ath6kl_vif *vif = netdev_priv(dev); + bool stopped; + + stopped = __ath6kl_cfg80211_sscan_stop(vif); + + if (!stopped) + return -EIO; + + return 0; +} + +static int ath6kl_cfg80211_set_bitrate(struct wiphy *wiphy, + struct net_device *dev, + unsigned int link_id, + const u8 *addr, + const struct cfg80211_bitrate_mask *mask) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); + + return ath6kl_wmi_set_bitrate_mask(ar->wmi, vif->fw_vif_idx, + mask); +} + +static int ath6kl_cfg80211_set_txe_config(struct wiphy *wiphy, + struct net_device *dev, + u32 rate, u32 pkts, u32 intvl) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); + + if (vif->nw_type != INFRA_NETWORK || + !test_bit(ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, ar->fw_capabilities)) + return -EOPNOTSUPP; + + if (vif->sme_state != SME_CONNECTED) + return -ENOTCONN; + + /* save this since the firmware won't report the interval */ + vif->txe_intvl = intvl; + + return ath6kl_wmi_set_txe_notify(ar->wmi, vif->fw_vif_idx, + rate, pkts, intvl); +} + +static const struct ieee80211_txrx_stypes +ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = { + [NL80211_IFTYPE_STATION] = { + .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_RESP >> 4), + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_AP] = { + .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_RESP >> 4), + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_P2P_CLIENT] = { + .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | + 
BIT(IEEE80211_STYPE_PROBE_RESP >> 4), + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_P2P_GO] = { + .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_RESP >> 4), + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, +}; + +static struct cfg80211_ops ath6kl_cfg80211_ops = { + .add_virtual_intf = ath6kl_cfg80211_add_iface, + .del_virtual_intf = ath6kl_cfg80211_del_iface, + .change_virtual_intf = ath6kl_cfg80211_change_iface, + .scan = ath6kl_cfg80211_scan, + .connect = ath6kl_cfg80211_connect, + .disconnect = ath6kl_cfg80211_disconnect, + .add_key = ath6kl_cfg80211_add_key, + .get_key = ath6kl_cfg80211_get_key, + .del_key = ath6kl_cfg80211_del_key, + .set_default_key = ath6kl_cfg80211_set_default_key, + .set_wiphy_params = ath6kl_cfg80211_set_wiphy_params, + .set_tx_power = ath6kl_cfg80211_set_txpower, + .get_tx_power = ath6kl_cfg80211_get_txpower, + .set_power_mgmt = ath6kl_cfg80211_set_power_mgmt, + .join_ibss = ath6kl_cfg80211_join_ibss, + .leave_ibss = ath6kl_cfg80211_leave_ibss, + .get_station = ath6kl_get_station, + .set_pmksa = ath6kl_set_pmksa, + .del_pmksa = ath6kl_del_pmksa, + .flush_pmksa = ath6kl_flush_pmksa, + CFG80211_TESTMODE_CMD(ath6kl_tm_cmd) +#ifdef CONFIG_PM + .suspend = __ath6kl_cfg80211_suspend, + .resume = __ath6kl_cfg80211_resume, +#endif + .start_ap = ath6kl_start_ap, + .change_beacon = ath6kl_change_beacon, + .stop_ap = ath6kl_stop_ap, + .del_station = ath6kl_del_station, + .change_station = ath6kl_change_station, + .remain_on_channel = ath6kl_remain_on_channel, + .cancel_remain_on_channel = ath6kl_cancel_remain_on_channel, + .mgmt_tx = ath6kl_mgmt_tx, + .update_mgmt_frame_registrations = + ath6kl_update_mgmt_frame_registrations, + .get_antenna = ath6kl_get_antenna, + .sched_scan_start = ath6kl_cfg80211_sscan_start, + .sched_scan_stop = ath6kl_cfg80211_sscan_stop, + .set_bitrate_mask = ath6kl_cfg80211_set_bitrate, + .set_cqm_txe_config = ath6kl_cfg80211_set_txe_config, +}; + +void ath6kl_cfg80211_stop(struct ath6kl_vif *vif) +{ + ath6kl_cfg80211_sscan_disable(vif); + + switch (vif->sme_state) { + case SME_DISCONNECTED: + break; + case SME_CONNECTING: + cfg80211_connect_result(vif->ndev, vif->bssid, NULL, 0, + NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + GFP_KERNEL); + break; + case SME_CONNECTED: + cfg80211_disconnected(vif->ndev, 0, NULL, 0, true, GFP_KERNEL); + break; + } + + if (vif->ar->state != ATH6KL_STATE_RECOVERY && + (test_bit(CONNECTED, &vif->flags) || + test_bit(CONNECT_PEND, &vif->flags))) + ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx); + + vif->sme_state = SME_DISCONNECTED; + clear_bit(CONNECTED, &vif->flags); + clear_bit(CONNECT_PEND, &vif->flags); + + /* Stop netdev queues, needed during recovery */ + netif_stop_queue(vif->ndev); + netif_carrier_off(vif->ndev); + + /* disable scanning */ + if (vif->ar->state != ATH6KL_STATE_RECOVERY && + ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF, + 0, 0, 0, 0, 0, 0, 0, 0, 0) != 0) + ath6kl_warn("failed to disable scan during stop\n"); + + ath6kl_cfg80211_scan_complete_event(vif, true); +} + +void ath6kl_cfg80211_stop_all(struct ath6kl *ar) +{ + struct ath6kl_vif *vif; + + vif = ath6kl_vif_first(ar); + if (!vif && ar->state != ATH6KL_STATE_RECOVERY) { + /* save the current power mode before enabling power save */ + ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode; + + if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0) + ath6kl_warn("ath6kl_deep_sleep_enable: wmi_powermode_cmd 
failed\n"); + return; + } + + /* + * FIXME: we should take ar->list_lock to protect changes in the + * vif_list, but that's not trivial to do as ath6kl_cfg80211_stop() + * sleeps. + */ + list_for_each_entry(vif, &ar->vif_list, list) + ath6kl_cfg80211_stop(vif); +} + +static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct ath6kl *ar = wiphy_priv(wiphy); + u32 rates[NUM_NL80211_BANDS]; + int ret, i; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "cfg reg_notify %c%c%s%s initiator %d hint_type %d\n", + request->alpha2[0], request->alpha2[1], + request->intersect ? " intersect" : "", + request->processed ? " processed" : "", + request->initiator, request->user_reg_hint_type); + + if (request->user_reg_hint_type != NL80211_USER_REG_HINT_CELL_BASE) + return; + + ret = ath6kl_wmi_set_regdomain_cmd(ar->wmi, request->alpha2); + if (ret) { + ath6kl_err("failed to set regdomain: %d\n", ret); + return; + } + + /* + * Firmware will apply the regdomain change only after a scan is + * issued and it will send a WMI_REGDOMAIN_EVENTID when it has been + * changed. + */ + + for (i = 0; i < NUM_NL80211_BANDS; i++) + if (wiphy->bands[i]) + rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1; + + + ret = ath6kl_wmi_beginscan_cmd(ar->wmi, 0, WMI_LONG_SCAN, false, + false, 0, ATH6KL_FG_SCAN_INTERVAL, + 0, NULL, false, rates); + if (ret) { + ath6kl_err("failed to start scan for a regdomain change: %d\n", + ret); + return; + } +} + +static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif) +{ + vif->aggr_cntxt = aggr_init(vif); + if (!vif->aggr_cntxt) { + ath6kl_err("failed to initialize aggr\n"); + return -ENOMEM; + } + + timer_setup(&vif->disconnect_timer, disconnect_timer_handler, 0); + timer_setup(&vif->sched_scan_timer, ath6kl_wmi_sscan_timer, 0); + + set_bit(WMM_ENABLED, &vif->flags); + spin_lock_init(&vif->if_lock); + + INIT_LIST_HEAD(&vif->mc_filter); + + return 0; +} + +void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready) +{ + static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + bool discon_issued; + + netif_stop_queue(vif->ndev); + + clear_bit(WLAN_ENABLED, &vif->flags); + + if (wmi_ready) { + discon_issued = test_bit(CONNECTED, &vif->flags) || + test_bit(CONNECT_PEND, &vif->flags); + ath6kl_disconnect(vif); + del_timer(&vif->disconnect_timer); + + if (discon_issued) + ath6kl_disconnect_event(vif, DISCONNECT_CMD, + (vif->nw_type & AP_NETWORK) ? + bcast_mac : vif->bssid, + 0, NULL, 0); + } + + if (vif->scan_req) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + + cfg80211_scan_done(vif->scan_req, &info); + vif->scan_req = NULL; + } + + /* need to clean up enhanced bmiss detection fw state */ + ath6kl_cfg80211_sta_bmiss_enhance(vif, false); +} + +void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif) +{ + struct ath6kl *ar = vif->ar; + struct ath6kl_mc_filter *mc_filter, *tmp; + + aggr_module_destroy(vif->aggr_cntxt); + + ar->avail_idx_map |= BIT(vif->fw_vif_idx); + + if (vif->nw_type == ADHOC_NETWORK) + ar->ibss_if_active = false; + + list_for_each_entry_safe(mc_filter, tmp, &vif->mc_filter, list) { + list_del(&mc_filter->list); + kfree(mc_filter); + } + + cfg80211_unregister_netdevice(vif->ndev); + + ar->num_vif--; +} + +static const char ath6kl_gstrings_sta_stats[][ETH_GSTRING_LEN] = { + /* Common stats names used by many drivers. */ + "tx_pkts_nic", "tx_bytes_nic", "rx_pkts_nic", "rx_bytes_nic", + + /* TX stats. 
*/ + "d_tx_ucast_pkts", "d_tx_bcast_pkts", + "d_tx_ucast_bytes", "d_tx_bcast_bytes", + "d_tx_rts_ok", "d_tx_error", "d_tx_fail", + "d_tx_retry", "d_tx_multi_retry", "d_tx_rts_fail", + "d_tx_tkip_counter_measures", + + /* RX Stats. */ + "d_rx_ucast_pkts", "d_rx_ucast_rate", "d_rx_bcast_pkts", + "d_rx_ucast_bytes", "d_rx_bcast_bytes", "d_rx_frag_pkt", + "d_rx_error", "d_rx_crc_err", "d_rx_keycache_miss", + "d_rx_decrypt_crc_err", "d_rx_duplicate_frames", + "d_rx_mic_err", "d_rx_tkip_format_err", "d_rx_ccmp_format_err", + "d_rx_ccmp_replay_err", + + /* Misc stats. */ + "d_beacon_miss", "d_num_connects", "d_num_disconnects", + "d_beacon_avg_rssi", "d_arp_received", "d_arp_matched", + "d_arp_replied" +}; + +#define ATH6KL_STATS_LEN ARRAY_SIZE(ath6kl_gstrings_sta_stats) + +static int ath6kl_get_sset_count(struct net_device *dev, int sset) +{ + int rv = 0; + + if (sset == ETH_SS_STATS) + rv += ATH6KL_STATS_LEN; + + if (rv == 0) + return -EOPNOTSUPP; + return rv; +} + +static void ath6kl_get_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct ath6kl_vif *vif = netdev_priv(dev); + struct ath6kl *ar = vif->ar; + int i = 0; + struct target_stats *tgt_stats; + + memset(data, 0, sizeof(u64) * ATH6KL_STATS_LEN); + + ath6kl_read_tgt_stats(ar, vif); + + tgt_stats = &vif->target_stats; + + data[i++] = tgt_stats->tx_ucast_pkt + tgt_stats->tx_bcast_pkt; + data[i++] = tgt_stats->tx_ucast_byte + tgt_stats->tx_bcast_byte; + data[i++] = tgt_stats->rx_ucast_pkt + tgt_stats->rx_bcast_pkt; + data[i++] = tgt_stats->rx_ucast_byte + tgt_stats->rx_bcast_byte; + + data[i++] = tgt_stats->tx_ucast_pkt; + data[i++] = tgt_stats->tx_bcast_pkt; + data[i++] = tgt_stats->tx_ucast_byte; + data[i++] = tgt_stats->tx_bcast_byte; + data[i++] = tgt_stats->tx_rts_success_cnt; + data[i++] = tgt_stats->tx_err; + data[i++] = tgt_stats->tx_fail_cnt; + data[i++] = tgt_stats->tx_retry_cnt; + data[i++] = tgt_stats->tx_mult_retry_cnt; + data[i++] = tgt_stats->tx_rts_fail_cnt; + data[i++] = tgt_stats->tkip_cnter_measures_invoked; + + data[i++] = tgt_stats->rx_ucast_pkt; + data[i++] = tgt_stats->rx_ucast_rate; + data[i++] = tgt_stats->rx_bcast_pkt; + data[i++] = tgt_stats->rx_ucast_byte; + data[i++] = tgt_stats->rx_bcast_byte; + data[i++] = tgt_stats->rx_frgment_pkt; + data[i++] = tgt_stats->rx_err; + data[i++] = tgt_stats->rx_crc_err; + data[i++] = tgt_stats->rx_key_cache_miss; + data[i++] = tgt_stats->rx_decrypt_err; + data[i++] = tgt_stats->rx_dupl_frame; + data[i++] = tgt_stats->tkip_local_mic_fail; + data[i++] = tgt_stats->tkip_fmt_err; + data[i++] = tgt_stats->ccmp_fmt_err; + data[i++] = tgt_stats->ccmp_replays; + + data[i++] = tgt_stats->cs_bmiss_cnt; + data[i++] = tgt_stats->cs_connect_cnt; + data[i++] = tgt_stats->cs_discon_cnt; + data[i++] = tgt_stats->cs_ave_beacon_rssi; + data[i++] = tgt_stats->arp_received; + data[i++] = tgt_stats->arp_matched; + data[i++] = tgt_stats->arp_replied; + + if (i != ATH6KL_STATS_LEN) { + WARN_ON_ONCE(1); + ath6kl_err("ethtool stats error, i: %d STATS_LEN: %d\n", + i, (int)ATH6KL_STATS_LEN); + } +} + +/* These stats are per NIC, not really per vdev, so we just ignore dev. 
*/ +static void ath6kl_get_strings(struct net_device *dev, u32 sset, u8 *data) +{ + int sz_sta_stats = 0; + + if (sset == ETH_SS_STATS) { + sz_sta_stats = sizeof(ath6kl_gstrings_sta_stats); + memcpy(data, ath6kl_gstrings_sta_stats, sz_sta_stats); + } +} + +static const struct ethtool_ops ath6kl_ethtool_ops = { + .get_drvinfo = cfg80211_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = ath6kl_get_strings, + .get_ethtool_stats = ath6kl_get_stats, + .get_sset_count = ath6kl_get_sset_count, +}; + +struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name, + unsigned char name_assign_type, + enum nl80211_iftype type, + u8 fw_vif_idx, u8 nw_type) +{ + struct net_device *ndev; + struct ath6kl_vif *vif; + u8 addr[ETH_ALEN]; + + ndev = alloc_netdev(sizeof(*vif), name, name_assign_type, ether_setup); + if (!ndev) + return NULL; + + vif = netdev_priv(ndev); + ndev->ieee80211_ptr = &vif->wdev; + vif->wdev.wiphy = ar->wiphy; + vif->ar = ar; + vif->ndev = ndev; + SET_NETDEV_DEV(ndev, wiphy_dev(vif->wdev.wiphy)); + vif->wdev.netdev = ndev; + vif->wdev.iftype = type; + vif->fw_vif_idx = fw_vif_idx; + vif->nw_type = nw_type; + vif->next_mode = nw_type; + vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL; + vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME; + vif->bg_scan_period = 0; + vif->htcap[NL80211_BAND_2GHZ].ht_enable = true; + vif->htcap[NL80211_BAND_5GHZ].ht_enable = true; + + ether_addr_copy(addr, ar->mac_addr); + if (fw_vif_idx != 0) { + addr[0] = (addr[0] ^ (1 << fw_vif_idx)) | 0x2; + if (test_bit(ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR, + ar->fw_capabilities)) + addr[4] ^= 0x80; + } + eth_hw_addr_set(ndev, addr); + + init_netdev(ndev); + + ath6kl_init_control_info(vif); + + if (ath6kl_cfg80211_vif_init(vif)) + goto err; + + netdev_set_default_ethtool_ops(ndev, &ath6kl_ethtool_ops); + + if (cfg80211_register_netdevice(ndev)) + goto err; + + ar->avail_idx_map &= ~BIT(fw_vif_idx); + vif->sme_state = SME_DISCONNECTED; + set_bit(WLAN_ENABLED, &vif->flags); + ar->wlan_pwr_state = WLAN_POWER_STATE_ON; + + if (type == NL80211_IFTYPE_ADHOC) + ar->ibss_if_active = true; + + spin_lock_bh(&ar->list_lock); + list_add_tail(&vif->list, &ar->vif_list); + spin_unlock_bh(&ar->list_lock); + + return &vif->wdev; + +err: + aggr_module_destroy(vif->aggr_cntxt); + free_netdev(ndev); + return NULL; +} + +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support ath6kl_wowlan_support = { + .flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_GTK_REKEY_FAILURE | + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_4WAY_HANDSHAKE, + .n_patterns = WOW_MAX_FILTERS_PER_LIST, + .pattern_min_len = 1, + .pattern_max_len = WOW_PATTERN_SIZE, +}; +#endif + +int ath6kl_cfg80211_init(struct ath6kl *ar) +{ + struct wiphy *wiphy = ar->wiphy; + bool band_2gig = false, band_5gig = false, ht = false; + int ret; + + wiphy->mgmt_stypes = ath6kl_mgmt_stypes; + + wiphy->max_remain_on_channel_duration = 5000; + + /* set device pointer for wiphy */ + set_wiphy_dev(wiphy, ar->dev); + + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_AP); + if (ar->p2p) { + wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_P2P_CLIENT); + } + + if (IS_ENABLED(CONFIG_ATH6KL_REGDOMAIN) && + test_bit(ATH6KL_FW_CAPABILITY_REGDOMAIN, ar->fw_capabilities)) { + wiphy->reg_notifier = ath6kl_cfg80211_reg_notify; + ar->wiphy->features |= NL80211_FEATURE_CELL_BASE_REG_HINTS; + } + + /* max num of ssids 
that can be probed during scanning */ + wiphy->max_scan_ssids = MAX_PROBED_SSIDS; + + /* max num of ssids that can be matched after scan */ + if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST, + ar->fw_capabilities)) + wiphy->max_match_sets = MAX_PROBED_SSIDS; + + wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */ + switch (ar->hw.cap) { + case WMI_11AN_CAP: + ht = true; + fallthrough; + case WMI_11A_CAP: + band_5gig = true; + break; + case WMI_11GN_CAP: + ht = true; + fallthrough; + case WMI_11G_CAP: + band_2gig = true; + break; + case WMI_11AGN_CAP: + ht = true; + fallthrough; + case WMI_11AG_CAP: + band_2gig = true; + band_5gig = true; + break; + default: + ath6kl_err("invalid phy capability!\n"); + return -EINVAL; + } + + /* + * Even if the fw has HT support, advertise HT cap only when + * the firmware has support to override RSN capability, otherwise + * 4-way handshake would fail. + */ + if (!(ht && + test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, + ar->fw_capabilities))) { + ath6kl_band_2ghz.ht_cap.cap = 0; + ath6kl_band_2ghz.ht_cap.ht_supported = false; + ath6kl_band_5ghz.ht_cap.cap = 0; + ath6kl_band_5ghz.ht_cap.ht_supported = false; + + if (ht) + ath6kl_err("Firmware lacks RSN-CAP-OVERRIDE, so HT (802.11n) is disabled."); + } + + if (test_bit(ATH6KL_FW_CAPABILITY_64BIT_RATES, + ar->fw_capabilities)) { + ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff; + ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff; + ath6kl_band_2ghz.ht_cap.mcs.rx_mask[1] = 0xff; + ath6kl_band_5ghz.ht_cap.mcs.rx_mask[1] = 0xff; + ar->hw.tx_ant = 0x3; /* mask, 2 antenna */ + ar->hw.rx_ant = 0x3; + } else { + ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff; + ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff; + ar->hw.tx_ant = 1; + ar->hw.rx_ant = 1; + } + + wiphy->available_antennas_tx = ar->hw.tx_ant; + wiphy->available_antennas_rx = ar->hw.rx_ant; + + if (band_2gig) + wiphy->bands[NL80211_BAND_2GHZ] = &ath6kl_band_2ghz; + if (band_5gig) + wiphy->bands[NL80211_BAND_5GHZ] = &ath6kl_band_5ghz; + + wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + + wiphy->cipher_suites = cipher_suites; + wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); + +#ifdef CONFIG_PM + wiphy->wowlan = &ath6kl_wowlan_support; +#endif + + wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS; + + ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM | + WIPHY_FLAG_HAVE_AP_SME | + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; + + if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, ar->fw_capabilities)) + ar->wiphy->max_sched_scan_reqs = 1; + + if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT, + ar->fw_capabilities)) + ar->wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER; + + ar->wiphy->probe_resp_offload = + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; + + ret = wiphy_register(wiphy); + if (ret < 0) { + ath6kl_err("couldn't register wiphy device\n"); + return ret; + } + + ar->wiphy_registered = true; + + return 0; +} + +void ath6kl_cfg80211_cleanup(struct ath6kl *ar) +{ + wiphy_unregister(ar->wiphy); + + ar->wiphy_registered = false; +} + +struct ath6kl *ath6kl_cfg80211_create(void) +{ + struct ath6kl *ar; + struct wiphy *wiphy; + + /* create a new wiphy for use with cfg80211 */ + wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl)); + + if (!wiphy) { + ath6kl_err("couldn't allocate wiphy device\n"); + return NULL; + } + + ar = wiphy_priv(wiphy); + ar->wiphy = wiphy; + + return ar; +} + +/* Note: ar variable must not be 
accessed after calling this! */ +void ath6kl_cfg80211_destroy(struct ath6kl *ar) +{ + int i; + + for (i = 0; i < AP_MAX_NUM_STA; i++) + kfree(ar->sta_list[i].aggr_conn); + + wiphy_free(ar->wiphy); +} + diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h new file mode 100644 index 000000000..5aa57a763 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef ATH6KL_CFG80211_H +#define ATH6KL_CFG80211_H + +enum ath6kl_cfg_suspend_mode { + ATH6KL_CFG_SUSPEND_DEEPSLEEP, + ATH6KL_CFG_SUSPEND_CUTPOWER, + ATH6KL_CFG_SUSPEND_WOW, +}; + +struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name, + unsigned char name_assign_type, + enum nl80211_iftype type, + u8 fw_vif_idx, u8 nw_type); +void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq, + enum wmi_phy_mode mode); +void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted); + +void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel, + u8 *bssid, u16 listen_intvl, + u16 beacon_intvl, + enum network_type nw_type, + u8 beacon_ie_len, u8 assoc_req_len, + u8 assoc_resp_len, u8 *assoc_info); + +void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason, + u8 *bssid, u8 assoc_resp_len, + u8 *assoc_info, u16 proto_reason); + +void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, + bool ismcast); + +int ath6kl_cfg80211_suspend(struct ath6kl *ar, + enum ath6kl_cfg_suspend_mode mode, + struct cfg80211_wowlan *wow); + +int ath6kl_cfg80211_resume(struct ath6kl *ar); + +void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif); + +void ath6kl_cfg80211_stop(struct ath6kl_vif *vif); +void ath6kl_cfg80211_stop_all(struct ath6kl *ar); + +int ath6kl_cfg80211_init(struct ath6kl *ar); +void ath6kl_cfg80211_cleanup(struct ath6kl *ar); + +struct ath6kl *ath6kl_cfg80211_create(void); +void ath6kl_cfg80211_destroy(struct ath6kl *ar); + +#endif /* ATH6KL_CFG80211_H */ diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h new file mode 100644 index 000000000..d6e5234f6 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/common.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2010-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef COMMON_H +#define COMMON_H + +#include <linux/netdevice.h> + +#define ATH6KL_MAX_IE 256 + +__printf(2, 3) void ath6kl_printk(const char *level, const char *fmt, ...); + +/* + * Reflects the version of binary interface exposed by ATH6KL target + * firmware. Needs to be incremented by 1 for any change in the firmware + * that requires upgrade of the driver on the host side for the change to + * work correctly + */ +#define ATH6KL_ABI_VERSION 1 + +#define SIGNAL_QUALITY_METRICS_NUM_MAX 2 + +enum { + SIGNAL_QUALITY_METRICS_SNR = 0, + SIGNAL_QUALITY_METRICS_RSSI, + SIGNAL_QUALITY_METRICS_ALL, +}; + +/* + * Data Path + */ + +#define WMI_MAX_TX_DATA_FRAME_LENGTH \ + (1500 + sizeof(struct wmi_data_hdr) + \ + sizeof(struct ethhdr) + \ + sizeof(struct ath6kl_llc_snap_hdr)) + +/* An AMSDU frame */ /* The MAX AMSDU length of AR6003 is 3839 */ +#define WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH \ + (3840 + sizeof(struct wmi_data_hdr) + \ + sizeof(struct ethhdr) + \ + sizeof(struct ath6kl_llc_snap_hdr)) + +#define EPPING_ALIGNMENT_PAD \ + (((sizeof(struct htc_frame_hdr) + 3) & (~0x3)) \ + - sizeof(struct htc_frame_hdr)) + +struct ath6kl_llc_snap_hdr { + u8 dsap; + u8 ssap; + u8 cntl; + u8 org_code[3]; + __be16 eth_type; +} __packed; + +enum ath6kl_crypto_type { + NONE_CRYPT = 0x01, + WEP_CRYPT = 0x02, + TKIP_CRYPT = 0x04, + AES_CRYPT = 0x08, + WAPI_CRYPT = 0x10, +}; + +struct htc_endpoint_credit_dist; +struct ath6kl; +struct ath6kl_htcap; +enum htc_credit_dist_reason; +struct ath6kl_htc_credit_info; + +struct sk_buff *ath6kl_buf_alloc(int size); +#endif /* COMMON_H */ diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c new file mode 100644 index 000000000..4f0a7a185 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/core.c @@ -0,0 +1,367 @@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "core.h" + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/export.h> +#include <linux/vmalloc.h> + +#include "debug.h" +#include "hif-ops.h" +#include "htc-ops.h" +#include "cfg80211.h" + +unsigned int debug_mask; +static unsigned int suspend_mode; +static unsigned int wow_mode; +static unsigned int uart_debug; +static unsigned int uart_rate = 115200; +static unsigned int ath6kl_p2p; +static unsigned int testmode; +static unsigned int recovery_enable; +static unsigned int heart_beat_poll; + +module_param(debug_mask, uint, 0644); +module_param(suspend_mode, uint, 0644); +module_param(wow_mode, uint, 0644); +module_param(uart_debug, uint, 0644); +module_param(uart_rate, uint, 0644); +module_param(ath6kl_p2p, uint, 0644); +module_param(testmode, uint, 0644); +module_param(recovery_enable, uint, 0644); +module_param(heart_beat_poll, uint, 0644); +MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error"); +MODULE_PARM_DESC(heart_beat_poll, + "Enable fw error detection periodic polling in msecs - Also set recovery_enable for this to be effective"); + + +void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb) +{ + ath6kl_htc_tx_complete(ar, skb); +} +EXPORT_SYMBOL(ath6kl_core_tx_complete); + +void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe) +{ + ath6kl_htc_rx_complete(ar, skb, pipe); +} +EXPORT_SYMBOL(ath6kl_core_rx_complete); + +int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type) +{ + struct ath6kl_bmi_target_info targ_info; + struct wireless_dev *wdev; + int ret = 0, i; + + switch (htc_type) { + case ATH6KL_HTC_TYPE_MBOX: + ath6kl_htc_mbox_attach(ar); + break; + case ATH6KL_HTC_TYPE_PIPE: + ath6kl_htc_pipe_attach(ar); + break; + default: + WARN_ON(1); + return -ENOMEM; + } + + ar->ath6kl_wq = create_singlethread_workqueue("ath6kl"); + if (!ar->ath6kl_wq) + return -ENOMEM; + + ret = ath6kl_bmi_init(ar); + if (ret) + goto err_wq; + + /* + * Turn on power to get hardware (target) version and leave power + * on delibrately as we will boot the hardware anyway within few + * seconds. + */ + ret = ath6kl_hif_power_on(ar); + if (ret) + goto err_bmi_cleanup; + + ret = ath6kl_bmi_get_target_info(ar, &targ_info); + if (ret) + goto err_power_off; + + ar->version.target_ver = le32_to_cpu(targ_info.version); + ar->target_type = le32_to_cpu(targ_info.type); + ar->wiphy->hw_version = le32_to_cpu(targ_info.version); + + ret = ath6kl_init_hw_params(ar); + if (ret) + goto err_power_off; + + ar->htc_target = ath6kl_htc_create(ar); + + if (!ar->htc_target) { + ret = -ENOMEM; + goto err_power_off; + } + + ar->testmode = testmode; + + ret = ath6kl_init_fetch_firmwares(ar); + if (ret) + goto err_htc_cleanup; + + /* FIXME: we should free all firmwares in the error cases below */ + + /* + * Backwards compatibility support for older ar6004 firmware images + * which do not set these feature flags. 
+ */ + if (ar->target_type == TARGET_TYPE_AR6004 && + ar->fw_api <= 4) { + __set_bit(ATH6KL_FW_CAPABILITY_64BIT_RATES, + ar->fw_capabilities); + __set_bit(ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS, + ar->fw_capabilities); + + if (ar->hw.id == AR6004_HW_1_3_VERSION) + __set_bit(ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT, + ar->fw_capabilities); + } + + /* Indicate that WMI is enabled (although not ready yet) */ + set_bit(WMI_ENABLED, &ar->flag); + ar->wmi = ath6kl_wmi_init(ar); + if (!ar->wmi) { + ath6kl_err("failed to initialize wmi\n"); + ret = -EIO; + goto err_htc_cleanup; + } + + ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi); + + /* setup access class priority mappings */ + ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */ + ar->ac_stream_pri_map[WMM_AC_BE] = 1; + ar->ac_stream_pri_map[WMM_AC_VI] = 2; + ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */ + + /* allocate some buffers that handle larger AMSDU frames */ + ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS); + + ath6kl_cookie_init(ar); + + ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER | + ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST; + + if (suspend_mode && + suspend_mode >= WLAN_POWER_STATE_CUT_PWR && + suspend_mode <= WLAN_POWER_STATE_WOW) + ar->suspend_mode = suspend_mode; + else + ar->suspend_mode = 0; + + if (suspend_mode == WLAN_POWER_STATE_WOW && + (wow_mode == WLAN_POWER_STATE_CUT_PWR || + wow_mode == WLAN_POWER_STATE_DEEP_SLEEP)) + ar->wow_suspend_mode = wow_mode; + else + ar->wow_suspend_mode = 0; + + if (uart_debug) + ar->conf_flags |= ATH6KL_CONF_UART_DEBUG; + ar->hw.uarttx_rate = uart_rate; + + set_bit(FIRST_BOOT, &ar->flag); + + ath6kl_debug_init(ar); + + ret = ath6kl_init_hw_start(ar); + if (ret) { + ath6kl_err("Failed to start hardware: %d\n", ret); + goto err_rxbuf_cleanup; + } + + /* give our connected endpoints some buffers */ + ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep); + ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]); + + ret = ath6kl_cfg80211_init(ar); + if (ret) + goto err_rxbuf_cleanup; + + ret = ath6kl_debug_init_fs(ar); + if (ret) { + wiphy_unregister(ar->wiphy); + goto err_rxbuf_cleanup; + } + + for (i = 0; i < ar->vif_max; i++) + ar->avail_idx_map |= BIT(i); + + rtnl_lock(); + wiphy_lock(ar->wiphy); + + /* Add an initial station interface */ + wdev = ath6kl_interface_add(ar, "wlan%d", NET_NAME_ENUM, + NL80211_IFTYPE_STATION, 0, INFRA_NETWORK); + + wiphy_unlock(ar->wiphy); + rtnl_unlock(); + + if (!wdev) { + ath6kl_err("Failed to instantiate a network device\n"); + ret = -ENOMEM; + wiphy_unregister(ar->wiphy); + goto err_rxbuf_cleanup; + } + + ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n", + __func__, wdev->netdev->name, wdev->netdev, ar); + + ar->fw_recovery.enable = !!recovery_enable; + if (!ar->fw_recovery.enable) + return ret; + + if (heart_beat_poll && + test_bit(ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL, + ar->fw_capabilities)) + ar->fw_recovery.hb_poll = heart_beat_poll; + + ath6kl_recovery_init(ar); + + return ret; + +err_rxbuf_cleanup: + ath6kl_debug_cleanup(ar); + ath6kl_htc_flush_rx_buf(ar->htc_target); + ath6kl_cleanup_amsdu_rxbufs(ar); + ath6kl_wmi_shutdown(ar->wmi); + clear_bit(WMI_ENABLED, &ar->flag); + ar->wmi = NULL; +err_htc_cleanup: + ath6kl_htc_cleanup(ar->htc_target); +err_power_off: + ath6kl_hif_power_off(ar); +err_bmi_cleanup: + ath6kl_bmi_cleanup(ar); +err_wq: + destroy_workqueue(ar->ath6kl_wq); + + return ret; +} +EXPORT_SYMBOL(ath6kl_core_init); + +struct ath6kl *ath6kl_core_create(struct device *dev) +{ + 
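/* + * Allocates the wiphy with the ath6kl context embedded in it and + * pre-allocates per-station aggregation state; returns NULL on any + * allocation failure. + */ +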
struct ath6kl *ar; + u8 ctr; + + ar = ath6kl_cfg80211_create(); + if (!ar) + return NULL; + + ar->p2p = !!ath6kl_p2p; + ar->dev = dev; + + ar->vif_max = 1; + + ar->max_norm_iface = 1; + + spin_lock_init(&ar->lock); + spin_lock_init(&ar->mcastpsq_lock); + spin_lock_init(&ar->list_lock); + + init_waitqueue_head(&ar->event_wq); + sema_init(&ar->sem, 1); + + INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue); + INIT_LIST_HEAD(&ar->vif_list); + + clear_bit(WMI_ENABLED, &ar->flag); + clear_bit(SKIP_SCAN, &ar->flag); + clear_bit(DESTROY_IN_PROGRESS, &ar->flag); + + ar->tx_pwr = 0; + ar->intra_bss = 1; + ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD; + + ar->state = ATH6KL_STATE_OFF; + + memset((u8 *)ar->sta_list, 0, + AP_MAX_NUM_STA * sizeof(struct ath6kl_sta)); + + /* Init the PS queues */ + for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) { + spin_lock_init(&ar->sta_list[ctr].psq_lock); + skb_queue_head_init(&ar->sta_list[ctr].psq); + skb_queue_head_init(&ar->sta_list[ctr].apsdq); + ar->sta_list[ctr].mgmt_psq_len = 0; + INIT_LIST_HEAD(&ar->sta_list[ctr].mgmt_psq); + ar->sta_list[ctr].aggr_conn = + kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL); + if (!ar->sta_list[ctr].aggr_conn) { + ath6kl_err("Failed to allocate memory for sta aggregation information\n"); + ath6kl_core_destroy(ar); + return NULL; + } + } + + skb_queue_head_init(&ar->mcastpsq); + + memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3); + + return ar; +} +EXPORT_SYMBOL(ath6kl_core_create); + +void ath6kl_core_cleanup(struct ath6kl *ar) +{ + ath6kl_hif_power_off(ar); + + ath6kl_recovery_cleanup(ar); + + destroy_workqueue(ar->ath6kl_wq); + + if (ar->htc_target) + ath6kl_htc_cleanup(ar->htc_target); + + ath6kl_cookie_cleanup(ar); + + ath6kl_cleanup_amsdu_rxbufs(ar); + + ath6kl_bmi_cleanup(ar); + + ath6kl_debug_cleanup(ar); + + kfree(ar->fw_board); + kfree(ar->fw_otp); + vfree(ar->fw); + kfree(ar->fw_patch); + kfree(ar->fw_testscript); + + ath6kl_cfg80211_cleanup(ar); +} +EXPORT_SYMBOL(ath6kl_core_cleanup); + +void ath6kl_core_destroy(struct ath6kl *ar) +{ + ath6kl_cfg80211_destroy(ar); +} +EXPORT_SYMBOL(ath6kl_core_destroy); + +MODULE_AUTHOR("Qualcomm Atheros"); +MODULE_DESCRIPTION("Core module for AR600x SDIO and USB devices."); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h new file mode 100644 index 000000000..77e052336 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/core.h @@ -0,0 +1,992 @@ +/* + * Copyright (c) 2010-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef CORE_H +#define CORE_H + +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/firmware.h> +#include <linux/sched.h> +#include <linux/circ_buf.h> +#include <net/cfg80211.h> +#include "htc.h" +#include "wmi.h" +#include "bmi.h" +#include "target.h" + +#define MAX_ATH6KL 1 +#define ATH6KL_MAX_RX_BUFFERS 16 +#define ATH6KL_BUFFER_SIZE 1664 +#define ATH6KL_MAX_AMSDU_RX_BUFFERS 4 +#define ATH6KL_AMSDU_REFILL_THRESHOLD 3 +#define ATH6KL_AMSDU_BUFFER_SIZE (WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH + 128) +#define MAX_MSDU_SUBFRAME_PAYLOAD_LEN 1508 +#define MIN_MSDU_SUBFRAME_PAYLOAD_LEN 46 + +#define USER_SAVEDKEYS_STAT_INIT 0 +#define USER_SAVEDKEYS_STAT_RUN 1 + +#define ATH6KL_TX_TIMEOUT 10 +#define ATH6KL_MAX_ENDPOINTS 4 +#define MAX_NODE_NUM 15 + +#define ATH6KL_APSD_ALL_FRAME 0xFFFF +#define ATH6KL_APSD_NUM_OF_AC 0x4 +#define ATH6KL_APSD_FRAME_MASK 0xF + +/* Extra bytes for htc header alignment */ +#define ATH6KL_HTC_ALIGN_BYTES 3 + +/* MAX_HI_COOKIE_NUM are reserved for high priority traffic */ +#define MAX_DEF_COOKIE_NUM 180 +#define MAX_HI_COOKIE_NUM 18 /* 10% of MAX_COOKIE_NUM */ +#define MAX_COOKIE_NUM (MAX_DEF_COOKIE_NUM + MAX_HI_COOKIE_NUM) + +#define MAX_DEFAULT_SEND_QUEUE_DEPTH (MAX_DEF_COOKIE_NUM / WMM_NUM_AC) + +#define DISCON_TIMER_INTVAL 10000 /* in msec */ + +/* Channel dwell time in fg scan */ +#define ATH6KL_FG_SCAN_INTERVAL 50 /* in ms */ + +/* includes also the null byte */ +#define ATH6KL_FIRMWARE_MAGIC "QCA-ATH6KL" + +enum ath6kl_fw_ie_type { + ATH6KL_FW_IE_FW_VERSION = 0, + ATH6KL_FW_IE_TIMESTAMP = 1, + ATH6KL_FW_IE_OTP_IMAGE = 2, + ATH6KL_FW_IE_FW_IMAGE = 3, + ATH6KL_FW_IE_PATCH_IMAGE = 4, + ATH6KL_FW_IE_RESERVED_RAM_SIZE = 5, + ATH6KL_FW_IE_CAPABILITIES = 6, + ATH6KL_FW_IE_PATCH_ADDR = 7, + ATH6KL_FW_IE_BOARD_ADDR = 8, + ATH6KL_FW_IE_VIF_MAX = 9, +}; + +enum ath6kl_fw_capability { + ATH6KL_FW_CAPABILITY_HOST_P2P = 0, + ATH6KL_FW_CAPABILITY_SCHED_SCAN = 1, + + /* + * Firmware is capable of supporting P2P mgmt operations on a + * station interface. After group formation, the station + * interface will become a P2P client/GO interface as the case may be + */ + ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, + + /* + * Firmware has support to cleanup inactive stations + * in AP mode. + */ + ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT, + + /* Firmware has support to override rsn cap of rsn ie */ + ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, + + /* + * Multicast support in WOW and host awake mode. + * Allow all multicast in host awake mode. + * Apply multicast filter in WOW mode. + */ + ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER, + + /* Firmware supports enhanced bmiss detection */ + ATH6KL_FW_CAPABILITY_BMISS_ENHANCE, + + /* + * FW supports matching of ssid in schedule scan + */ + ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST, + + /* Firmware supports filtering BSS results by RSSI */ + ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD, + + /* FW sets mac_addr[4] ^= 0x80 for newly created interfaces */ + ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR, + + /* Firmware supports TX error rate notification */ + ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, + + /* supports WMI_SET_REGDOMAIN_CMDID command */ + ATH6KL_FW_CAPABILITY_REGDOMAIN, + + /* Firmware supports sched scan decoupled from host sleep */ + ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, + + /* + * Firmware capability for hang detection through heart beat + * challenge messages. 
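+ * The host periodically sends a challenge and counts unanswered + * ones; once ATH6KL_HB_RESP_MISS_THRES is reached the firmware is + * assumed dead and error recovery is triggered.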
+ */ + ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL, + + /* WMI_SET_TX_SELECT_RATES_CMDID uses 64 bit size rate table */ + ATH6KL_FW_CAPABILITY_64BIT_RATES, + + /* WMI_AP_CONN_INACT_CMDID uses minutes as units */ + ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS, + + /* use low priority endpoint for all data */ + ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT, + + /* ratetable is the 2 stream version (max MCS15) */ + ATH6KL_FW_CAPABILITY_RATETABLE_MCS15, + + /* firmware doesn't support IP checksumming */ + ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM, + + /* this needs to be last */ + ATH6KL_FW_CAPABILITY_MAX, +}; + +#define ATH6KL_CAPABILITY_LEN (ALIGN(ATH6KL_FW_CAPABILITY_MAX, 32) / 32) + +struct ath6kl_fw_ie { + __le32 id; + __le32 len; + u8 data[]; +}; + +enum ath6kl_hw_flags { + ATH6KL_HW_SDIO_CRC_ERROR_WAR = BIT(3), +}; + +#define ATH6KL_FW_API2_FILE "fw-2.bin" +#define ATH6KL_FW_API3_FILE "fw-3.bin" +#define ATH6KL_FW_API4_FILE "fw-4.bin" +#define ATH6KL_FW_API5_FILE "fw-5.bin" + +/* AR6003 1.0 definitions */ +#define AR6003_HW_1_0_VERSION 0x300002ba + +/* AR6003 2.0 definitions */ +#define AR6003_HW_2_0_VERSION 0x30000384 +#define AR6003_HW_2_0_PATCH_DOWNLOAD_ADDRESS 0x57e910 +#define AR6003_HW_2_0_FW_DIR "ath6k/AR6003/hw2.0" +#define AR6003_HW_2_0_OTP_FILE "otp.bin.z77" +#define AR6003_HW_2_0_FIRMWARE_FILE "athwlan.bin.z77" +#define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "athtcmd_ram.bin" +#define AR6003_HW_2_0_PATCH_FILE "data.patch.bin" +#define AR6003_HW_2_0_BOARD_DATA_FILE AR6003_HW_2_0_FW_DIR "/bdata.bin" +#define AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE \ + AR6003_HW_2_0_FW_DIR "/bdata.SD31.bin" + +/* AR6003 3.0 definitions */ +#define AR6003_HW_2_1_1_VERSION 0x30000582 +#define AR6003_HW_2_1_1_FW_DIR "ath6k/AR6003/hw2.1.1" +#define AR6003_HW_2_1_1_OTP_FILE "otp.bin" +#define AR6003_HW_2_1_1_FIRMWARE_FILE "athwlan.bin" +#define AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE "athtcmd_ram.bin" +#define AR6003_HW_2_1_1_UTF_FIRMWARE_FILE "utf.bin" +#define AR6003_HW_2_1_1_TESTSCRIPT_FILE "nullTestFlow.bin" +#define AR6003_HW_2_1_1_PATCH_FILE "data.patch.bin" +#define AR6003_HW_2_1_1_BOARD_DATA_FILE AR6003_HW_2_1_1_FW_DIR "/bdata.bin" +#define AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE \ + AR6003_HW_2_1_1_FW_DIR "/bdata.SD31.bin" + +/* AR6004 1.0 definitions */ +#define AR6004_HW_1_0_VERSION 0x30000623 +#define AR6004_HW_1_0_FW_DIR "ath6k/AR6004/hw1.0" +#define AR6004_HW_1_0_FIRMWARE_FILE "fw.ram.bin" +#define AR6004_HW_1_0_BOARD_DATA_FILE AR6004_HW_1_0_FW_DIR "/bdata.bin" +#define AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE \ + AR6004_HW_1_0_FW_DIR "/bdata.DB132.bin" + +/* AR6004 1.1 definitions */ +#define AR6004_HW_1_1_VERSION 0x30000001 +#define AR6004_HW_1_1_FW_DIR "ath6k/AR6004/hw1.1" +#define AR6004_HW_1_1_FIRMWARE_FILE "fw.ram.bin" +#define AR6004_HW_1_1_BOARD_DATA_FILE AR6004_HW_1_1_FW_DIR "/bdata.bin" +#define AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE \ + AR6004_HW_1_1_FW_DIR "/bdata.DB132.bin" + +/* AR6004 1.2 definitions */ +#define AR6004_HW_1_2_VERSION 0x300007e8 +#define AR6004_HW_1_2_FW_DIR "ath6k/AR6004/hw1.2" +#define AR6004_HW_1_2_FIRMWARE_FILE "fw.ram.bin" +#define AR6004_HW_1_2_BOARD_DATA_FILE AR6004_HW_1_2_FW_DIR "/bdata.bin" +#define AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE \ + AR6004_HW_1_2_FW_DIR "/bdata.bin" + +/* AR6004 1.3 definitions */ +#define AR6004_HW_1_3_VERSION 0x31c8088a +#define AR6004_HW_1_3_FW_DIR "ath6k/AR6004/hw1.3" +#define AR6004_HW_1_3_FIRMWARE_FILE "fw.ram.bin" +#define AR6004_HW_1_3_TCMD_FIRMWARE_FILE "utf.bin" +#define AR6004_HW_1_3_UTF_FIRMWARE_FILE "utf.bin" +#define AR6004_HW_1_3_TESTSCRIPT_FILE 
"nullTestFlow.bin" +#define AR6004_HW_1_3_BOARD_DATA_FILE AR6004_HW_1_3_FW_DIR "/bdata.bin" +#define AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE AR6004_HW_1_3_FW_DIR "/bdata.bin" + +/* AR6004 3.0 definitions */ +#define AR6004_HW_3_0_VERSION 0x31C809F8 +#define AR6004_HW_3_0_FW_DIR "ath6k/AR6004/hw3.0" +#define AR6004_HW_3_0_FIRMWARE_FILE "fw.ram.bin" +#define AR6004_HW_3_0_TCMD_FIRMWARE_FILE "utf.bin" +#define AR6004_HW_3_0_UTF_FIRMWARE_FILE "utf.bin" +#define AR6004_HW_3_0_TESTSCRIPT_FILE "nullTestFlow.bin" +#define AR6004_HW_3_0_BOARD_DATA_FILE AR6004_HW_3_0_FW_DIR "/bdata.bin" +#define AR6004_HW_3_0_DEFAULT_BOARD_DATA_FILE AR6004_HW_3_0_FW_DIR "/bdata.bin" + +/* Per STA data, used in AP mode */ +#define STA_PS_AWAKE BIT(0) +#define STA_PS_SLEEP BIT(1) +#define STA_PS_POLLED BIT(2) +#define STA_PS_APSD_TRIGGER BIT(3) +#define STA_PS_APSD_EOSP BIT(4) + +/* HTC TX packet tagging definitions */ +#define ATH6KL_CONTROL_PKT_TAG HTC_TX_PACKET_TAG_USER_DEFINED +#define ATH6KL_DATA_PKT_TAG (ATH6KL_CONTROL_PKT_TAG + 1) + +#define AR6003_CUST_DATA_SIZE 16 + +#define AGGR_WIN_IDX(x, y) ((x) % (y)) +#define AGGR_INCR_IDX(x, y) AGGR_WIN_IDX(((x) + 1), (y)) +#define AGGR_DCRM_IDX(x, y) AGGR_WIN_IDX(((x) - 1), (y)) +#define ATH6KL_MAX_SEQ_NO 0xFFF +#define ATH6KL_NEXT_SEQ_NO(x) (((x) + 1) & ATH6KL_MAX_SEQ_NO) + +#define NUM_OF_TIDS 8 +#define AGGR_SZ_DEFAULT 8 + +#define AGGR_WIN_SZ_MIN 2 +#define AGGR_WIN_SZ_MAX 8 + +#define TID_WINDOW_SZ(_x) ((_x) << 1) + +#define AGGR_NUM_OF_FREE_NETBUFS 16 + +#define AGGR_RX_TIMEOUT 100 /* in ms */ + +#define WMI_TIMEOUT (2 * HZ) + +#define MBOX_YIELD_LIMIT 99 + +#define ATH6KL_DEFAULT_LISTEN_INTVAL 100 /* in TUs */ +#define ATH6KL_DEFAULT_BMISS_TIME 1500 +#define ATH6KL_MAX_WOW_LISTEN_INTL 300 /* in TUs */ +#define ATH6KL_MAX_BMISS_TIME 5000 + +/* configuration lags */ +/* + * ATH6KL_CONF_IGNORE_ERP_BARKER: Ignore the barker premable in + * ERP IE of beacon to determine the short premable support when + * sending (Re)Assoc req. + * ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN: Don't send the power + * module state transition failure events which happen during + * scan, to the host. + */ +#define ATH6KL_CONF_IGNORE_ERP_BARKER BIT(0) +#define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1) +#define ATH6KL_CONF_ENABLE_11N BIT(2) +#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3) +#define ATH6KL_CONF_UART_DEBUG BIT(4) + +#define P2P_WILDCARD_SSID_LEN 7 /* DIRECT- */ + +enum wlan_low_pwr_state { + WLAN_POWER_STATE_ON, + WLAN_POWER_STATE_CUT_PWR, + WLAN_POWER_STATE_DEEP_SLEEP, + WLAN_POWER_STATE_WOW +}; + +enum sme_state { + SME_DISCONNECTED, + SME_CONNECTING, + SME_CONNECTED +}; + +struct skb_hold_q { + struct sk_buff *skb; + bool is_amsdu; + u16 seq_no; +}; + +struct rxtid { + bool aggr; + bool timer_mon; + u16 win_sz; + u16 seq_next; + u32 hold_q_sz; + struct skb_hold_q *hold_q; + struct sk_buff_head q; + + /* + * lock mainly protects seq_next and hold_q. Movement of seq_next + * needs to be protected between aggr_timeout() and + * aggr_process_recv_frm(). hold_q will be holding the pending + * reorder frames and it's access should also be protected. + * Some of the other fields like hold_q_sz, win_sz and aggr are + * initialized/reset when receiving addba/delba req, also while + * deleting aggr state all the pending buffers are flushed before + * resetting these fields, so there should not be any race in accessing + * these fields. 
+ */ + spinlock_t lock; +}; + +struct rxtid_stats { + u32 num_into_aggr; + u32 num_dups; + u32 num_oow; + u32 num_mpdu; + u32 num_amsdu; + u32 num_delivered; + u32 num_timeouts; + u32 num_hole; + u32 num_bar; +}; + +struct aggr_info_conn { + u8 aggr_sz; + u8 timer_scheduled; + struct timer_list timer; + struct net_device *dev; + struct rxtid rx_tid[NUM_OF_TIDS]; + struct rxtid_stats stat[NUM_OF_TIDS]; + struct aggr_info *aggr_info; +}; + +struct aggr_info { + struct aggr_info_conn *aggr_conn; + struct sk_buff_head rx_amsdu_freeq; +}; + +struct ath6kl_wep_key { + u8 key_index; + u8 key_len; + u8 key[64]; +}; + +#define ATH6KL_KEY_SEQ_LEN 8 + +struct ath6kl_key { + u8 key[WLAN_MAX_KEY_LEN]; + u8 key_len; + u8 seq[ATH6KL_KEY_SEQ_LEN]; + u8 seq_len; + u32 cipher; +}; + +struct ath6kl_node_mapping { + u8 mac_addr[ETH_ALEN]; + u8 ep_id; + u8 tx_pend; +}; + +struct ath6kl_cookie { + struct sk_buff *skb; + u32 map_no; + struct htc_packet htc_pkt; + struct ath6kl_cookie *arc_list_next; +}; + +struct ath6kl_mgmt_buff { + struct list_head list; + u32 freq; + u32 wait; + u32 id; + bool no_cck; + size_t len; + u8 buf[]; +}; + +struct ath6kl_sta { + u16 sta_flags; + u8 mac[ETH_ALEN]; + u8 aid; + u8 keymgmt; + u8 ucipher; + u8 auth; + u8 wpa_ie[ATH6KL_MAX_IE]; + struct sk_buff_head psq; + + /* protects psq, mgmt_psq, apsdq, and mgmt_psq_len fields */ + spinlock_t psq_lock; + + struct list_head mgmt_psq; + size_t mgmt_psq_len; + u8 apsd_info; + struct sk_buff_head apsdq; + struct aggr_info_conn *aggr_conn; +}; + +struct ath6kl_version { + u32 target_ver; + u32 wlan_ver; + u32 abi_ver; +}; + +struct ath6kl_bmi { + u32 cmd_credits; + bool done_sent; + u8 *cmd_buf; + u32 max_data_size; + u32 max_cmd_size; +}; + +struct target_stats { + u64 tx_pkt; + u64 tx_byte; + u64 tx_ucast_pkt; + u64 tx_ucast_byte; + u64 tx_mcast_pkt; + u64 tx_mcast_byte; + u64 tx_bcast_pkt; + u64 tx_bcast_byte; + u64 tx_rts_success_cnt; + u64 tx_pkt_per_ac[4]; + + u64 tx_err; + u64 tx_fail_cnt; + u64 tx_retry_cnt; + u64 tx_mult_retry_cnt; + u64 tx_rts_fail_cnt; + + u64 rx_pkt; + u64 rx_byte; + u64 rx_ucast_pkt; + u64 rx_ucast_byte; + u64 rx_mcast_pkt; + u64 rx_mcast_byte; + u64 rx_bcast_pkt; + u64 rx_bcast_byte; + u64 rx_frgment_pkt; + + u64 rx_err; + u64 rx_crc_err; + u64 rx_key_cache_miss; + u64 rx_decrypt_err; + u64 rx_dupl_frame; + + u64 tkip_local_mic_fail; + u64 tkip_cnter_measures_invoked; + u64 tkip_replays; + u64 tkip_fmt_err; + u64 ccmp_fmt_err; + u64 ccmp_replays; + + u64 pwr_save_fail_cnt; + + u64 cs_bmiss_cnt; + u64 cs_low_rssi_cnt; + u64 cs_connect_cnt; + u64 cs_discon_cnt; + + s32 tx_ucast_rate; + s32 rx_ucast_rate; + + u32 lq_val; + + u32 wow_pkt_dropped; + u16 wow_evt_discarded; + + s16 noise_floor_calib; + s16 cs_rssi; + s16 cs_ave_beacon_rssi; + u8 cs_ave_beacon_snr; + u8 cs_last_roam_msec; + u8 cs_snr; + + u8 wow_host_pkt_wakeups; + u8 wow_host_evt_wakeups; + + u32 arp_received; + u32 arp_matched; + u32 arp_replied; +}; + +struct ath6kl_mbox_info { + u32 htc_addr; + u32 htc_ext_addr; + u32 htc_ext_sz; + + u32 block_size; + + u32 gmbox_addr; + + u32 gmbox_sz; +}; + +/* + * 802.11i defines an extended IV for use with non-WEP ciphers. + * When the EXTIV bit is set in the key id byte an additional + * 4 bytes immediately follow the IV for TKIP. For CCMP the + * EXTIV bit is likewise set but the 8 bytes represent the + * CCMP header rather than IV+extended-IV. 
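+ * + * For reference (802.11 byte layout, shown here only as a sketch; + * the driver does not parse these bytes itself): + * + * TKIP: TSC1 WEPSeed TSC0 KeyID|EXTIV TSC2 TSC3 TSC4 TSC5 + * CCMP: PN0 PN1 rsvd KeyID|EXTIV PN2 PN3 PN4 PN5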
+ */ + +#define ATH6KL_KEYBUF_SIZE 16 +#define ATH6KL_MICBUF_SIZE (8+8) /* space for both tx and rx */ + +#define ATH6KL_KEY_XMIT 0x01 +#define ATH6KL_KEY_RECV 0x02 +#define ATH6KL_KEY_DEFAULT 0x80 /* default xmit key */ + +/* Initial group key for AP mode */ +struct ath6kl_req_key { + bool valid; + u8 key_index; + int key_type; + u8 key[WLAN_MAX_KEY_LEN]; + u8 key_len; +}; + +enum ath6kl_hif_type { + ATH6KL_HIF_TYPE_SDIO, + ATH6KL_HIF_TYPE_USB, +}; + +enum ath6kl_htc_type { + ATH6KL_HTC_TYPE_MBOX, + ATH6KL_HTC_TYPE_PIPE, +}; + +/* Max number of filters that hw supports */ +#define ATH6K_MAX_MC_FILTERS_PER_LIST 7 +struct ath6kl_mc_filter { + struct list_head list; + char hw_addr[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE]; +}; + +struct ath6kl_htcap { + bool ht_enable; + u8 ampdu_factor; + unsigned short cap_info; +}; + +/* + * Driver's maximum limit, note that some firmwares support only one vif + * and the runtime (current) limit must be checked from ar->vif_max. + */ +#define ATH6KL_VIF_MAX 3 + +/* vif flags info */ +enum ath6kl_vif_state { + CONNECTED, + CONNECT_PEND, + WMM_ENABLED, + NETQ_STOPPED, + DTIM_EXPIRED, + CLEAR_BSSFILTER_ON_BEACON, + DTIM_PERIOD_AVAIL, + WLAN_ENABLED, + STATS_UPDATE_PEND, + HOST_SLEEP_MODE_CMD_PROCESSED, + NETDEV_MCAST_ALL_ON, + NETDEV_MCAST_ALL_OFF, + SCHED_SCANNING, +}; + +struct ath6kl_vif { + struct list_head list; + struct wireless_dev wdev; + struct net_device *ndev; + struct ath6kl *ar; + /* Lock to protect vif specific net_stats and flags */ + spinlock_t if_lock; + u8 fw_vif_idx; + unsigned long flags; + int ssid_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 dot11_auth_mode; + u8 auth_mode; + u8 prwise_crypto; + u8 prwise_crypto_len; + u8 grp_crypto; + u8 grp_crypto_len; + u8 def_txkey_index; + u8 next_mode; + u8 nw_type; + u8 bssid[ETH_ALEN]; + u8 req_bssid[ETH_ALEN]; + u16 ch_hint; + u16 bss_ch; + struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1]; + struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1]; + struct aggr_info *aggr_cntxt; + struct ath6kl_htcap htcap[NUM_NL80211_BANDS]; + + struct timer_list disconnect_timer; + struct timer_list sched_scan_timer; + + struct cfg80211_scan_request *scan_req; + enum sme_state sme_state; + int reconnect_flag; + u32 last_roc_id; + u32 last_cancel_roc_id; + u32 send_action_id; + bool probe_req_report; + u16 assoc_bss_beacon_int; + u16 listen_intvl_t; + u16 bmiss_time_t; + u32 txe_intvl; + u16 bg_scan_period; + u8 assoc_bss_dtim_period; + struct target_stats target_stats; + struct wmi_connect_cmd profile; + u16 rsn_capab; + + struct list_head mc_filter; +}; + +static inline struct ath6kl_vif *ath6kl_vif_from_wdev(struct wireless_dev *wdev) +{ + return container_of(wdev, struct ath6kl_vif, wdev); +} + +#define WOW_LIST_ID 0 +#define WOW_HOST_REQ_DELAY 500 /* ms */ + +#define ATH6KL_SCHED_SCAN_RESULT_DELAY 5000 /* ms */ + +/* Flag info */ +enum ath6kl_dev_state { + WMI_ENABLED, + WMI_READY, + WMI_CTRL_EP_FULL, + TESTMODE, + DESTROY_IN_PROGRESS, + SKIP_SCAN, + ROAM_TBL_PEND, + FIRST_BOOT, + RECOVERY_CLEANUP, +}; + +enum ath6kl_state { + ATH6KL_STATE_OFF, + ATH6KL_STATE_ON, + ATH6KL_STATE_SUSPENDING, + ATH6KL_STATE_RESUMING, + ATH6KL_STATE_DEEPSLEEP, + ATH6KL_STATE_CUTPOWER, + ATH6KL_STATE_WOW, + ATH6KL_STATE_RECOVERY, +}; + +/* Fw error recovery */ +#define ATH6KL_HB_RESP_MISS_THRES 5 + +enum ath6kl_fw_err { + ATH6KL_FW_ASSERT, + ATH6KL_FW_HB_RESP_FAILURE, + ATH6KL_FW_EP_FULL, +}; + +struct ath6kl { + struct device *dev; + struct wiphy *wiphy; + + enum ath6kl_state state; + unsigned int testmode; + + struct ath6kl_bmi bmi; 
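+ + /* + * hif_ops are filled in by the bus glue (SDIO or USB) before + * ath6kl_core_init() runs; htc_ops are attached inside + * ath6kl_core_init() based on the HTC type. + */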
+ const struct ath6kl_hif_ops *hif_ops; + const struct ath6kl_htc_ops *htc_ops; + struct wmi *wmi; + int tx_pending[ENDPOINT_MAX]; + int total_tx_data_pend; + struct htc_target *htc_target; + enum ath6kl_hif_type hif_type; + void *hif_priv; + struct list_head vif_list; + /* Lock to avoid race in vif_list entries among add/del/traverse */ + spinlock_t list_lock; + u8 num_vif; + unsigned int vif_max; + u8 max_norm_iface; + u8 avail_idx_map; + + /* + * Protects at least amsdu_rx_buffer_queue, ath6kl_alloc_cookie() + * calls, tx_pending and total_tx_data_pend. + */ + spinlock_t lock; + + struct semaphore sem; + u8 lrssi_roam_threshold; + struct ath6kl_version version; + u32 target_type; + u8 tx_pwr; + struct ath6kl_node_mapping node_map[MAX_NODE_NUM]; + u8 ibss_ps_enable; + bool ibss_if_active; + u8 node_num; + u8 next_ep_id; + struct ath6kl_cookie *cookie_list; + u32 cookie_count; + enum htc_endpoint_id ac2ep_map[WMM_NUM_AC]; + bool ac_stream_active[WMM_NUM_AC]; + u8 ac_stream_pri_map[WMM_NUM_AC]; + u8 hiac_stream_active_pri; + u8 ep2ac_map[ENDPOINT_MAX]; + enum htc_endpoint_id ctrl_ep; + struct ath6kl_htc_credit_info credit_state_info; + u32 connect_ctrl_flags; + u32 user_key_ctrl; + u8 usr_bss_filter; + struct ath6kl_sta sta_list[AP_MAX_NUM_STA]; + u8 sta_list_index; + struct ath6kl_req_key ap_mode_bkey; + struct sk_buff_head mcastpsq; + u32 want_ch_switch; + u16 last_ch; + + /* + * FIXME: protects access to mcastpsq but is actually useless as + * all skb_queue_*() functions provide serialisation themselves + */ + spinlock_t mcastpsq_lock; + + u8 intra_bss; + struct wmi_ap_mode_stat ap_stats; + u8 ap_country_code[3]; + struct list_head amsdu_rx_buffer_queue; + u8 rx_meta_ver; + enum wlan_low_pwr_state wlan_pwr_state; + u8 mac_addr[ETH_ALEN]; +#define AR_MCAST_FILTER_MAC_ADDR_SIZE 4 + struct { + void *rx_report; + size_t rx_report_len; + } tm; + + struct ath6kl_hw { + u32 id; + const char *name; + u32 dataset_patch_addr; + u32 app_load_addr; + u32 app_start_override_addr; + u32 board_ext_data_addr; + u32 reserved_ram_size; + u32 board_addr; + u32 refclk_hz; + u32 uarttx_pin; + u32 uarttx_rate; + u32 testscript_addr; + u8 tx_ant; + u8 rx_ant; + enum wmi_phy_cap cap; + + u32 flags; + + struct ath6kl_hw_fw { + const char *dir; + const char *otp; + const char *fw; + const char *tcmd; + const char *patch; + const char *utf; + const char *testscript; + } fw; + + const char *fw_board; + const char *fw_default_board; + } hw; + + u16 conf_flags; + u16 suspend_mode; + u16 wow_suspend_mode; + wait_queue_head_t event_wq; + struct ath6kl_mbox_info mbox_info; + + struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM]; + unsigned long flag; + + u8 *fw_board; + size_t fw_board_len; + + u8 *fw_otp; + size_t fw_otp_len; + + u8 *fw; + size_t fw_len; + + u8 *fw_patch; + size_t fw_patch_len; + + u8 *fw_testscript; + size_t fw_testscript_len; + + unsigned int fw_api; + unsigned long fw_capabilities[ATH6KL_CAPABILITY_LEN]; + + struct workqueue_struct *ath6kl_wq; + + struct dentry *debugfs_phy; + + bool p2p; + + bool wiphy_registered; + + struct ath6kl_fw_recovery { + struct work_struct recovery_work; + unsigned long err_reason; + unsigned long hb_poll; + struct timer_list hb_timer; + u32 seq_num; + bool hb_pending; + u8 hb_misscnt; + bool enable; + } fw_recovery; + +#ifdef CONFIG_ATH6KL_DEBUG + struct { + struct sk_buff_head fwlog_queue; + struct completion fwlog_completion; + bool fwlog_open; + + u32 fwlog_mask; + + unsigned int dbgfs_diag_reg; + u32 diag_reg_addr_wr; + u32 diag_reg_val_wr; + + struct { + unsigned int
invalid_rate; + } war_stats; + + u8 *roam_tbl; + unsigned int roam_tbl_len; + + u8 keepalive; + u8 disc_timeout; + } debug; +#endif /* CONFIG_ATH6KL_DEBUG */ +}; + +static inline struct ath6kl *ath6kl_priv(struct net_device *dev) +{ + return ((struct ath6kl_vif *) netdev_priv(dev))->ar; +} + +static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar, + u32 item_offset) +{ + u32 addr = 0; + + if (ar->target_type == TARGET_TYPE_AR6003) + addr = ATH6KL_AR6003_HI_START_ADDR + item_offset; + else if (ar->target_type == TARGET_TYPE_AR6004) + addr = ATH6KL_AR6004_HI_START_ADDR + item_offset; + + return addr; +} + +int ath6kl_configure_target(struct ath6kl *ar); +void ath6kl_detect_error(unsigned long ptr); +void disconnect_timer_handler(struct timer_list *t); +void init_netdev(struct net_device *dev); +void ath6kl_cookie_init(struct ath6kl *ar); +void ath6kl_cookie_cleanup(struct ath6kl *ar); +void ath6kl_rx(struct htc_target *target, struct htc_packet *packet); +void ath6kl_tx_complete(struct htc_target *context, + struct list_head *packet_queue); +enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target, + struct htc_packet *packet); +void ath6kl_stop_txrx(struct ath6kl *ar); +void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar); +int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value); +int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length); +int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value); +int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length); +int ath6kl_read_fwlogs(struct ath6kl *ar); +void ath6kl_init_profile_info(struct ath6kl_vif *vif); +void ath6kl_tx_data_cleanup(struct ath6kl *ar); + +struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar); +void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie); +netdev_tx_t ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev); + +struct aggr_info *aggr_init(struct ath6kl_vif *vif); +void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info, + struct aggr_info_conn *aggr_conn); +void ath6kl_rx_refill(struct htc_target *target, + enum htc_endpoint_id endpoint); +void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count); +struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target, + enum htc_endpoint_id endpoint, + int len); +void aggr_module_destroy(struct aggr_info *aggr_info); +void aggr_reset_state(struct aggr_info_conn *aggr_conn); + +struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr); +struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid); + +void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver, + enum wmi_phy_cap cap); +int ath6kl_control_tx(void *devt, struct sk_buff *skb, + enum htc_endpoint_id eid); +void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, + u8 *bssid, u16 listen_int, + u16 beacon_int, enum network_type net_type, + u8 beacon_ie_len, u8 assoc_req_len, + u8 assoc_resp_len, u8 *assoc_info); +void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel); +void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, + u8 keymgmt, u8 ucipher, u8 auth, + u8 assoc_req_len, u8 *assoc_info, u8 apsd_info); +void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, + u8 *bssid, u8 assoc_resp_len, + u8 *assoc_info, u16 prot_reason_status); +void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast); +void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr); +void 
ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status); +void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len); +void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active); +enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac); + +void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid); + +void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif); +void ath6kl_disconnect(struct ath6kl_vif *vif); +void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid); +void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no, + u8 win_sz); +void ath6kl_wakeup_event(void *dev); + +void ath6kl_init_control_info(struct ath6kl_vif *vif); +struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar); +void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready); +int ath6kl_init_hw_start(struct ath6kl *ar); +int ath6kl_init_hw_stop(struct ath6kl *ar); +int ath6kl_init_fetch_firmwares(struct ath6kl *ar); +int ath6kl_init_hw_params(struct ath6kl *ar); + +void ath6kl_check_wow_status(struct ath6kl *ar); + +void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb); +void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe); + +struct ath6kl *ath6kl_core_create(struct device *dev); +int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type); +void ath6kl_core_cleanup(struct ath6kl *ar); +void ath6kl_core_destroy(struct ath6kl *ar); + +/* Fw error recovery */ +void ath6kl_init_hw_restart(struct ath6kl *ar); +void ath6kl_recovery_err_notify(struct ath6kl *ar, enum ath6kl_fw_err reason); +void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie); +void ath6kl_recovery_init(struct ath6kl *ar); +void ath6kl_recovery_cleanup(struct ath6kl *ar); +void ath6kl_recovery_suspend(struct ath6kl *ar); +void ath6kl_recovery_resume(struct ath6kl *ar); +#endif /* CORE_H */ diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c new file mode 100644 index 000000000..433a047f3 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/debug.c @@ -0,0 +1,1873 @@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "core.h" + +#include <linux/skbuff.h> +#include <linux/fs.h> +#include <linux/vmalloc.h> +#include <linux/export.h> + +#include "debug.h" +#include "target.h" + +struct ath6kl_fwlog_slot { + __le32 timestamp; + __le32 length; + + /* max ATH6KL_FWLOG_PAYLOAD_SIZE bytes */ + u8 payload[]; +}; + +#define ATH6KL_FWLOG_MAX_ENTRIES 20 + +#define ATH6KL_FWLOG_VALID_MASK 0x1ffff + +void ath6kl_printk(const char *level, const char *fmt, ...) 
+{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + printk("%sath6kl: %pV", level, &vaf); + + va_end(args); +} +EXPORT_SYMBOL(ath6kl_printk); + +void ath6kl_info(const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + ath6kl_printk(KERN_INFO, "%pV", &vaf); + trace_ath6kl_log_info(&vaf); + va_end(args); +} +EXPORT_SYMBOL(ath6kl_info); + +void ath6kl_err(const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + ath6kl_printk(KERN_ERR, "%pV", &vaf); + trace_ath6kl_log_err(&vaf); + va_end(args); +} +EXPORT_SYMBOL(ath6kl_err); + +void ath6kl_warn(const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + ath6kl_printk(KERN_WARNING, "%pV", &vaf); + trace_ath6kl_log_warn(&vaf); + va_end(args); +} +EXPORT_SYMBOL(ath6kl_warn); + +int ath6kl_read_tgt_stats(struct ath6kl *ar, struct ath6kl_vif *vif) +{ + long left; + + if (down_interruptible(&ar->sem)) + return -EBUSY; + + set_bit(STATS_UPDATE_PEND, &vif->flags); + + if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) { + up(&ar->sem); + return -EIO; + } + + left = wait_event_interruptible_timeout(ar->event_wq, + !test_bit(STATS_UPDATE_PEND, + &vif->flags), WMI_TIMEOUT); + + up(&ar->sem); + + if (left <= 0) + return -ETIMEDOUT; + + return 0; +} +EXPORT_SYMBOL(ath6kl_read_tgt_stats); + +#ifdef CONFIG_ATH6KL_DEBUG + +void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (debug_mask & mask) + ath6kl_printk(KERN_DEBUG, "%pV", &vaf); + + trace_ath6kl_log_dbg(mask, &vaf); + + va_end(args); +} +EXPORT_SYMBOL(ath6kl_dbg); + +void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask, + const char *msg, const char *prefix, + const void *buf, size_t len) +{ + if (debug_mask & mask) { + if (msg) + ath6kl_dbg(mask, "%s\n", msg); + + print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len); + } + + /* tracing code doesn't like null strings :/ */ + trace_ath6kl_log_dbg_dump(msg ? msg : "", prefix ? 
prefix : "", + buf, len); +} +EXPORT_SYMBOL(ath6kl_dbg_dump); + +#define REG_OUTPUT_LEN_PER_LINE 25 +#define REGTYPE_STR_LEN 100 + +struct ath6kl_diag_reg_info { + u32 reg_start; + u32 reg_end; + const char *reg_info; +}; + +static const struct ath6kl_diag_reg_info diag_reg[] = { + { 0x20000, 0x200fc, "General DMA and Rx registers" }, + { 0x28000, 0x28900, "MAC PCU register & keycache" }, + { 0x20800, 0x20a40, "QCU" }, + { 0x21000, 0x212f0, "DCU" }, + { 0x4000, 0x42e4, "RTC" }, + { 0x540000, 0x540000 + (256 * 1024), "RAM" }, + { 0x29800, 0x2B210, "Base Band" }, + { 0x1C000, 0x1C748, "Analog" }, +}; + +void ath6kl_dump_registers(struct ath6kl_device *dev, + struct ath6kl_irq_proc_registers *irq_proc_reg, + struct ath6kl_irq_enable_reg *irq_enable_reg) +{ + ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n")); + + if (irq_proc_reg != NULL) { + ath6kl_dbg(ATH6KL_DBG_IRQ, + "Host Int status: 0x%x\n", + irq_proc_reg->host_int_status); + ath6kl_dbg(ATH6KL_DBG_IRQ, + "CPU Int status: 0x%x\n", + irq_proc_reg->cpu_int_status); + ath6kl_dbg(ATH6KL_DBG_IRQ, + "Error Int status: 0x%x\n", + irq_proc_reg->error_int_status); + ath6kl_dbg(ATH6KL_DBG_IRQ, + "Counter Int status: 0x%x\n", + irq_proc_reg->counter_int_status); + ath6kl_dbg(ATH6KL_DBG_IRQ, + "Mbox Frame: 0x%x\n", + irq_proc_reg->mbox_frame); + ath6kl_dbg(ATH6KL_DBG_IRQ, + "Rx Lookahead Valid: 0x%x\n", + irq_proc_reg->rx_lkahd_valid); + ath6kl_dbg(ATH6KL_DBG_IRQ, + "Rx Lookahead 0: 0x%x\n", + irq_proc_reg->rx_lkahd[0]); + ath6kl_dbg(ATH6KL_DBG_IRQ, + "Rx Lookahead 1: 0x%x\n", + irq_proc_reg->rx_lkahd[1]); + + if (dev->ar->mbox_info.gmbox_addr != 0) { + /* + * If the target supports GMBOX hardware, dump some + * additional state. + */ + ath6kl_dbg(ATH6KL_DBG_IRQ, + "GMBOX Host Int status 2: 0x%x\n", + irq_proc_reg->host_int_status2); + ath6kl_dbg(ATH6KL_DBG_IRQ, + "GMBOX RX Avail: 0x%x\n", + irq_proc_reg->gmbox_rx_avail); + ath6kl_dbg(ATH6KL_DBG_IRQ, + "GMBOX lookahead alias 0: 0x%x\n", + irq_proc_reg->rx_gmbox_lkahd_alias[0]); + ath6kl_dbg(ATH6KL_DBG_IRQ, + "GMBOX lookahead alias 1: 0x%x\n", + irq_proc_reg->rx_gmbox_lkahd_alias[1]); + } + } + + if (irq_enable_reg != NULL) { + ath6kl_dbg(ATH6KL_DBG_IRQ, + "Int status Enable: 0x%x\n", + irq_enable_reg->int_status_en); + ath6kl_dbg(ATH6KL_DBG_IRQ, "Counter Int status Enable: 0x%x\n", + irq_enable_reg->cntr_int_status_en); + } + ath6kl_dbg(ATH6KL_DBG_IRQ, "<------------------------------->\n"); +} + +static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist) +{ + ath6kl_dbg(ATH6KL_DBG_CREDIT, + "--- endpoint: %d svc_id: 0x%X ---\n", + ep_dist->endpoint, ep_dist->svc_id); + ath6kl_dbg(ATH6KL_DBG_CREDIT, " dist_flags : 0x%X\n", + ep_dist->dist_flags); + ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_norm : %d\n", + ep_dist->cred_norm); + ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_min : %d\n", + ep_dist->cred_min); + ath6kl_dbg(ATH6KL_DBG_CREDIT, " credits : %d\n", + ep_dist->credits); + ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_assngd : %d\n", + ep_dist->cred_assngd); + ath6kl_dbg(ATH6KL_DBG_CREDIT, " seek_cred : %d\n", + ep_dist->seek_cred); + ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_sz : %d\n", + ep_dist->cred_sz); + ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_per_msg : %d\n", + ep_dist->cred_per_msg); + ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_to_dist : %d\n", + ep_dist->cred_to_dist); + ath6kl_dbg(ATH6KL_DBG_CREDIT, " txq_depth : %d\n", + get_queue_depth(&ep_dist->htc_ep->txq)); + ath6kl_dbg(ATH6KL_DBG_CREDIT, + "----------------------------------\n"); +} + +/* FIXME: move to htc.c */ +void 
dump_cred_dist_stats(struct htc_target *target) +{ + struct htc_endpoint_credit_dist *ep_list; + + list_for_each_entry(ep_list, &target->cred_dist_list, list) + dump_cred_dist(ep_list); + + ath6kl_dbg(ATH6KL_DBG_CREDIT, + "credit distribution total %d free %d\n", + target->credit_info->total_avail_credits, + target->credit_info->cur_free_credits); +} + +void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war) +{ + switch (war) { + case ATH6KL_WAR_INVALID_RATE: + ar->debug.war_stats.invalid_rate++; + break; + } +} + +static ssize_t read_file_war_stats(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + char *buf; + unsigned int len = 0, buf_len = 1500; + ssize_t ret_cnt; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%25s\n", + "Workaround stats"); + len += scnprintf(buf + len, buf_len - len, "%25s\n\n", + "================="); + len += scnprintf(buf + len, buf_len - len, "%20s %10u\n", + "Invalid rates", ar->debug.war_stats.invalid_rate); + + if (WARN_ON(len > buf_len)) + len = buf_len; + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + return ret_cnt; +} + +static const struct file_operations fops_war_stats = { + .read = read_file_war_stats, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len) +{ + struct ath6kl_fwlog_slot *slot; + struct sk_buff *skb; + size_t slot_len; + + if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE)) + return; + + slot_len = sizeof(*slot) + ATH6KL_FWLOG_PAYLOAD_SIZE; + + skb = alloc_skb(slot_len, GFP_KERNEL); + if (!skb) + return; + + slot = skb_put(skb, slot_len); + slot->timestamp = cpu_to_le32(jiffies); + slot->length = cpu_to_le32(len); + memcpy(slot->payload, buf, len); + + /* Need to pad each record to fixed length ATH6KL_FWLOG_PAYLOAD_SIZE */ + memset(slot->payload + len, 0, ATH6KL_FWLOG_PAYLOAD_SIZE - len); + + spin_lock(&ar->debug.fwlog_queue.lock); + + __skb_queue_tail(&ar->debug.fwlog_queue, skb); + complete(&ar->debug.fwlog_completion); + + /* drop oldest entries */ + while (skb_queue_len(&ar->debug.fwlog_queue) > + ATH6KL_FWLOG_MAX_ENTRIES) { + skb = __skb_dequeue(&ar->debug.fwlog_queue); + kfree_skb(skb); + } + + spin_unlock(&ar->debug.fwlog_queue.lock); + + return; +} + +static int ath6kl_fwlog_open(struct inode *inode, struct file *file) +{ + struct ath6kl *ar = inode->i_private; + + if (ar->debug.fwlog_open) + return -EBUSY; + + ar->debug.fwlog_open = true; + + file->private_data = inode->i_private; + return 0; +} + +static int ath6kl_fwlog_release(struct inode *inode, struct file *file) +{ + struct ath6kl *ar = inode->i_private; + + ar->debug.fwlog_open = false; + + return 0; +} + +static ssize_t ath6kl_fwlog_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct sk_buff *skb; + ssize_t ret_cnt; + size_t len = 0; + char *buf; + + buf = vmalloc(count); + if (!buf) + return -ENOMEM; + + /* read undelivered logs from firmware */ + ath6kl_read_fwlogs(ar); + + spin_lock(&ar->debug.fwlog_queue.lock); + + while ((skb = __skb_dequeue(&ar->debug.fwlog_queue))) { + if (skb->len > count - len) { + /* not enough space, put skb back and leave */ + __skb_queue_head(&ar->debug.fwlog_queue, skb); + break; + } + + + memcpy(buf + len, skb->data, skb->len); + 
len += skb->len; + + kfree_skb(skb); + } + + spin_unlock(&ar->debug.fwlog_queue.lock); + + /* FIXME: what to do if len == 0? */ + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + vfree(buf); + + return ret_cnt; +} + +static const struct file_operations fops_fwlog = { + .open = ath6kl_fwlog_open, + .release = ath6kl_fwlog_release, + .read = ath6kl_fwlog_read, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_fwlog_block_read(struct file *file, + char __user *user_buf, + size_t count, + loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct sk_buff *skb; + ssize_t ret_cnt; + size_t len = 0, not_copied; + char *buf; + int ret; + + buf = vmalloc(count); + if (!buf) + return -ENOMEM; + + spin_lock(&ar->debug.fwlog_queue.lock); + + if (skb_queue_len(&ar->debug.fwlog_queue) == 0) { + /* we must init under queue lock */ + init_completion(&ar->debug.fwlog_completion); + + spin_unlock(&ar->debug.fwlog_queue.lock); + + ret = wait_for_completion_interruptible( + &ar->debug.fwlog_completion); + if (ret == -ERESTARTSYS) { + vfree(buf); + return ret; + } + + spin_lock(&ar->debug.fwlog_queue.lock); + } + + while ((skb = __skb_dequeue(&ar->debug.fwlog_queue))) { + if (skb->len > count - len) { + /* not enough space, put skb back and leave */ + __skb_queue_head(&ar->debug.fwlog_queue, skb); + break; + } + + + memcpy(buf + len, skb->data, skb->len); + len += skb->len; + + kfree_skb(skb); + } + + spin_unlock(&ar->debug.fwlog_queue.lock); + + /* FIXME: what to do if len == 0? */ + + not_copied = copy_to_user(user_buf, buf, len); + if (not_copied != 0) { + ret_cnt = -EFAULT; + goto out; + } + + *ppos = *ppos + len; + + ret_cnt = len; + +out: + vfree(buf); + + return ret_cnt; +} + +static const struct file_operations fops_fwlog_block = { + .open = ath6kl_fwlog_open, + .release = ath6kl_fwlog_release, + .read = ath6kl_fwlog_block_read, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_fwlog_mask_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + char buf[16]; + int len; + + len = snprintf(buf, sizeof(buf), "0x%x\n", ar->debug.fwlog_mask); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath6kl_fwlog_mask_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + int ret; + + ret = kstrtou32_from_user(user_buf, count, 0, &ar->debug.fwlog_mask); + if (ret) + return ret; + + ret = ath6kl_wmi_config_debug_module_cmd(ar->wmi, + ATH6KL_FWLOG_VALID_MASK, + ar->debug.fwlog_mask); + if (ret) + return ret; + + return count; +} + +static const struct file_operations fops_fwlog_mask = { + .open = simple_open, + .read = ath6kl_fwlog_mask_read, + .write = ath6kl_fwlog_mask_write, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct ath6kl_vif *vif; + struct target_stats *tgt_stats; + char *buf; + unsigned int len = 0, buf_len = 1500; + int i; + ssize_t ret_cnt; + int rv; + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + rv = ath6kl_read_tgt_stats(ar, vif); + if (rv < 0) { + kfree(buf); + return rv; + } + + tgt_stats = &vif->target_stats; + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += 
scnprintf(buf + len, buf_len - len, "%25s\n", + "Target Tx stats"); + len += scnprintf(buf + len, buf_len - len, "%25s\n\n", + "================="); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Ucast packets", tgt_stats->tx_ucast_pkt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Bcast packets", tgt_stats->tx_bcast_pkt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Ucast byte", tgt_stats->tx_ucast_byte); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Bcast byte", tgt_stats->tx_bcast_byte); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Rts success cnt", tgt_stats->tx_rts_success_cnt); + for (i = 0; i < 4; i++) + len += scnprintf(buf + len, buf_len - len, + "%18s %d %10llu\n", "PER on ac", + i, tgt_stats->tx_pkt_per_ac[i]); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Error", tgt_stats->tx_err); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Fail count", tgt_stats->tx_fail_cnt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Retry count", tgt_stats->tx_retry_cnt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Multi retry cnt", tgt_stats->tx_mult_retry_cnt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Rts fail cnt", tgt_stats->tx_rts_fail_cnt); + len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n\n", + "TKIP counter measure used", + tgt_stats->tkip_cnter_measures_invoked); + + len += scnprintf(buf + len, buf_len - len, "%25s\n", + "Target Rx stats"); + len += scnprintf(buf + len, buf_len - len, "%25s\n", + "================="); + + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Ucast packets", tgt_stats->rx_ucast_pkt); + len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", + "Ucast Rate", tgt_stats->rx_ucast_rate); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Bcast packets", tgt_stats->rx_bcast_pkt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Ucast byte", tgt_stats->rx_ucast_byte); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Bcast byte", tgt_stats->rx_bcast_byte); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Fragmented pkt", tgt_stats->rx_frgment_pkt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Error", tgt_stats->rx_err); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "CRC Err", tgt_stats->rx_crc_err); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Key cache miss", tgt_stats->rx_key_cache_miss); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Decrypt Err", tgt_stats->rx_decrypt_err); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Duplicate frame", tgt_stats->rx_dupl_frame); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Tkip Mic failure", tgt_stats->tkip_local_mic_fail); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "TKIP format err", tgt_stats->tkip_fmt_err); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "CCMP format Err", tgt_stats->ccmp_fmt_err); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n\n", + "CCMP Replay Err", tgt_stats->ccmp_replays); + + len += scnprintf(buf + len, buf_len - len, "%25s\n", + "Misc Target stats"); + len += scnprintf(buf + len, buf_len - len, "%25s\n", + "================="); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Beacon Miss count", tgt_stats->cs_bmiss_cnt); + len += scnprintf(buf + 
len, buf_len - len, "%20s %10llu\n", + "Num Connects", tgt_stats->cs_connect_cnt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Num disconnects", tgt_stats->cs_discon_cnt); + len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", + "Beacon avg rssi", tgt_stats->cs_ave_beacon_rssi); + len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", + "ARP pkt received", tgt_stats->arp_received); + len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", + "ARP pkt matched", tgt_stats->arp_matched); + len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", + "ARP pkt replied", tgt_stats->arp_replied); + + if (len > buf_len) + len = buf_len; + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + return ret_cnt; +} + +static const struct file_operations fops_tgt_stats = { + .read = read_file_tgt_stats, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +#define print_credit_info(fmt_str, ep_list_field) \ + (len += scnprintf(buf + len, buf_len - len, fmt_str, \ + ep_list->ep_list_field)) +#define CREDIT_INFO_DISPLAY_STRING_LEN 200 +#define CREDIT_INFO_LEN 128 + +static ssize_t read_file_credit_dist_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct htc_target *target = ar->htc_target; + struct htc_endpoint_credit_dist *ep_list; + char *buf; + unsigned int buf_len, len = 0; + ssize_t ret_cnt; + + buf_len = CREDIT_INFO_DISPLAY_STRING_LEN + + get_queue_depth(&target->cred_dist_list) * CREDIT_INFO_LEN; + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len += scnprintf(buf + len, buf_len - len, "%25s%5d\n", + "Total Avail Credits: ", + target->credit_info->total_avail_credits); + len += scnprintf(buf + len, buf_len - len, "%25s%5d\n", + "Free credits :", + target->credit_info->cur_free_credits); + + len += scnprintf(buf + len, buf_len - len, + " Epid Flags Cred_norm Cred_min Credits Cred_assngd" + " Seek_cred Cred_sz Cred_per_msg Cred_to_dist" + " qdepth\n"); + + list_for_each_entry(ep_list, &target->cred_dist_list, list) { + print_credit_info(" %2d", endpoint); + print_credit_info("%10x", dist_flags); + print_credit_info("%8d", cred_norm); + print_credit_info("%9d", cred_min); + print_credit_info("%9d", credits); + print_credit_info("%10d", cred_assngd); + print_credit_info("%13d", seek_cred); + print_credit_info("%12d", cred_sz); + print_credit_info("%9d", cred_per_msg); + print_credit_info("%14d", cred_to_dist); + len += scnprintf(buf + len, buf_len - len, "%12d\n", + get_queue_depth(&ep_list->htc_ep->txq)); + } + + if (len > buf_len) + len = buf_len; + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + return ret_cnt; +} + +static const struct file_operations fops_credit_dist_stats = { + .read = read_file_credit_dist_stats, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static unsigned int print_endpoint_stat(struct htc_target *target, char *buf, + unsigned int buf_len, unsigned int len, + int offset, const char *name) +{ + int i; + struct htc_endpoint_stats *ep_st; + u32 *counter; + + len += scnprintf(buf + len, buf_len - len, "%s:", name); + for (i = 0; i < ENDPOINT_MAX; i++) { + ep_st = &target->endpoint[i].ep_st; + counter = ((u32 *) ep_st) + (offset / 4); + len += scnprintf(buf + len, buf_len - len, " %u", *counter); + } + len += scnprintf(buf + len, buf_len - len, "\n"); + + return len; +} + +static ssize_t 
ath6kl_endpoint_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct htc_target *target = ar->htc_target; + char *buf; + unsigned int buf_len, len = 0; + ssize_t ret_cnt; + + buf_len = sizeof(struct htc_endpoint_stats) / sizeof(u32) * + (25 + ENDPOINT_MAX * 11); + buf = kmalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + +#define EPSTAT(name) \ + do { \ + len = print_endpoint_stat(target, buf, buf_len, len, \ + offsetof(struct htc_endpoint_stats, \ + name), \ + #name); \ + } while (0) + + EPSTAT(cred_low_indicate); + EPSTAT(tx_issued); + EPSTAT(tx_pkt_bundled); + EPSTAT(tx_bundles); + EPSTAT(tx_dropped); + EPSTAT(tx_cred_rpt); + EPSTAT(cred_rpt_from_rx); + EPSTAT(cred_rpt_from_other); + EPSTAT(cred_rpt_ep0); + EPSTAT(cred_from_rx); + EPSTAT(cred_from_other); + EPSTAT(cred_from_ep0); + EPSTAT(cred_cosumd); + EPSTAT(cred_retnd); + EPSTAT(rx_pkts); + EPSTAT(rx_lkahds); + EPSTAT(rx_bundl); + EPSTAT(rx_bundle_lkahd); + EPSTAT(rx_bundle_from_hdr); + EPSTAT(rx_alloc_thresh_hit); + EPSTAT(rxalloc_thresh_byte); +#undef EPSTAT + + if (len > buf_len) + len = buf_len; + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + return ret_cnt; +} + +static ssize_t ath6kl_endpoint_stats_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct htc_target *target = ar->htc_target; + int ret, i; + u32 val; + struct htc_endpoint_stats *ep_st; + + ret = kstrtou32_from_user(user_buf, count, 0, &val); + if (ret) + return ret; + if (val == 0) { + for (i = 0; i < ENDPOINT_MAX; i++) { + ep_st = &target->endpoint[i].ep_st; + memset(ep_st, 0, sizeof(*ep_st)); + } + } + + return count; +} + +static const struct file_operations fops_endpoint_stats = { + .open = simple_open, + .read = ath6kl_endpoint_stats_read, + .write = ath6kl_endpoint_stats_write, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static unsigned long ath6kl_get_num_reg(void) +{ + int i; + unsigned long n_reg = 0; + + for (i = 0; i < ARRAY_SIZE(diag_reg); i++) + n_reg = n_reg + + (diag_reg[i].reg_end - diag_reg[i].reg_start) / 4 + 1; + + return n_reg; +} + +static bool ath6kl_dbg_is_diag_reg_valid(u32 reg_addr) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(diag_reg); i++) { + if (reg_addr >= diag_reg[i].reg_start && + reg_addr <= diag_reg[i].reg_end) + return true; + } + + return false; +} + +static ssize_t ath6kl_regread_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + u8 buf[50]; + unsigned int len = 0; + + if (ar->debug.dbgfs_diag_reg) + len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", + ar->debug.dbgfs_diag_reg); + else + len += scnprintf(buf + len, sizeof(buf) - len, + "All diag registers\n"); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath6kl_regread_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + unsigned long reg_addr; + + if (kstrtoul_from_user(user_buf, count, 0, &reg_addr)) + return -EINVAL; + + if ((reg_addr % 4) != 0) + return -EINVAL; + + if (reg_addr && !ath6kl_dbg_is_diag_reg_valid(reg_addr)) + return -EINVAL; + + ar->debug.dbgfs_diag_reg = reg_addr; + + return count; +} + +static const struct file_operations fops_diag_reg_read = { + .read = ath6kl_regread_read, + .write = ath6kl_regread_write, + .open = simple_open,
+ .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static int ath6kl_regdump_open(struct inode *inode, struct file *file) +{ + struct ath6kl *ar = inode->i_private; + u8 *buf; + unsigned long int reg_len; + unsigned int len = 0, n_reg; + u32 addr; + __le32 reg_val; + int i, status; + + /* Dump all the registers if no register is specified */ + if (!ar->debug.dbgfs_diag_reg) + n_reg = ath6kl_get_num_reg(); + else + n_reg = 1; + + reg_len = n_reg * REG_OUTPUT_LEN_PER_LINE; + if (n_reg > 1) + reg_len += REGTYPE_STR_LEN; + + buf = vmalloc(reg_len); + if (!buf) + return -ENOMEM; + + if (n_reg == 1) { + addr = ar->debug.dbgfs_diag_reg; + + status = ath6kl_diag_read32(ar, + TARG_VTOP(ar->target_type, addr), + (u32 *)&reg_val); + if (status) + goto fail_reg_read; + + len += scnprintf(buf + len, reg_len - len, + "0x%06x 0x%08x\n", addr, le32_to_cpu(reg_val)); + goto done; + } + + for (i = 0; i < ARRAY_SIZE(diag_reg); i++) { + len += scnprintf(buf + len, reg_len - len, + "%s\n", diag_reg[i].reg_info); + for (addr = diag_reg[i].reg_start; + addr <= diag_reg[i].reg_end; addr += 4) { + status = ath6kl_diag_read32(ar, + TARG_VTOP(ar->target_type, addr), + (u32 *)&reg_val); + if (status) + goto fail_reg_read; + + len += scnprintf(buf + len, reg_len - len, + "0x%06x 0x%08x\n", + addr, le32_to_cpu(reg_val)); + } + } + +done: + file->private_data = buf; + return 0; + +fail_reg_read: + ath6kl_warn("Unable to read memory:%u\n", addr); + vfree(buf); + return -EIO; +} + +static ssize_t ath6kl_regdump_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + u8 *buf = file->private_data; + return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); +} + +static int ath6kl_regdump_release(struct inode *inode, struct file *file) +{ + vfree(file->private_data); + return 0; +} + +static const struct file_operations fops_reg_dump = { + .open = ath6kl_regdump_open, + .read = ath6kl_regdump_read, + .release = ath6kl_regdump_release, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_lrssi_roam_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + unsigned long lrssi_roam_threshold; + int ret; + + if (kstrtoul_from_user(user_buf, count, 0, &lrssi_roam_threshold)) + return -EINVAL; + + ar->lrssi_roam_threshold = lrssi_roam_threshold; + + ret = ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold); + + if (ret) + return ret; + return count; +} + +static ssize_t ath6kl_lrssi_roam_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + char buf[32]; + unsigned int len; + + len = snprintf(buf, sizeof(buf), "%u\n", ar->lrssi_roam_threshold); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_lrssi_roam_threshold = { + .read = ath6kl_lrssi_roam_read, + .write = ath6kl_lrssi_roam_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_regwrite_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + u8 buf[32]; + unsigned int len = 0; + + len = scnprintf(buf, sizeof(buf), "Addr: 0x%x Val: 0x%x\n", + ar->debug.diag_reg_addr_wr, ar->debug.diag_reg_val_wr); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath6kl_regwrite_write(struct file *file, + const char __user *user_buf, +
size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + char buf[32]; + char *sptr, *token; + unsigned int len = 0; + u32 reg_addr, reg_val; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + sptr = buf; + + token = strsep(&sptr, "="); + if (!token) + return -EINVAL; + + if (kstrtou32(token, 0, &reg_addr)) + return -EINVAL; + + if (!ath6kl_dbg_is_diag_reg_valid(reg_addr)) + return -EINVAL; + + if (!sptr || kstrtou32(sptr, 0, &reg_val)) + return -EINVAL; + + ar->debug.diag_reg_addr_wr = reg_addr; + ar->debug.diag_reg_val_wr = reg_val; + + if (ath6kl_diag_write32(ar, ar->debug.diag_reg_addr_wr, + cpu_to_le32(ar->debug.diag_reg_val_wr))) + return -EIO; + + return count; +} + +static const struct file_operations fops_diag_reg_write = { + .read = ath6kl_regwrite_read, + .write = ath6kl_regwrite_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf, + size_t len) +{ + const struct wmi_target_roam_tbl *tbl; + u16 num_entries; + + if (len < sizeof(*tbl)) + return -EINVAL; + + tbl = (const struct wmi_target_roam_tbl *) buf; + num_entries = le16_to_cpu(tbl->num_entries); + if (struct_size(tbl, info, num_entries) > len) + return -EINVAL; + + if (ar->debug.roam_tbl == NULL || + ar->debug.roam_tbl_len < (unsigned int) len) { + kfree(ar->debug.roam_tbl); + ar->debug.roam_tbl = kmalloc(len, GFP_ATOMIC); + if (ar->debug.roam_tbl == NULL) + return -ENOMEM; + } + + memcpy(ar->debug.roam_tbl, buf, len); + ar->debug.roam_tbl_len = len; + + if (test_bit(ROAM_TBL_PEND, &ar->flag)) { + clear_bit(ROAM_TBL_PEND, &ar->flag); + wake_up(&ar->event_wq); + } + + return 0; +} + +static ssize_t ath6kl_roam_table_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + int ret; + long left; + struct wmi_target_roam_tbl *tbl; + u16 num_entries, i; + char *buf; + unsigned int len, buf_len; + ssize_t ret_cnt; + + if (down_interruptible(&ar->sem)) + return -EBUSY; + + set_bit(ROAM_TBL_PEND, &ar->flag); + + ret = ath6kl_wmi_get_roam_tbl_cmd(ar->wmi); + if (ret) { + up(&ar->sem); + return ret; + } + + left = wait_event_interruptible_timeout( + ar->event_wq, !test_bit(ROAM_TBL_PEND, &ar->flag), WMI_TIMEOUT); + up(&ar->sem); + + if (left <= 0) + return -ETIMEDOUT; + + if (ar->debug.roam_tbl == NULL) + return -ENOMEM; + + tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl; + num_entries = le16_to_cpu(tbl->num_entries); + + buf_len = 100 + num_entries * 100; + buf = kzalloc(buf_len, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + len = 0; + len += scnprintf(buf + len, buf_len - len, + "roam_mode=%u\n\n" + "# roam_util bssid rssi rssidt last_rssi util bias\n", + le16_to_cpu(tbl->roam_mode)); + + for (i = 0; i < num_entries; i++) { + struct wmi_bss_roam_info *info = &tbl->info[i]; + len += scnprintf(buf + len, buf_len - len, + "%d %pM %d %d %d %d %d\n", + a_sle32_to_cpu(info->roam_util), info->bssid, + info->rssi, info->rssidt, info->last_rssi, + info->util, info->bias); + } + + if (len > buf_len) + len = buf_len; + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + return ret_cnt; +} + +static const struct file_operations fops_roam_table = { + .read = ath6kl_roam_table_read, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_force_roam_write(struct file *file, + const char __user *user_buf, +
size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + int ret; + char buf[20]; + size_t len; + u8 bssid[ETH_ALEN]; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + + if (!mac_pton(buf, bssid)) + return -EINVAL; + + ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid); + if (ret) + return ret; + + return count; +} + +static const struct file_operations fops_force_roam = { + .write = ath6kl_force_roam_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_roam_mode_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + int ret; + char buf[20]; + size_t len; + enum wmi_roam_mode mode; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + if (len > 0 && buf[len - 1] == '\n') + buf[len - 1] = '\0'; + + if (strcasecmp(buf, "default") == 0) + mode = WMI_DEFAULT_ROAM_MODE; + else if (strcasecmp(buf, "bssbias") == 0) + mode = WMI_HOST_BIAS_ROAM_MODE; + else if (strcasecmp(buf, "lock") == 0) + mode = WMI_LOCK_BSS_MODE; + else + return -EINVAL; + + ret = ath6kl_wmi_set_roam_mode_cmd(ar->wmi, mode); + if (ret) + return ret; + + return count; +} + +static const struct file_operations fops_roam_mode = { + .write = ath6kl_roam_mode_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive) +{ + ar->debug.keepalive = keepalive; +} + +static ssize_t ath6kl_keepalive_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + char buf[16]; + int len; + + len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.keepalive); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath6kl_keepalive_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + int ret; + u8 val; + + ret = kstrtou8_from_user(user_buf, count, 0, &val); + if (ret) + return ret; + + ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, 0, val); + if (ret) + return ret; + + return count; +} + +static const struct file_operations fops_keepalive = { + .open = simple_open, + .read = ath6kl_keepalive_read, + .write = ath6kl_keepalive_write, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout) +{ + ar->debug.disc_timeout = timeout; +} + +static ssize_t ath6kl_disconnect_timeout_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + char buf[16]; + int len; + + len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.disc_timeout); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath6kl_disconnect_timeout_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + int ret; + u8 val; + + ret = kstrtou8_from_user(user_buf, count, 0, &val); + if (ret) + return ret; + + ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, 0, val); + if (ret) + return ret; + + return count; +} + +static const struct file_operations fops_disconnect_timeout = { + .open = simple_open, + .read = ath6kl_disconnect_timeout_read, + .write = ath6kl_disconnect_timeout_write, + .owner = THIS_MODULE, + 
.llseek = default_llseek, +}; + +static ssize_t ath6kl_create_qos_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct ath6kl_vif *vif; + char buf[200]; + ssize_t len; + char *sptr, *token; + struct wmi_create_pstream_cmd pstream; + u32 val32; + u16 val16; + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + sptr = buf; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &pstream.user_pri)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &pstream.traffic_direc)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &pstream.traffic_class)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &pstream.traffic_type)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &pstream.voice_psc_cap)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.min_service_int = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.max_service_int = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.inactivity_int = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.suspension_int = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.service_start_time = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &pstream.tsid)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou16(token, 0, &val16)) + return -EINVAL; + pstream.nominal_msdu = cpu_to_le16(val16); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou16(token, 0, &val16)) + return -EINVAL; + pstream.max_msdu = cpu_to_le16(val16); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.min_data_rate = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.mean_data_rate = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.peak_data_rate = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.max_burst_size = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.delay_bound = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.min_phy_rate = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + 
pstream.sba = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.medium_time = cpu_to_le32(val32); + + pstream.nominal_phy = le32_to_cpu(pstream.min_phy_rate) / 1000000; + + ath6kl_wmi_create_pstream_cmd(ar->wmi, vif->fw_vif_idx, &pstream); + + return count; +} + +static const struct file_operations fops_create_qos = { + .write = ath6kl_create_qos_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_delete_qos_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct ath6kl_vif *vif; + char buf[100]; + ssize_t len; + char *sptr, *token; + u8 traffic_class; + u8 tsid; + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + sptr = buf; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &traffic_class)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &tsid)) + return -EINVAL; + + ath6kl_wmi_delete_pstream_cmd(ar->wmi, vif->fw_vif_idx, + traffic_class, tsid); + + return count; +} + +static const struct file_operations fops_delete_qos = { + .write = ath6kl_delete_qos_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_bgscan_int_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct ath6kl_vif *vif; + u16 bgscan_int; + char buf[32]; + ssize_t len; + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (kstrtou16(buf, 0, &bgscan_int)) + return -EINVAL; + + if (bgscan_int == 0) + bgscan_int = 0xffff; + + vif->bg_scan_period = bgscan_int; + + ath6kl_wmi_scanparams_cmd(ar->wmi, 0, 0, 0, bgscan_int, 0, 0, 0, 3, + 0, 0, 0); + + return count; +} + +static const struct file_operations fops_bgscan_int = { + .write = ath6kl_bgscan_int_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_listen_int_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct ath6kl_vif *vif; + u16 listen_interval; + char buf[32]; + ssize_t len; + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (kstrtou16(buf, 0, &listen_interval)) + return -EINVAL; + + if ((listen_interval < 15) || (listen_interval > 3000)) + return -EINVAL; + + vif->listen_intvl_t = listen_interval; + ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, + vif->listen_intvl_t, 0); + + return count; +} + +static ssize_t ath6kl_listen_int_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct ath6kl_vif *vif; + char buf[32]; + int len; + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + len = scnprintf(buf, sizeof(buf), "%u\n", vif->listen_intvl_t); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_listen_int = { + .read 
= ath6kl_listen_int_read, + .write = ath6kl_listen_int_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_power_params_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + u8 buf[100]; + unsigned int len = 0; + char *sptr, *token; + u16 idle_period, ps_poll_num, dtim, + tx_wakeup, num_tx; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + sptr = buf; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou16(token, 0, &idle_period)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou16(token, 0, &ps_poll_num)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou16(token, 0, &dtim)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou16(token, 0, &tx_wakeup)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou16(token, 0, &num_tx)) + return -EINVAL; + + ath6kl_wmi_pmparams_cmd(ar->wmi, 0, idle_period, ps_poll_num, + dtim, tx_wakeup, num_tx, 0); + + return count; +} + +static const struct file_operations fops_power_params = { + .write = ath6kl_power_params_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +void ath6kl_debug_init(struct ath6kl *ar) +{ + skb_queue_head_init(&ar->debug.fwlog_queue); + init_completion(&ar->debug.fwlog_completion); + + /* + * Actually we are lying here but don't know how to read the mask + * value from the firmware. + */ + ar->debug.fwlog_mask = 0; +} + +/* + * Initialisation needs to happen in two stages as fwlog events can come + * before cfg80211 is initialised, and debugfs depends on cfg80211 + * initialisation. 
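+ * + * Illustrative ordering (the actual call sites live elsewhere in the + * driver, outside this hunk): ath6kl_debug_init() runs early so that + * incoming fwlog events can already be queued, cfg80211/wiphy + * registration happens in between, and ath6kl_debug_init_fs() runs + * last because it needs ar->wiphy->debugfsdir.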
+ */ +int ath6kl_debug_init_fs(struct ath6kl *ar) +{ + ar->debugfs_phy = debugfs_create_dir("ath6kl", + ar->wiphy->debugfsdir); + if (!ar->debugfs_phy) + return -ENOMEM; + + debugfs_create_file("tgt_stats", 0400, ar->debugfs_phy, ar, + &fops_tgt_stats); + + if (ar->hif_type == ATH6KL_HIF_TYPE_SDIO) + debugfs_create_file("credit_dist_stats", 0400, + ar->debugfs_phy, ar, + &fops_credit_dist_stats); + + debugfs_create_file("endpoint_stats", 0600, + ar->debugfs_phy, ar, &fops_endpoint_stats); + + debugfs_create_file("fwlog", 0400, ar->debugfs_phy, ar, &fops_fwlog); + + debugfs_create_file("fwlog_block", 0400, ar->debugfs_phy, ar, + &fops_fwlog_block); + + debugfs_create_file("fwlog_mask", 0600, ar->debugfs_phy, + ar, &fops_fwlog_mask); + + debugfs_create_file("reg_addr", 0600, ar->debugfs_phy, ar, + &fops_diag_reg_read); + + debugfs_create_file("reg_dump", 0400, ar->debugfs_phy, ar, + &fops_reg_dump); + + debugfs_create_file("lrssi_roam_threshold", 0600, + ar->debugfs_phy, ar, &fops_lrssi_roam_threshold); + + debugfs_create_file("reg_write", 0600, + ar->debugfs_phy, ar, &fops_diag_reg_write); + + debugfs_create_file("war_stats", 0400, ar->debugfs_phy, ar, + &fops_war_stats); + + debugfs_create_file("roam_table", 0400, ar->debugfs_phy, ar, + &fops_roam_table); + + debugfs_create_file("force_roam", 0200, ar->debugfs_phy, ar, + &fops_force_roam); + + debugfs_create_file("roam_mode", 0200, ar->debugfs_phy, ar, + &fops_roam_mode); + + debugfs_create_file("keepalive", 0600, ar->debugfs_phy, ar, + &fops_keepalive); + + debugfs_create_file("disconnect_timeout", 0600, + ar->debugfs_phy, ar, &fops_disconnect_timeout); + + debugfs_create_file("create_qos", 0200, ar->debugfs_phy, ar, + &fops_create_qos); + + debugfs_create_file("delete_qos", 0200, ar->debugfs_phy, ar, + &fops_delete_qos); + + debugfs_create_file("bgscan_interval", 0200, + ar->debugfs_phy, ar, &fops_bgscan_int); + + debugfs_create_file("listen_interval", 0600, + ar->debugfs_phy, ar, &fops_listen_int); + + debugfs_create_file("power_params", 0200, ar->debugfs_phy, ar, + &fops_power_params); + + return 0; +} + +void ath6kl_debug_cleanup(struct ath6kl *ar) +{ + skb_queue_purge(&ar->debug.fwlog_queue); + complete(&ar->debug.fwlog_completion); + kfree(ar->debug.roam_tbl); +} + +#endif diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h new file mode 100644 index 000000000..942975729 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/debug.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef DEBUG_H +#define DEBUG_H + +#include "hif.h" +#include "trace.h" + +enum ATH6K_DEBUG_MASK { + ATH6KL_DBG_CREDIT = BIT(0), + /* hole */ + ATH6KL_DBG_WLAN_TX = BIT(2), /* wlan tx */ + ATH6KL_DBG_WLAN_RX = BIT(3), /* wlan rx */ + ATH6KL_DBG_BMI = BIT(4), /* bmi tracing */ + ATH6KL_DBG_HTC = BIT(5), + ATH6KL_DBG_HIF = BIT(6), + ATH6KL_DBG_IRQ = BIT(7), /* interrupt processing */ + /* hole */ + /* hole */ + ATH6KL_DBG_WMI = BIT(10), /* wmi tracing */ + ATH6KL_DBG_TRC = BIT(11), /* generic func tracing */ + ATH6KL_DBG_SCATTER = BIT(12), /* hif scatter tracing */ + ATH6KL_DBG_WLAN_CFG = BIT(13), /* cfg80211 i/f file tracing */ + ATH6KL_DBG_RAW_BYTES = BIT(14), /* dump tx/rx frames */ + ATH6KL_DBG_AGGR = BIT(15), /* aggregation */ + ATH6KL_DBG_SDIO = BIT(16), + ATH6KL_DBG_SDIO_DUMP = BIT(17), + ATH6KL_DBG_BOOT = BIT(18), /* driver init and fw boot */ + ATH6KL_DBG_WMI_DUMP = BIT(19), + ATH6KL_DBG_SUSPEND = BIT(20), + ATH6KL_DBG_USB = BIT(21), + ATH6KL_DBG_USB_BULK = BIT(22), + ATH6KL_DBG_RECOVERY = BIT(23), + ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */ +}; + +extern unsigned int debug_mask; +__printf(2, 3) void ath6kl_printk(const char *level, const char *fmt, ...); +__printf(1, 2) void ath6kl_info(const char *fmt, ...); +__printf(1, 2) void ath6kl_err(const char *fmt, ...); +__printf(1, 2) void ath6kl_warn(const char *fmt, ...); + +enum ath6kl_war { + ATH6KL_WAR_INVALID_RATE, +}; + +int ath6kl_read_tgt_stats(struct ath6kl *ar, struct ath6kl_vif *vif); + +#ifdef CONFIG_ATH6KL_DEBUG + +__printf(2, 3) +void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...); +void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask, + const char *msg, const char *prefix, + const void *buf, size_t len); + +void ath6kl_dump_registers(struct ath6kl_device *dev, + struct ath6kl_irq_proc_registers *irq_proc_reg, + struct ath6kl_irq_enable_reg *irq_en_reg); +void dump_cred_dist_stats(struct htc_target *target); +void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len); +void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war); +int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf, + size_t len); +void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive); +void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout); +void ath6kl_debug_init(struct ath6kl *ar); +int ath6kl_debug_init_fs(struct ath6kl *ar); +void ath6kl_debug_cleanup(struct ath6kl *ar); + +#else +__printf(2, 3) +static inline void ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask, + const char *fmt, ...) 
+{ +} + +static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask, + const char *msg, const char *prefix, + const void *buf, size_t len) +{ +} + +static inline void ath6kl_dump_registers(struct ath6kl_device *dev, + struct ath6kl_irq_proc_registers *irq_proc_reg, + struct ath6kl_irq_enable_reg *irq_en_reg) +{ +} + +static inline void dump_cred_dist_stats(struct htc_target *target) +{ +} + +static inline void ath6kl_debug_fwlog_event(struct ath6kl *ar, + const void *buf, size_t len) +{ +} + +static inline void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war) +{ +} + +static inline int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, + const void *buf, size_t len) +{ + return 0; +} + +static inline void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive) +{ +} + +static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, + u8 timeout) +{ +} + +static inline void ath6kl_debug_init(struct ath6kl *ar) +{ +} + +static inline int ath6kl_debug_init_fs(struct ath6kl *ar) +{ + return 0; +} + +static inline void ath6kl_debug_cleanup(struct ath6kl *ar) +{ +} + +#endif +#endif diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h new file mode 100644 index 000000000..8c9e72d52 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef HIF_OPS_H +#define HIF_OPS_H + +#include "hif.h" +#include "debug.h" + +static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf, + u32 len, u32 request) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, + "hif %s sync addr 0x%x buf 0x%p len %d request 0x%x\n", + (request & HIF_WRITE) ? 
"write" : "read", + addr, buf, len, request); + + return ar->hif_ops->read_write_sync(ar, addr, buf, len, request); +} + +static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer, + u32 length, u32 request, + struct htc_packet *packet) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, + "hif write async addr 0x%x buf 0x%p len %d request 0x%x\n", + address, buffer, length, request); + + return ar->hif_ops->write_async(ar, address, buffer, length, + request, packet); +} +static inline void ath6kl_hif_irq_enable(struct ath6kl *ar) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq enable\n"); + + return ar->hif_ops->irq_enable(ar); +} + +static inline void ath6kl_hif_irq_disable(struct ath6kl *ar) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq disable\n"); + + return ar->hif_ops->irq_disable(ar); +} + +static inline struct hif_scatter_req *hif_scatter_req_get(struct ath6kl *ar) +{ + return ar->hif_ops->scatter_req_get(ar); +} + +static inline void hif_scatter_req_add(struct ath6kl *ar, + struct hif_scatter_req *s_req) +{ + return ar->hif_ops->scatter_req_add(ar, s_req); +} + +static inline int ath6kl_hif_enable_scatter(struct ath6kl *ar) +{ + return ar->hif_ops->enable_scatter(ar); +} + +static inline int ath6kl_hif_scat_req_rw(struct ath6kl *ar, + struct hif_scatter_req *scat_req) +{ + return ar->hif_ops->scat_req_rw(ar, scat_req); +} + +static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar) +{ + return ar->hif_ops->cleanup_scatter(ar); +} + +static inline int ath6kl_hif_suspend(struct ath6kl *ar, + struct cfg80211_wowlan *wow) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif suspend\n"); + + return ar->hif_ops->suspend(ar, wow); +} + +/* + * Read from the ATH6KL through its diagnostic window. No cooperation from + * the Target is required for this. + */ +static inline int ath6kl_hif_diag_read32(struct ath6kl *ar, u32 address, + u32 *value) +{ + return ar->hif_ops->diag_read32(ar, address, value); +} + +/* + * Write to the ATH6KL through its diagnostic window. No cooperation from + * the Target is required for this. 
+ */ +static inline int ath6kl_hif_diag_write32(struct ath6kl *ar, u32 address, + __le32 value) +{ + return ar->hif_ops->diag_write32(ar, address, value); +} + +static inline int ath6kl_hif_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) +{ + return ar->hif_ops->bmi_read(ar, buf, len); +} + +static inline int ath6kl_hif_bmi_write(struct ath6kl *ar, u8 *buf, u32 len) +{ + return ar->hif_ops->bmi_write(ar, buf, len); +} + +static inline int ath6kl_hif_resume(struct ath6kl *ar) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif resume\n"); + + return ar->hif_ops->resume(ar); +} + +static inline int ath6kl_hif_power_on(struct ath6kl *ar) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif power on\n"); + + return ar->hif_ops->power_on(ar); +} + +static inline int ath6kl_hif_power_off(struct ath6kl *ar) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif power off\n"); + + return ar->hif_ops->power_off(ar); +} + +static inline void ath6kl_hif_stop(struct ath6kl *ar) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif stop\n"); + + ar->hif_ops->stop(ar); +} + +static inline int ath6kl_hif_pipe_send(struct ath6kl *ar, + u8 pipe, struct sk_buff *hdr_buf, + struct sk_buff *buf) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe send\n"); + + return ar->hif_ops->pipe_send(ar, pipe, hdr_buf, buf); +} + +static inline void ath6kl_hif_pipe_get_default(struct ath6kl *ar, + u8 *ul_pipe, u8 *dl_pipe) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get default\n"); + + ar->hif_ops->pipe_get_default(ar, ul_pipe, dl_pipe); +} + +static inline int ath6kl_hif_pipe_map_service(struct ath6kl *ar, + u16 service_id, u8 *ul_pipe, + u8 *dl_pipe) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe map service\n"); + + return ar->hif_ops->pipe_map_service(ar, service_id, ul_pipe, dl_pipe); +} + +static inline u16 ath6kl_hif_pipe_get_free_queue_number(struct ath6kl *ar, + u8 pipe) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get free queue number\n"); + + return ar->hif_ops->pipe_get_free_queue_number(ar, pipe); +} + +#endif diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c new file mode 100644 index 000000000..d1942537e --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/hif.c @@ -0,0 +1,699 @@ +/* + * Copyright (c) 2007-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */ +#include "hif.h" + +#include <linux/export.h> + +#include "core.h" +#include "target.h" +#include "hif-ops.h" +#include "debug.h" +#include "trace.h" + +#define MAILBOX_FOR_BLOCK_SIZE 1 + +#define ATH6KL_TIME_QUANTUM 10 /* in ms */ + +static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req, + bool from_dma) +{ + u8 *buf; + int i; + + buf = req->virt_dma_buf; + + for (i = 0; i < req->scat_entries; i++) { + if (from_dma) + memcpy(req->scat_list[i].buf, buf, + req->scat_list[i].len); + else + memcpy(buf, req->scat_list[i].buf, + req->scat_list[i].len); + + buf += req->scat_list[i].len; + } + + return 0; +} + +int ath6kl_hif_rw_comp_handler(void *context, int status) +{ + struct htc_packet *packet = context; + + ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n", + packet, status); + + packet->status = status; + packet->completion(packet->context, packet); + + return 0; +} +EXPORT_SYMBOL(ath6kl_hif_rw_comp_handler); + +#define REGISTER_DUMP_COUNT 60 +#define REGISTER_DUMP_LEN_MAX 60 + +static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar) +{ + __le32 regdump_val[REGISTER_DUMP_LEN_MAX]; + u32 i, address, regdump_addr = 0; + int ret; + + /* the reg dump pointer is copied to the host interest area */ + address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state)); + address = TARG_VTOP(ar->target_type, address); + + /* read RAM location through diagnostic window */ + ret = ath6kl_diag_read32(ar, address, ®dump_addr); + + if (ret || !regdump_addr) { + ath6kl_warn("failed to get ptr to register dump area: %d\n", + ret); + return; + } + + ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n", + regdump_addr); + regdump_addr = TARG_VTOP(ar->target_type, regdump_addr); + + /* fetch register dump data */ + ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)®dump_val[0], + REGISTER_DUMP_COUNT * (sizeof(u32))); + if (ret) { + ath6kl_warn("failed to get register dump: %d\n", ret); + return; + } + + ath6kl_info("crash dump:\n"); + ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version, + ar->wiphy->fw_version); + + BUILD_BUG_ON(REGISTER_DUMP_COUNT % 4); + + for (i = 0; i < REGISTER_DUMP_COUNT; i += 4) { + ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n", + i, + le32_to_cpu(regdump_val[i]), + le32_to_cpu(regdump_val[i + 1]), + le32_to_cpu(regdump_val[i + 2]), + le32_to_cpu(regdump_val[i + 3])); + } +} + +static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev) +{ + u32 dummy; + int ret; + + ath6kl_warn("firmware crashed\n"); + + /* + * read counter to clear the interrupt, the debug error interrupt is + * counter 0. 
+	 */
+	ret = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
+				  (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
+	if (ret)
+		ath6kl_warn("Failed to clear debug interrupt: %d\n", ret);
+
+	ath6kl_hif_dump_fw_crash(dev->ar);
+	ath6kl_read_fwlogs(dev->ar);
+	ath6kl_recovery_err_notify(dev->ar, ATH6KL_FW_ASSERT);
+
+	return ret;
+}
+
+/* mailbox recv message polling */
+int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
+			       int timeout)
+{
+	struct ath6kl_irq_proc_registers *rg;
+	int status = 0, i;
+	u8 htc_mbox = 1 << HTC_MAILBOX;
+
+	for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) {
+		/* this is the standard HIF way, load the reg table */
+		status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
+					     (u8 *) &dev->irq_proc_reg,
+					     sizeof(dev->irq_proc_reg),
+					     HIF_RD_SYNC_BYTE_INC);
+
+		if (status) {
+			ath6kl_err("failed to read reg table\n");
+			return status;
+		}
+
+		/* check for MBOX data and valid lookahead */
+		if (dev->irq_proc_reg.host_int_status & htc_mbox) {
+			if (dev->irq_proc_reg.rx_lkahd_valid &
+			    htc_mbox) {
+				/*
+				 * Mailbox has a message and the look ahead
+				 * is valid.
+				 */
+				rg = &dev->irq_proc_reg;
+				*lk_ahd =
+					le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
+				break;
+			}
+		}
+
+		/* delay a little */
+		mdelay(ATH6KL_TIME_QUANTUM);
+		ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i);
+	}
+
+	if (i == 0) {
+		ath6kl_err("timeout waiting for recv message\n");
+		status = -ETIME;
+		/* check if the target asserted */
+		if (dev->irq_proc_reg.counter_int_status &
+		    ATH6KL_TARGET_DEBUG_INTR_MASK)
+			/*
+			 * Target failure handler will be called in case of
+			 * an assert.
+			 */
+			ath6kl_hif_proc_dbg_intr(dev);
+	}
+
+	return status;
+}
+
+/*
+ * Disable packet reception (used in case the host runs out of buffers)
+ * using the interrupt enable registers through the host I/F
+ */
+int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx)
+{
+	struct ath6kl_irq_enable_reg regs;
+	int status = 0;
+
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n",
+		   enable_rx ? "enable" : "disable");
+
+	/* take the lock to protect interrupt enable shadows */
+	spin_lock_bh(&dev->lock);
+
+	if (enable_rx)
+		dev->irq_en_reg.int_status_en |=
+			SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
+	else
+		dev->irq_en_reg.int_status_en &=
+			~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
+
+	memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
+
+	spin_unlock_bh(&dev->lock);
+
+	status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
+				     &regs.int_status_en,
+				     sizeof(struct ath6kl_irq_enable_reg),
+				     HIF_WR_SYNC_BYTE_INC);
+
+	return status;
+}
+
+int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
+			       struct hif_scatter_req *scat_req, bool read)
+{
+	int status = 0;
+
+	if (read) {
+		scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
+		scat_req->addr = dev->ar->mbox_info.htc_addr;
+	} else {
+		scat_req->req = HIF_WR_ASYNC_BLOCK_INC;
+
+		scat_req->addr =
+			(scat_req->len > HIF_MBOX_WIDTH) ?
+			dev->ar->mbox_info.htc_ext_addr :
+			dev->ar->mbox_info.htc_addr;
+	}
+
+	ath6kl_dbg(ATH6KL_DBG_HIF,
+		   "hif submit scatter request entries %d len %d mbox 0x%x %s %s\n",
+		   scat_req->scat_entries, scat_req->len,
+		   scat_req->addr, !read ? "async" : "sync",
+		   (read) ?
"rd" : "wr"); + + if (!read && scat_req->virt_scat) { + status = ath6kl_hif_cp_scat_dma_buf(scat_req, false); + if (status) { + scat_req->status = status; + scat_req->complete(dev->ar->htc_target, scat_req); + return 0; + } + } + + status = ath6kl_hif_scat_req_rw(dev->ar, scat_req); + + if (read) { + /* in sync mode, we can touch the scatter request */ + scat_req->status = status; + if (!status && scat_req->virt_scat) + scat_req->status = + ath6kl_hif_cp_scat_dma_buf(scat_req, true); + } + + return status; +} + +static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev) +{ + u8 counter_int_status; + + ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n"); + + counter_int_status = dev->irq_proc_reg.counter_int_status & + dev->irq_en_reg.cntr_int_status_en; + + ath6kl_dbg(ATH6KL_DBG_IRQ, + "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n", + counter_int_status); + + /* + * NOTE: other modules like GMBOX may use the counter interrupt for + * credit flow control on other counters, we only need to check for + * the debug assertion counter interrupt. + */ + if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK) + return ath6kl_hif_proc_dbg_intr(dev); + + return 0; +} + +static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev) +{ + int status; + u8 error_int_status; + u8 reg_buf[4]; + + ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n"); + + error_int_status = dev->irq_proc_reg.error_int_status & 0x0F; + if (!error_int_status) { + WARN_ON(1); + return -EIO; + } + + ath6kl_dbg(ATH6KL_DBG_IRQ, + "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n", + error_int_status); + + if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status)) + ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n"); + + if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status)) + ath6kl_err("rx underflow\n"); + + if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status)) + ath6kl_err("tx overflow\n"); + + /* Clear the interrupt */ + dev->irq_proc_reg.error_int_status &= ~error_int_status; + + /* set W1C value to clear the interrupt, this hits the register first */ + reg_buf[0] = error_int_status; + reg_buf[1] = 0; + reg_buf[2] = 0; + reg_buf[3] = 0; + + status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS, + reg_buf, 4, HIF_WR_SYNC_BYTE_FIX); + + WARN_ON(status); + + return status; +} + +static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev) +{ + int status; + u8 cpu_int_status; + u8 reg_buf[4]; + + ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n"); + + cpu_int_status = dev->irq_proc_reg.cpu_int_status & + dev->irq_en_reg.cpu_int_status_en; + if (!cpu_int_status) { + WARN_ON(1); + return -EIO; + } + + ath6kl_dbg(ATH6KL_DBG_IRQ, + "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n", + cpu_int_status); + + /* Clear the interrupt */ + dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status; + + /* + * Set up the register transfer buffer to hit the register 4 times , + * this is done to make the access 4-byte aligned to mitigate issues + * with host bus interconnects that restrict bus transfer lengths to + * be a multiple of 4-bytes. 
+ */ + + /* set W1C value to clear the interrupt, this hits the register first */ + reg_buf[0] = cpu_int_status; + /* the remaining are set to zero which have no-effect */ + reg_buf[1] = 0; + reg_buf[2] = 0; + reg_buf[3] = 0; + + status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS, + reg_buf, 4, HIF_WR_SYNC_BYTE_FIX); + + WARN_ON(status); + + return status; +} + +/* process pending interrupts synchronously */ +static int proc_pending_irqs(struct ath6kl_device *dev, bool *done) +{ + struct ath6kl_irq_proc_registers *rg; + int status = 0; + u8 host_int_status = 0; + u32 lk_ahd = 0; + u8 htc_mbox = 1 << HTC_MAILBOX; + + ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev); + + /* + * NOTE: HIF implementation guarantees that the context of this + * call allows us to perform SYNCHRONOUS I/O, that is we can block, + * sleep or call any API that can block or switch thread/task + * contexts. This is a fully schedulable context. + */ + + /* + * Process pending intr only when int_status_en is clear, it may + * result in unnecessary bus transaction otherwise. Target may be + * unresponsive at the time. + */ + if (dev->irq_en_reg.int_status_en) { + /* + * Read the first 28 bytes of the HTC register table. This + * will yield us the value of different int status + * registers and the lookahead registers. + * + * length = sizeof(int_status) + sizeof(cpu_int_status) + * + sizeof(error_int_status) + + * sizeof(counter_int_status) + + * sizeof(mbox_frame) + sizeof(rx_lkahd_valid) + * + sizeof(hole) + sizeof(rx_lkahd) + + * sizeof(int_status_en) + + * sizeof(cpu_int_status_en) + + * sizeof(err_int_status_en) + + * sizeof(cntr_int_status_en); + */ + status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS, + (u8 *) &dev->irq_proc_reg, + sizeof(dev->irq_proc_reg), + HIF_RD_SYNC_BYTE_INC); + if (status) + goto out; + + ath6kl_dump_registers(dev, &dev->irq_proc_reg, + &dev->irq_en_reg); + trace_ath6kl_sdio_irq(&dev->irq_en_reg, + sizeof(dev->irq_en_reg)); + + /* Update only those registers that are enabled */ + host_int_status = dev->irq_proc_reg.host_int_status & + dev->irq_en_reg.int_status_en; + + /* Look at mbox status */ + if (host_int_status & htc_mbox) { + /* + * Mask out pending mbox value, we use "lookAhead as + * the real flag for mbox processing. + */ + host_int_status &= ~htc_mbox; + if (dev->irq_proc_reg.rx_lkahd_valid & + htc_mbox) { + rg = &dev->irq_proc_reg; + lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]); + if (!lk_ahd) + ath6kl_err("lookAhead is zero!\n"); + } + } + } + + if (!host_int_status && !lk_ahd) { + *done = true; + goto out; + } + + if (lk_ahd) { + int fetched = 0; + + ath6kl_dbg(ATH6KL_DBG_IRQ, + "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd); + /* + * Mailbox Interrupt, the HTC layer may issue async + * requests to empty the mailbox. When emptying the recv + * mailbox we use the async handler above called from the + * completion routine of the callers read request. This can + * improve performance by reducing context switching when + * we rapidly pull packets. + */ + status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt, + lk_ahd, &fetched); + if (status) + goto out; + + if (!fetched) + /* + * HTC could not pull any messages out due to lack + * of resources. 
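+ * Zeroing chk_irq_status_cnt here makes the final check in this
+ * function force *done, ending the IRQ processing loop.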
+			 */
+			dev->htc_cnxt->chk_irq_status_cnt = 0;
+	}
+
+	/* now handle the rest of them */
+	ath6kl_dbg(ATH6KL_DBG_IRQ,
+		   "valid interrupt source(s) for other interrupts: 0x%x\n",
+		   host_int_status);
+
+	if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
+		/* CPU Interrupt */
+		status = ath6kl_hif_proc_cpu_intr(dev);
+		if (status)
+			goto out;
+	}
+
+	if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
+		/* Error Interrupt */
+		status = ath6kl_hif_proc_err_intr(dev);
+		if (status)
+			goto out;
+	}
+
+	if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
+		/* Counter Interrupt */
+		status = ath6kl_hif_proc_counter_intr(dev);
+
+out:
+	/*
+	 * An optimization to bypass reading the IRQ status registers
+	 * unnecessarily: if upper layers determine that we are in a
+	 * low-throughput mode, we can rely on taking another interrupt
+	 * rather than re-checking the status registers, which can
+	 * re-wake the target.
+	 *
+	 * NOTE: host interfaces that make use of detecting pending
+	 * mbox messages at the hif layer cannot use this optimization
+	 * due to possible side effects; SPI requires the host to drain
+	 * all messages from the mailbox before exiting the ISR routine.
+	 */
+
+	if (!dev->htc_cnxt->chk_irq_status_cnt) {
+		ath6kl_dbg(ATH6KL_DBG_IRQ,
+			   "bypassing irq status re-check, forcing done\n");
+		*done = true;
+	}
+
+	ath6kl_dbg(ATH6KL_DBG_IRQ,
+		   "proc_pending_irqs: (done:%d, status=%d)\n", *done, status);
+
+	return status;
+}
+
+/* interrupt handler, kicks off all interrupt processing */
+int ath6kl_hif_intr_bh_handler(struct ath6kl *ar)
+{
+	struct ath6kl_device *dev = ar->htc_target->dev;
+	unsigned long timeout;
+	int status = 0;
+	bool done = false;
+
+	/*
+	 * Reset counter used to flag a re-scan of IRQ status registers on
+	 * the target.
+	 */
+	dev->htc_cnxt->chk_irq_status_cnt = 0;
+
+	/*
+	 * IRQ processing is synchronous, interrupt status registers can be
+	 * re-read.
+	 */
+	timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT);
+	while (time_before(jiffies, timeout) && !done) {
+		status = proc_pending_irqs(dev, &done);
+		if (status)
+			break;
+	}
+
+	return status;
+}
+EXPORT_SYMBOL(ath6kl_hif_intr_bh_handler);
+
+static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev)
+{
+	struct ath6kl_irq_enable_reg regs;
+	int status;
+
+	spin_lock_bh(&dev->lock);
+
+	/* Enable all but ATH6KL CPU interrupts */
+	dev->irq_en_reg.int_status_en =
+		SM(INT_STATUS_ENABLE_ERROR, 0x01) |
+		SM(INT_STATUS_ENABLE_CPU, 0x01) |
+		SM(INT_STATUS_ENABLE_COUNTER, 0x01);
+
+	/*
+	 * NOTE: There are some cases where HIF can do detection of
+	 * pending mbox messages which is disabled now.
+	 */
+	dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
+
+	/* Set up the CPU Interrupt status Register */
+	dev->irq_en_reg.cpu_int_status_en = 0;
+
+	/* Set up the Error Interrupt status Register */
+	dev->irq_en_reg.err_int_status_en =
+		SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) |
+		SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1);
+
+	/*
+	 * Enable Counter interrupt status register to get fatal errors for
+	 * debugging.
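+	 * Only the debug assertion counter (ATH6KL_TARGET_DEBUG_INTR_MASK,
+	 * counter 0) is enabled here; see ath6kl_hif_proc_dbg_intr().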
+	 */
+	dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT,
+						ATH6KL_TARGET_DEBUG_INTR_MASK);
+	memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
+
+	spin_unlock_bh(&dev->lock);
+
+	status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
+				     &regs.int_status_en, sizeof(regs),
+				     HIF_WR_SYNC_BYTE_INC);
+
+	if (status)
+		ath6kl_err("failed to update interrupt ctl reg err: %d\n",
+			   status);
+
+	return status;
+}
+
+int ath6kl_hif_disable_intrs(struct ath6kl_device *dev)
+{
+	struct ath6kl_irq_enable_reg regs;
+
+	spin_lock_bh(&dev->lock);
+	/* Disable all interrupts */
+	dev->irq_en_reg.int_status_en = 0;
+	dev->irq_en_reg.cpu_int_status_en = 0;
+	dev->irq_en_reg.err_int_status_en = 0;
+	dev->irq_en_reg.cntr_int_status_en = 0;
+	memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
+	spin_unlock_bh(&dev->lock);
+
+	return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
+				   &regs.int_status_en, sizeof(regs),
+				   HIF_WR_SYNC_BYTE_INC);
+}
+
+/* enable device interrupts */
+int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev)
+{
+	int status = 0;
+
+	/*
+	 * Make sure interrupts are disabled before unmasking at the HIF
+	 * layer. The rationale here is that between device insertion
+	 * (where we clear the interrupts the first time) and when HTC
+	 * is finally ready to handle interrupts, other software can perform
+	 * target "soft" resets. The ATH6KL interrupt enables reset back to an
+	 * "enabled" state when this happens.
+	 */
+	ath6kl_hif_disable_intrs(dev);
+
+	/* unmask the host controller interrupts */
+	ath6kl_hif_irq_enable(dev->ar);
+	status = ath6kl_hif_enable_intrs(dev);
+
+	return status;
+}
+
+/* disable all device interrupts */
+int ath6kl_hif_mask_intrs(struct ath6kl_device *dev)
+{
+	/*
+	 * Mask the interrupt at the HIF layer to avoid any stray interrupt
+	 * taken while we zero out our shadow registers in
+	 * ath6kl_hif_disable_intrs().
+	 */
+	ath6kl_hif_irq_disable(dev->ar);
+
+	return ath6kl_hif_disable_intrs(dev);
+}
+
+int ath6kl_hif_setup(struct ath6kl_device *dev)
+{
+	int status = 0;
+
+	spin_lock_init(&dev->lock);
+
+	/*
+	 * NOTE: we actually get the block size of a mailbox other than 0,
+	 * for SDIO the block size on mailbox 0 is artificially set to 1.
+	 * So we use the block size that is set for the other 3 mailboxes.
+	 */
+	dev->htc_cnxt->block_sz = dev->ar->mbox_info.block_size;
+
+	/* must be a power of 2 */
+	if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) {
+		WARN_ON(1);
+		status = -EINVAL;
+		goto fail_setup;
+	}
+
+	/* assemble mask, used for padding to a block */
+	dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1;
+
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
+		   dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
+
+	status = ath6kl_hif_disable_intrs(dev);
+
+fail_setup:
+	return status;
+}
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
new file mode 100644
index 000000000..ba16b98c8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef HIF_H +#define HIF_H + +#include "common.h" +#include "core.h" + +#include <linux/scatterlist.h> + +#define BUS_REQUEST_MAX_NUM 64 +#define HIF_MBOX_BLOCK_SIZE 128 +#define HIF_MBOX0_BLOCK_SIZE 1 + +#define HIF_DMA_BUFFER_SIZE (32 * 1024) +#define CMD53_FIXED_ADDRESS 1 +#define CMD53_INCR_ADDRESS 2 + +#define MAX_SCATTER_REQUESTS 4 +#define MAX_SCATTER_ENTRIES_PER_REQ 16 +#define MAX_SCATTER_REQ_TRANSFER_SIZE (32 * 1024) + +/* Mailbox address in SDIO address space */ +#define HIF_MBOX_BASE_ADDR 0x800 +#define HIF_MBOX_WIDTH 0x800 + +#define HIF_MBOX_END_ADDR (HTC_MAILBOX_NUM_MAX * HIF_MBOX_WIDTH - 1) + +/* version 1 of the chip has only a 12K extended mbox range */ +#define HIF_MBOX0_EXT_BASE_ADDR 0x4000 +#define HIF_MBOX0_EXT_WIDTH (12*1024) + +/* GMBOX addresses */ +#define HIF_GMBOX_BASE_ADDR 0x7000 +#define HIF_GMBOX_WIDTH 0x4000 + +/* interrupt mode register */ +#define CCCR_SDIO_IRQ_MODE_REG 0xF0 + +/* mode to enable special 4-bit interrupt assertion without clock */ +#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0) + +/* HTC runs over mailbox 0 */ +#define HTC_MAILBOX 0 + +#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01 + +/* FIXME: are these duplicates with MAX_SCATTER_ values in hif.h? */ +#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16 +#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024) +#define ATH6KL_SCATTER_REQS 4 + +#define ATH6KL_HIF_COMMUNICATION_TIMEOUT 1000 + +struct bus_request { + struct list_head list; + + /* request data */ + u32 address; + + u8 *buffer; + u32 length; + u32 request; + struct htc_packet *packet; + int status; + + /* this is a scatter request */ + struct hif_scatter_req *scat_req; +}; + +/* direction of transfer (read/write) */ +#define HIF_READ 0x00000001 +#define HIF_WRITE 0x00000002 +#define HIF_DIR_MASK (HIF_READ | HIF_WRITE) + +/* + * emode - This indicates the whether the command is to be executed in a + * blocking or non-blocking fashion (HIF_SYNCHRONOUS/ + * HIF_ASYNCHRONOUS). The read/write data paths in HTC have been + * implemented using the asynchronous mode allowing the bus + * driver to indicate the completion of operation through the + * registered callback routine. The requirement primarily comes + * from the contexts these operations get called from (a driver's + * transmit context or the ISR context in case of receive). + * Support for both of these modes is essential. + */ +#define HIF_SYNCHRONOUS 0x00000010 +#define HIF_ASYNCHRONOUS 0x00000020 +#define HIF_EMODE_MASK (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS) + +/* + * dmode - An interface may support different kinds of commands based on + * the tradeoff between the amount of data it can carry and the + * setup time. Byte and Block modes are supported (HIF_BYTE_BASIS/ + * HIF_BLOCK_BASIS). In case of latter, the data is rounded off + * to the nearest block size by padding. The size of the block is + * configurable at compile time using the HIF_BLOCK_SIZE and is + * negotiated with the target during initialization after the + * ATH6KL interrupts are enabled. 
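+ * For example, with the 128-byte HIF_MBOX_BLOCK_SIZE, a 130-byte
+ * message is padded out to 256 bytes (two blocks).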
+ */
+#define HIF_BYTE_BASIS			0x00000040
+#define HIF_BLOCK_BASIS			0x00000080
+#define HIF_DMODE_MASK			(HIF_BYTE_BASIS | HIF_BLOCK_BASIS)
+
+/*
+ * amode - This indicates if the address has to be incremented on ATH6KL
+ *         after every read/write operation (HIF_FIXED_ADDRESS/
+ *         HIF_INCREMENTAL_ADDRESS).
+ */
+#define HIF_FIXED_ADDRESS		0x00000100
+#define HIF_INCREMENTAL_ADDRESS		0x00000200
+#define HIF_AMODE_MASK		(HIF_FIXED_ADDRESS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_ASYNC_BYTE_INC					\
+	(HIF_WRITE | HIF_ASYNCHRONOUS |				\
+	 HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_ASYNC_BLOCK_INC					\
+	(HIF_WRITE | HIF_ASYNCHRONOUS |				\
+	 HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_SYNC_BYTE_FIX					\
+	(HIF_WRITE | HIF_SYNCHRONOUS |				\
+	 HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+
+#define HIF_WR_SYNC_BYTE_INC					\
+	(HIF_WRITE | HIF_SYNCHRONOUS |				\
+	 HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_SYNC_BLOCK_INC					\
+	(HIF_WRITE | HIF_SYNCHRONOUS |				\
+	 HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_RD_SYNC_BYTE_INC					\
+	(HIF_READ | HIF_SYNCHRONOUS |				\
+	 HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_RD_SYNC_BYTE_FIX					\
+	(HIF_READ | HIF_SYNCHRONOUS |				\
+	 HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+
+#define HIF_RD_ASYNC_BLOCK_FIX					\
+	(HIF_READ | HIF_ASYNCHRONOUS |				\
+	 HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+
+#define HIF_RD_SYNC_BLOCK_FIX					\
+	(HIF_READ | HIF_SYNCHRONOUS |				\
+	 HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+
+struct hif_scatter_item {
+	u8 *buf;
+	int len;
+	struct htc_packet *packet;
+};
+
+struct hif_scatter_req {
+	struct list_head list;
+	/* address for the read/write operation */
+	u32 addr;
+
+	/* request flags */
+	u32 req;
+
+	/* total length of entire transfer */
+	u32 len;
+
+	bool virt_scat;
+
+	void (*complete) (struct htc_target *, struct hif_scatter_req *);
+	int status;
+	int scat_entries;
+
+	struct bus_request *busrequest;
+	struct scatterlist *sgentries;
+
+	/* bounce buffer for upper layers to copy to/from */
+	u8 *virt_dma_buf;
+
+	u32 scat_q_depth;
+
+	struct hif_scatter_item scat_list[];
+};
+
+struct ath6kl_irq_proc_registers {
+	u8 host_int_status;
+	u8 cpu_int_status;
+	u8 error_int_status;
+	u8 counter_int_status;
+	u8 mbox_frame;
+	u8 rx_lkahd_valid;
+	u8 host_int_status2;
+	u8 gmbox_rx_avail;
+	__le32 rx_lkahd[2];
+	__le32 rx_gmbox_lkahd_alias[2];
+} __packed;
+
+struct ath6kl_irq_enable_reg {
+	u8 int_status_en;
+	u8 cpu_int_status_en;
+	u8 err_int_status_en;
+	u8 cntr_int_status_en;
+} __packed;
+
+struct ath6kl_device {
+	/* protects irq_proc_reg and irq_en_reg below */
+	spinlock_t lock;
+	struct ath6kl_irq_proc_registers irq_proc_reg;
+	struct ath6kl_irq_enable_reg irq_en_reg;
+	struct htc_target *htc_cnxt;
+	struct ath6kl *ar;
+};
+
+struct ath6kl_hif_ops {
+	int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
+			       u32 len, u32 request);
+	int (*write_async)(struct ath6kl *ar, u32 address, u8 *buffer,
+			   u32 length, u32 request, struct htc_packet *packet);
+
+	void (*irq_enable)(struct ath6kl *ar);
+	void (*irq_disable)(struct ath6kl *ar);
+
+	struct hif_scatter_req *(*scatter_req_get)(struct ath6kl *ar);
+	void (*scatter_req_add)(struct ath6kl *ar,
+				struct hif_scatter_req *s_req);
+	int (*enable_scatter)(struct ath6kl *ar);
+	int (*scat_req_rw) (struct ath6kl *ar,
+			    struct hif_scatter_req *scat_req);
+	void (*cleanup_scatter)(struct ath6kl *ar);
+	int (*suspend)(struct ath6kl *ar, struct cfg80211_wowlan *wow);
+	int (*resume)(struct ath6kl *ar);
+	int (*diag_read32)(struct ath6kl *ar, u32 address, u32 *value);
+	int
(*diag_write32)(struct ath6kl *ar, u32 address, __le32 value); + int (*bmi_read)(struct ath6kl *ar, u8 *buf, u32 len); + int (*bmi_write)(struct ath6kl *ar, u8 *buf, u32 len); + int (*power_on)(struct ath6kl *ar); + int (*power_off)(struct ath6kl *ar); + void (*stop)(struct ath6kl *ar); + int (*pipe_send)(struct ath6kl *ar, u8 pipe, struct sk_buff *hdr_buf, + struct sk_buff *buf); + void (*pipe_get_default)(struct ath6kl *ar, u8 *pipe_ul, u8 *pipe_dl); + int (*pipe_map_service)(struct ath6kl *ar, u16 service_id, u8 *pipe_ul, + u8 *pipe_dl); + u16 (*pipe_get_free_queue_number)(struct ath6kl *ar, u8 pipe); +}; + +int ath6kl_hif_setup(struct ath6kl_device *dev); +int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev); +int ath6kl_hif_mask_intrs(struct ath6kl_device *dev); +int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, + u32 *lk_ahd, int timeout); +int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx); +int ath6kl_hif_disable_intrs(struct ath6kl_device *dev); + +int ath6kl_hif_rw_comp_handler(void *context, int status); +int ath6kl_hif_intr_bh_handler(struct ath6kl *ar); + +/* Scatter Function and Definitions */ +int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev, + struct hif_scatter_req *scat_req, bool read); + +#endif diff --git a/drivers/net/wireless/ath/ath6kl/htc-ops.h b/drivers/net/wireless/ath/ath6kl/htc-ops.h new file mode 100644 index 000000000..2d4eed55c --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/htc-ops.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef HTC_OPS_H +#define HTC_OPS_H + +#include "htc.h" +#include "debug.h" + +static inline void *ath6kl_htc_create(struct ath6kl *ar) +{ + return ar->htc_ops->create(ar); +} + +static inline int ath6kl_htc_wait_target(struct htc_target *target) +{ + return target->dev->ar->htc_ops->wait_target(target); +} + +static inline int ath6kl_htc_start(struct htc_target *target) +{ + return target->dev->ar->htc_ops->start(target); +} + +static inline int ath6kl_htc_conn_service(struct htc_target *target, + struct htc_service_connect_req *req, + struct htc_service_connect_resp *resp) +{ + return target->dev->ar->htc_ops->conn_service(target, req, resp); +} + +static inline int ath6kl_htc_tx(struct htc_target *target, + struct htc_packet *packet) +{ + return target->dev->ar->htc_ops->tx(target, packet); +} + +static inline void ath6kl_htc_stop(struct htc_target *target) +{ + return target->dev->ar->htc_ops->stop(target); +} + +static inline void ath6kl_htc_cleanup(struct htc_target *target) +{ + return target->dev->ar->htc_ops->cleanup(target); +} + +static inline void ath6kl_htc_flush_txep(struct htc_target *target, + enum htc_endpoint_id endpoint, + u16 tag) +{ + return target->dev->ar->htc_ops->flush_txep(target, endpoint, tag); +} + +static inline void ath6kl_htc_flush_rx_buf(struct htc_target *target) +{ + return target->dev->ar->htc_ops->flush_rx_buf(target); +} + +static inline void ath6kl_htc_activity_changed(struct htc_target *target, + enum htc_endpoint_id endpoint, + bool active) +{ + return target->dev->ar->htc_ops->activity_changed(target, endpoint, + active); +} + +static inline int ath6kl_htc_get_rxbuf_num(struct htc_target *target, + enum htc_endpoint_id endpoint) +{ + return target->dev->ar->htc_ops->get_rxbuf_num(target, endpoint); +} + +static inline int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, + struct list_head *pktq) +{ + return target->dev->ar->htc_ops->add_rxbuf_multiple(target, pktq); +} + +static inline int ath6kl_htc_credit_setup(struct htc_target *target, + struct ath6kl_htc_credit_info *info) +{ + return target->dev->ar->htc_ops->credit_setup(target, info); +} + +static inline void ath6kl_htc_tx_complete(struct ath6kl *ar, + struct sk_buff *skb) +{ + ar->htc_ops->tx_complete(ar, skb); +} + + +static inline void ath6kl_htc_rx_complete(struct ath6kl *ar, + struct sk_buff *skb, u8 pipe) +{ + ar->htc_ops->rx_complete(ar, skb, pipe); +} + + +#endif diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h new file mode 100644 index 000000000..d3534a29c --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/htc.h @@ -0,0 +1,684 @@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef HTC_H +#define HTC_H + +#include "common.h" + +/* frame header flags */ + +/* send direction */ +#define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0) +#define HTC_FLAGS_SEND_BUNDLE (1 << 1) +#define HTC_FLAGS_TX_FIXUP_NETBUF (1 << 2) + +/* receive direction */ +#define HTC_FLG_RX_UNUSED (1 << 0) +#define HTC_FLG_RX_TRAILER (1 << 1) +/* Bundle count maske and shift */ +#define HTC_FLG_RX_BNDL_CNT (0xF0) +#define HTC_FLG_RX_BNDL_CNT_S 4 + +#define HTC_HDR_LENGTH (sizeof(struct htc_frame_hdr)) +#define HTC_MAX_PAYLOAD_LENGTH (4096 - sizeof(struct htc_frame_hdr)) + +/* HTC control message IDs */ + +#define HTC_MSG_READY_ID 1 +#define HTC_MSG_CONN_SVC_ID 2 +#define HTC_MSG_CONN_SVC_RESP_ID 3 +#define HTC_MSG_SETUP_COMPLETE_ID 4 +#define HTC_MSG_SETUP_COMPLETE_EX_ID 5 + +#define HTC_MAX_CTRL_MSG_LEN 256 + +#define HTC_VERSION_2P0 0x00 +#define HTC_VERSION_2P1 0x01 + +#define HTC_SERVICE_META_DATA_MAX_LENGTH 128 + +#define HTC_CONN_FLGS_THRESH_LVL_QUAT 0x0 +#define HTC_CONN_FLGS_THRESH_LVL_HALF 0x1 +#define HTC_CONN_FLGS_THRESH_LVL_THREE_QUAT 0x2 +#define HTC_CONN_FLGS_REDUCE_CRED_DRIB 0x4 +#define HTC_CONN_FLGS_THRESH_MASK 0x3 +/* disable credit flow control on a specific service */ +#define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL (1 << 3) +#define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT 8 +#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00U + +/* connect response status codes */ +#define HTC_SERVICE_SUCCESS 0 +#define HTC_SERVICE_NOT_FOUND 1 +#define HTC_SERVICE_FAILED 2 + +/* no resources (i.e. no more endpoints) */ +#define HTC_SERVICE_NO_RESOURCES 3 + +/* specific service is not allowing any more endpoints */ +#define HTC_SERVICE_NO_MORE_EP 4 + +/* report record IDs */ +#define HTC_RECORD_NULL 0 +#define HTC_RECORD_CREDITS 1 +#define HTC_RECORD_LOOKAHEAD 2 +#define HTC_RECORD_LOOKAHEAD_BUNDLE 3 + +#define HTC_SETUP_COMP_FLG_RX_BNDL_EN (1 << 0) +#define HTC_SETUP_COMP_FLG_DISABLE_TX_CREDIT_FLOW (1 << 1) + +#define MAKE_SERVICE_ID(group, index) \ + (int)(((int)group << 8) | (int)(index)) + +/* NOTE: service ID of 0x0000 is reserved and should never be used */ +#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 1) +#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 0) +#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1) +#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 2) +#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 3) +#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4) +#define WMI_MAX_SERVICES 5 + +#define WMM_NUM_AC 4 + +/* reserved and used to flush ALL packets */ +#define HTC_TX_PACKET_TAG_ALL 0 +#define HTC_SERVICE_TX_PACKET_TAG 1 +#define HTC_TX_PACKET_TAG_USER_DEFINED (HTC_SERVICE_TX_PACKET_TAG + 9) + +/* more packets on this endpoint are being fetched */ +#define HTC_RX_FLAGS_INDICATE_MORE_PKTS (1 << 0) + +/* TODO.. 
for BMI */
+#define ENDPOINT1 0
+/* TODO -remove me, but we have to fix BMI first */
+#define HTC_MAILBOX_NUM_MAX    4
+
+/* enable send bundle padding for this endpoint */
+#define HTC_FLGS_TX_BNDL_PAD_EN	 (1 << 0)
+#define HTC_EP_ACTIVE		 ((u32) (1u << 31))
+
+/* HTC operational parameters */
+#define HTC_TARGET_RESPONSE_TIMEOUT	2000 /* in ms */
+#define HTC_TARGET_RESPONSE_POLL_WAIT	10
+#define HTC_TARGET_RESPONSE_POLL_COUNT	200
+#define HTC_TARGET_DEBUG_INTR_MASK	0x01
+#define HTC_TARGET_CREDIT_INTR_MASK	0xF0
+
+#define HTC_HOST_MAX_MSG_PER_BUNDLE	8
+#define HTC_MIN_HTC_MSGS_TO_BUNDLE	2
+
+/* packet flags */
+
+#define HTC_RX_PKT_IGNORE_LOOKAHEAD	(1 << 0)
+#define HTC_RX_PKT_REFRESH_HDR		(1 << 1)
+#define HTC_RX_PKT_PART_OF_BUNDLE	(1 << 2)
+#define HTC_RX_PKT_NO_RECYCLE		(1 << 3)
+
+#define NUM_CONTROL_BUFFERS	8
+#define NUM_CONTROL_TX_BUFFERS	2
+#define NUM_CONTROL_RX_BUFFERS	(NUM_CONTROL_BUFFERS - NUM_CONTROL_TX_BUFFERS)
+
+#define HTC_RECV_WAIT_BUFFERS	(1 << 0)
+#define HTC_OP_STATE_STOPPING	(1 << 0)
+#define HTC_OP_STATE_SETUP_COMPLETE	(1 << 1)
+
+/*
+ * The frame header length and message formats defined herein were selected
+ * to accommodate optimal alignment for target processing. This reduces
+ * code size and improves performance. Any changes to the header length may
+ * alter the alignment and cause exceptions on the target. When adding to
+ * the message structures, ensure that fields are properly aligned.
+ */
+
+/* HTC frame header
+ *
+ * NOTE: do not remove or re-arrange the fields, these are minimally
+ * required to take advantage of 4-byte lookaheads in some hardware
+ * implementations.
+ */
+struct htc_frame_hdr {
+	struct_group_tagged(htc_frame_look_ahead, header,
+		union {
+			struct {
+				u8 eid;
+				u8 flags;
+
+				/* length of data (including trailer) that follows the header */
+				__le16 payld_len;
+
+			};
+			u32 word;
+		};
+	);
+	/* end of 4-byte lookahead */
+
+	u8 ctrl[2];
+} __packed;
+
+/* HTC ready message */
+struct htc_ready_msg {
+	__le16 msg_id;
+	__le16 cred_cnt;
+	__le16 cred_sz;
+	u8 max_ep;
+	u8 pad;
+} __packed;
+
+/* extended HTC ready message */
+struct htc_ready_ext_msg {
+	struct htc_ready_msg ver2_0_info;
+	u8 htc_ver;
+	u8 msg_per_htc_bndl;
+} __packed;
+
+/* connect service */
+struct htc_conn_service_msg {
+	__le16 msg_id;
+	__le16 svc_id;
+	__le16 conn_flags;
+	u8 svc_meta_len;
+	u8 pad;
+} __packed;
+
+/* connect response */
+struct htc_conn_service_resp {
+	__le16 msg_id;
+	__le16 svc_id;
+	u8 status;
+	u8 eid;
+	__le16 max_msg_sz;
+	u8 svc_meta_len;
+	u8 pad;
+} __packed;
+
+struct htc_setup_comp_msg {
+	__le16 msg_id;
+} __packed;
+
+/* extended setup completion message */
+struct htc_setup_comp_ext_msg {
+	__le16 msg_id;
+	__le32 flags;
+	u8 msg_per_rxbndl;
+	u8 Rsvd[3];
+} __packed;
+
+struct htc_record_hdr {
+	u8 rec_id;
+	u8 len;
+} __packed;
+
+struct htc_credit_report {
+	u8 eid;
+	u8 credits;
+} __packed;
+
+/*
+ * NOTE: The lk_ahd array is guarded by pre_valid and post_valid guard
+ * bytes. The pre_valid byte must equal the inverse of the post_valid
+ * byte.
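+ * For example, a report with pre_valid of 0x5A is treated as valid
+ * only when post_valid is 0xA5 (the bitwise complement).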
+ */
+struct htc_lookahead_report {
+	u8 pre_valid;
+	u8 lk_ahd[4];
+	u8 post_valid;
+} __packed;
+
+struct htc_bundle_lkahd_rpt {
+	u8 lk_ahd[4];
+} __packed;
+
+/* Current service IDs */
+
+enum htc_service_grp_ids {
+	RSVD_SERVICE_GROUP = 0,
+	WMI_SERVICE_GROUP = 1,
+
+	HTC_TEST_GROUP = 254,
+	HTC_SERVICE_GROUP_LAST = 255
+};
+
+/* ------ endpoint IDS ------ */
+
+enum htc_endpoint_id {
+	ENDPOINT_UNUSED = -1,
+	ENDPOINT_0 = 0,
+	ENDPOINT_1 = 1,
+	ENDPOINT_2 = 2,
+	ENDPOINT_3,
+	ENDPOINT_4,
+	ENDPOINT_5,
+	ENDPOINT_6,
+	ENDPOINT_7,
+	ENDPOINT_8,
+	ENDPOINT_MAX,
+};
+
+struct htc_tx_packet_info {
+	u16 tag;
+	int cred_used;
+	u8 flags;
+	int seqno;
+};
+
+struct htc_rx_packet_info {
+	u32 exp_hdr;
+	u32 rx_flags;
+	u32 indicat_flags;
+};
+
+struct htc_target;
+
+/* wrapper around endpoint-specific packets */
+struct htc_packet {
+	struct list_head list;
+
+	/* caller's per packet specific context */
+	void *pkt_cntxt;
+
+	/*
+	 * the true buffer start; the caller can store the real
+	 * buffer start here. In receive callbacks, the HTC layer
+	 * sets buf to the start of the payload past the header.
+	 * This field allows the caller to reset buf when it recycles
+	 * receive packets back to HTC.
+	 */
+	u8 *buf_start;
+
+	/*
+	 * Pointer to the start of the buffer. In the transmit
+	 * direction this points to the start of the payload. In the
+	 * receive direction, however, the buffer when queued up
+	 * points to the start of the HTC header but when returned
+	 * to the caller points to the start of the payload.
+	 */
+	u8 *buf;
+	u32 buf_len;
+
+	/* actual length of payload */
+	u32 act_len;
+
+	/* endpoint that this packet was sent/recv'd from */
+	enum htc_endpoint_id endpoint;
+
+	/* completion status */
+
+	int status;
+	union {
+		struct htc_tx_packet_info tx;
+		struct htc_rx_packet_info rx;
+	} info;
+
+	void (*completion) (struct htc_target *, struct htc_packet *);
+	struct htc_target *context;
+
+	/*
+	 * optimization for network-oriented data: the HTC packet
+	 * can pass the network buffer corresponding to the HTC packet;
+	 * lower layers may optimize the transfer knowing this is
+	 * a network buffer
+	 */
+	struct sk_buff *skb;
+};
+
+enum htc_send_full_action {
+	HTC_SEND_FULL_KEEP = 0,
+	HTC_SEND_FULL_DROP = 1,
+};
+
+struct htc_ep_callbacks {
+	void (*tx_complete) (struct htc_target *, struct htc_packet *);
+	void (*rx) (struct htc_target *, struct htc_packet *);
+	void (*rx_refill) (struct htc_target *, enum htc_endpoint_id endpoint);
+	enum htc_send_full_action (*tx_full) (struct htc_target *,
+					      struct htc_packet *);
+	struct htc_packet *(*rx_allocthresh) (struct htc_target *,
+					      enum htc_endpoint_id, int);
+	void (*tx_comp_multi) (struct htc_target *, struct list_head *);
+	int rx_alloc_thresh;
+	int rx_refill_thresh;
+};
+
+/* service connection information */
+struct htc_service_connect_req {
+	u16 svc_id;
+	u16 conn_flags;
+	struct htc_ep_callbacks ep_cb;
+	int max_txq_depth;
+	u32 flags;
+	unsigned int max_rxmsg_sz;
+};
+
+/* service connection response information */
+struct htc_service_connect_resp {
+	u8 buf_len;
+	u8 act_len;
+	enum htc_endpoint_id endpoint;
+	unsigned int len_max;
+	u8 resp_code;
+};
+
+/* endpoint distribution structure */
+struct htc_endpoint_credit_dist {
+	struct list_head list;
+
+	/* Service ID (set by HTC) */
+	u16 svc_id;
+
+	/* endpoint for this distribution struct (set by HTC) */
+	enum htc_endpoint_id endpoint;
+
+	u32 dist_flags;
+
+	/*
+	 * credits for normal operation, anything above this
+	 * indicates the endpoint is over-subscribed.
+ */ + int cred_norm; + + /* floor for credit distribution */ + int cred_min; + + int cred_assngd; + + /* current credits available */ + int credits; + + /* + * pending credits to distribute on this endpoint, this + * is set by HTC when credit reports arrive. The credit + * distribution functions sets this to zero when it distributes + * the credits. + */ + int cred_to_dist; + + /* + * the number of credits that the current pending TX packet needs + * to transmit. This is set by HTC when endpoint needs credits in + * order to transmit. + */ + int seek_cred; + + /* size in bytes of each credit */ + int cred_sz; + + /* credits required for a maximum sized messages */ + int cred_per_msg; + + /* reserved for HTC use */ + struct htc_endpoint *htc_ep; + + /* + * current depth of TX queue , i.e. messages waiting for credits + * This field is valid only when HTC_CREDIT_DIST_ACTIVITY_CHANGE + * or HTC_CREDIT_DIST_SEND_COMPLETE is indicated on an endpoint + * that has non-zero credits to recover. + */ + int txq_depth; +}; + +/* + * credit distribution code that is passed into the distribution function, + * there are mandatory and optional codes that must be handled + */ +enum htc_credit_dist_reason { + HTC_CREDIT_DIST_SEND_COMPLETE = 0, + HTC_CREDIT_DIST_ACTIVITY_CHANGE = 1, + HTC_CREDIT_DIST_SEEK_CREDITS, +}; + +struct ath6kl_htc_credit_info { + int total_avail_credits; + int cur_free_credits; + + /* list of lowest priority endpoints */ + struct list_head lowestpri_ep_dist; +}; + +/* endpoint statistics */ +struct htc_endpoint_stats { + /* + * number of times the host set the credit-low flag in a send + * message on this endpoint + */ + u32 cred_low_indicate; + + u32 tx_issued; + u32 tx_pkt_bundled; + u32 tx_bundles; + u32 tx_dropped; + + /* running count of total credit reports received for this endpoint */ + u32 tx_cred_rpt; + + /* credit reports received from this endpoint's RX packets */ + u32 cred_rpt_from_rx; + + /* credit reports received from RX packets of other endpoints */ + u32 cred_rpt_from_other; + + /* credit reports received from endpoint 0 RX packets */ + u32 cred_rpt_ep0; + + /* count of credits received via Rx packets on this endpoint */ + u32 cred_from_rx; + + /* count of credits received via another endpoint */ + u32 cred_from_other; + + /* count of credits received via another endpoint */ + u32 cred_from_ep0; + + /* count of consummed credits */ + u32 cred_cosumd; + + /* count of credits returned */ + u32 cred_retnd; + + u32 rx_pkts; + + /* count of lookahead records found in Rx msg */ + u32 rx_lkahds; + + /* count of recv packets received in a bundle */ + u32 rx_bundl; + + /* count of number of bundled lookaheads */ + u32 rx_bundle_lkahd; + + /* count of the number of bundle indications from the HTC header */ + u32 rx_bundle_from_hdr; + + /* the number of times the recv allocation threshold was hit */ + u32 rx_alloc_thresh_hit; + + /* total number of bytes */ + u32 rxalloc_thresh_byte; +}; + +struct htc_endpoint { + enum htc_endpoint_id eid; + u16 svc_id; + struct list_head txq; + struct list_head rx_bufq; + struct htc_endpoint_credit_dist cred_dist; + struct htc_ep_callbacks ep_cb; + int max_txq_depth; + int len_max; + int tx_proc_cnt; + int rx_proc_cnt; + struct htc_target *target; + u8 seqno; + u32 conn_flags; + struct htc_endpoint_stats ep_st; + u16 tx_drop_packet_threshold; + + struct { + u8 pipeid_ul; + u8 pipeid_dl; + struct list_head tx_lookup_queue; + bool tx_credit_flow_enabled; + } pipe; +}; + +struct htc_control_buffer { + struct htc_packet packet; + u8 *buf; +}; + 
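+/*
+ * Per-service TX credit allocation for the HTC pipe implementation. As an
+ * illustration, an entry such as { .service_id = WMI_CONTROL_SVC,
+ * .credit_alloc = 2 } would set aside two TX credits for the WMI control
+ * service.
+ */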
+struct htc_pipe_txcredit_alloc {
+	u16 service_id;
+	u8 credit_alloc;
+};
+
+enum htc_send_queue_result {
+	HTC_SEND_QUEUE_OK = 0,    /* packet was queued */
+	HTC_SEND_QUEUE_DROP = 1,  /* this packet should be dropped */
+};
+
+struct ath6kl_htc_ops {
+	void* (*create)(struct ath6kl *ar);
+	int (*wait_target)(struct htc_target *target);
+	int (*start)(struct htc_target *target);
+	int (*conn_service)(struct htc_target *target,
+			    struct htc_service_connect_req *req,
+			    struct htc_service_connect_resp *resp);
+	int (*tx)(struct htc_target *target, struct htc_packet *packet);
+	void (*stop)(struct htc_target *target);
+	void (*cleanup)(struct htc_target *target);
+	void (*flush_txep)(struct htc_target *target,
+			   enum htc_endpoint_id endpoint, u16 tag);
+	void (*flush_rx_buf)(struct htc_target *target);
+	void (*activity_changed)(struct htc_target *target,
+				 enum htc_endpoint_id endpoint,
+				 bool active);
+	int (*get_rxbuf_num)(struct htc_target *target,
+			     enum htc_endpoint_id endpoint);
+	int (*add_rxbuf_multiple)(struct htc_target *target,
+				  struct list_head *pktq);
+	int (*credit_setup)(struct htc_target *target,
+			    struct ath6kl_htc_credit_info *cred_info);
+	int (*tx_complete)(struct ath6kl *ar, struct sk_buff *skb);
+	int (*rx_complete)(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
+};
+
+struct ath6kl_device;
+
+/* our HTC target state */
+struct htc_target {
+	struct htc_endpoint endpoint[ENDPOINT_MAX];
+
+	/* contains struct htc_endpoint_credit_dist */
+	struct list_head cred_dist_list;
+
+	struct list_head free_ctrl_txbuf;
+	struct list_head free_ctrl_rxbuf;
+	struct ath6kl_htc_credit_info *credit_info;
+	int tgt_creds;
+	unsigned int tgt_cred_sz;
+
+	/* protects free_ctrl_txbuf and free_ctrl_rxbuf */
+	spinlock_t htc_lock;
+
+	/* FIXME: does this protect rx_bufq and endpoint structures or what?
*/ + spinlock_t rx_lock; + + /* protects endpoint->txq */ + spinlock_t tx_lock; + + struct ath6kl_device *dev; + u32 htc_flags; + u32 rx_st_flags; + enum htc_endpoint_id ep_waiting; + u8 htc_tgt_ver; + + /* max messages per bundle for HTC */ + int msg_per_bndl_max; + + u32 tx_bndl_mask; + int rx_bndl_enable; + int max_rx_bndl_sz; + int max_tx_bndl_sz; + + u32 block_sz; + u32 block_mask; + + int max_scat_entries; + int max_xfer_szper_scatreq; + + int chk_irq_status_cnt; + + /* counts the number of Tx without bundling continously per AC */ + u32 ac_tx_count[WMM_NUM_AC]; + + struct { + struct htc_packet *htc_packet_pool; + u8 ctrl_response_buf[HTC_MAX_CTRL_MSG_LEN]; + int ctrl_response_len; + bool ctrl_response_valid; + struct htc_pipe_txcredit_alloc txcredit_alloc[ENDPOINT_MAX]; + } pipe; +}; + +int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, + u32 msg_look_ahead, int *n_pkts); + +static inline void set_htc_pkt_info(struct htc_packet *packet, void *context, + u8 *buf, unsigned int len, + enum htc_endpoint_id eid, u16 tag) +{ + packet->pkt_cntxt = context; + packet->buf = buf; + packet->act_len = len; + packet->endpoint = eid; + packet->info.tx.tag = tag; +} + +static inline void htc_rxpkt_reset(struct htc_packet *packet) +{ + packet->buf = packet->buf_start; + packet->act_len = 0; +} + +static inline void set_htc_rxpkt_info(struct htc_packet *packet, void *context, + u8 *buf, unsigned long len, + enum htc_endpoint_id eid) +{ + packet->pkt_cntxt = context; + packet->buf = buf; + packet->buf_start = buf; + packet->buf_len = len; + packet->endpoint = eid; +} + +static inline int get_queue_depth(struct list_head *queue) +{ + struct list_head *tmp_list; + int depth = 0; + + list_for_each(tmp_list, queue) + depth++; + + return depth; +} + +void ath6kl_htc_pipe_attach(struct ath6kl *ar); +void ath6kl_htc_mbox_attach(struct ath6kl *ar); + +#endif diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c new file mode 100644 index 000000000..1963d3145 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c @@ -0,0 +1,2931 @@ +/* + * Copyright (c) 2007-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "core.h" +#include "hif.h" +#include "debug.h" +#include "hif-ops.h" +#include "trace.h" + +#include <asm/unaligned.h> + +#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask)) + +static void ath6kl_htc_mbox_cleanup(struct htc_target *target); +static void ath6kl_htc_mbox_stop(struct htc_target *target); +static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target, + struct list_head *pkt_queue); +static void ath6kl_htc_set_credit_dist(struct htc_target *target, + struct ath6kl_htc_credit_info *cred_info, + u16 svc_pri_order[], int len); + +/* threshold to re-enable Tx bundling for an AC*/ +#define TX_RESUME_BUNDLE_THRESHOLD 1500 + +/* Functions for Tx credit handling */ +static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info, + struct htc_endpoint_credit_dist *ep_dist, + int credits) +{ + ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n", + ep_dist->endpoint, credits); + + ep_dist->credits += credits; + ep_dist->cred_assngd += credits; + cred_info->cur_free_credits -= credits; +} + +static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info, + struct list_head *ep_list, + int tot_credits) +{ + struct htc_endpoint_credit_dist *cur_ep_dist; + int count; + + ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits); + + cred_info->cur_free_credits = tot_credits; + cred_info->total_avail_credits = tot_credits; + + list_for_each_entry(cur_ep_dist, ep_list, list) { + if (cur_ep_dist->endpoint == ENDPOINT_0) + continue; + + cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg; + + if (tot_credits > 4) { + if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) || + (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) { + ath6kl_credit_deposit(cred_info, + cur_ep_dist, + cur_ep_dist->cred_min); + cur_ep_dist->dist_flags |= HTC_EP_ACTIVE; + } + } + + if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) { + ath6kl_credit_deposit(cred_info, cur_ep_dist, + cur_ep_dist->cred_min); + /* + * Control service is always marked active, it + * never goes inactive EVER. + */ + cur_ep_dist->dist_flags |= HTC_EP_ACTIVE; + } + + /* + * Streams have to be created (explicit | implicit) for all + * kinds of traffic. BE endpoints are also inactive in the + * beginning. When BE traffic starts it creates implicit + * streams that redistributes credits. + * + * Note: all other endpoints have minimums set but are + * initially given NO credits. credits will be distributed + * as traffic activity demands + */ + } + + /* + * For ath6kl_credit_seek function, + * it use list_for_each_entry_reverse to walk around the whole ep list. + * Therefore assign this lowestpri_ep_dist after walk around the ep_list + */ + cred_info->lowestpri_ep_dist = cur_ep_dist->list; + + WARN_ON(cred_info->cur_free_credits <= 0); + + list_for_each_entry(cur_ep_dist, ep_list, list) { + if (cur_ep_dist->endpoint == ENDPOINT_0) + continue; + + if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) { + cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg; + } else { + /* + * For the remaining data endpoints, we assume that + * each cred_per_msg are the same. We use a simple + * calculation here, we take the remaining credits + * and determine how many max messages this can + * cover and then set each endpoint's normal value + * equal to 3/4 this amount. 
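+ * For example, with 24 free credits and a cred_per_msg of 6, count
+ * starts at 24 and the 3/4 scaling yields a norm of 18 credits
+ * (three full messages).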
+ */ + count = (cred_info->cur_free_credits / + cur_ep_dist->cred_per_msg) + * cur_ep_dist->cred_per_msg; + count = (count * 3) >> 2; + count = max(count, cur_ep_dist->cred_per_msg); + cur_ep_dist->cred_norm = count; + } + + ath6kl_dbg(ATH6KL_DBG_CREDIT, + "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n", + cur_ep_dist->endpoint, + cur_ep_dist->svc_id, + cur_ep_dist->credits, + cur_ep_dist->cred_per_msg, + cur_ep_dist->cred_norm, + cur_ep_dist->cred_min); + } +} + +/* initialize and setup credit distribution */ +static int ath6kl_htc_mbox_credit_setup(struct htc_target *htc_target, + struct ath6kl_htc_credit_info *cred_info) +{ + u16 servicepriority[5]; + + memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info)); + + servicepriority[0] = WMI_CONTROL_SVC; /* highest */ + servicepriority[1] = WMI_DATA_VO_SVC; + servicepriority[2] = WMI_DATA_VI_SVC; + servicepriority[3] = WMI_DATA_BE_SVC; + servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */ + + /* set priority list */ + ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5); + + return 0; +} + +/* reduce an ep's credits back to a set limit */ +static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info, + struct htc_endpoint_credit_dist *ep_dist, + int limit) +{ + int credits; + + ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n", + ep_dist->endpoint, limit); + + ep_dist->cred_assngd = limit; + + if (ep_dist->credits <= limit) + return; + + credits = ep_dist->credits - limit; + ep_dist->credits -= credits; + cred_info->cur_free_credits += credits; +} + +static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info, + struct list_head *epdist_list) +{ + struct htc_endpoint_credit_dist *cur_list; + + list_for_each_entry(cur_list, epdist_list, list) { + if (cur_list->endpoint == ENDPOINT_0) + continue; + + if (cur_list->cred_to_dist > 0) { + cur_list->credits += cur_list->cred_to_dist; + cur_list->cred_to_dist = 0; + + if (cur_list->credits > cur_list->cred_assngd) + ath6kl_credit_reduce(cred_info, + cur_list, + cur_list->cred_assngd); + + if (cur_list->credits > cur_list->cred_norm) + ath6kl_credit_reduce(cred_info, cur_list, + cur_list->cred_norm); + + if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) { + if (cur_list->txq_depth == 0) + ath6kl_credit_reduce(cred_info, + cur_list, 0); + } + } + } +} + +/* + * HTC has an endpoint that needs credits, ep_dist is the endpoint in + * question. + */ +static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info, + struct htc_endpoint_credit_dist *ep_dist) +{ + struct htc_endpoint_credit_dist *curdist_list; + int credits = 0; + int need; + + if (ep_dist->svc_id == WMI_CONTROL_SVC) + goto out; + + if ((ep_dist->svc_id == WMI_DATA_VI_SVC) || + (ep_dist->svc_id == WMI_DATA_VO_SVC)) + if ((ep_dist->cred_assngd >= ep_dist->cred_norm)) + goto out; + + /* + * For all other services, we follow a simple algorithm of: + * + * 1. checking the free pool for credits + * 2. checking lower priority endpoints for credits to take + */ + + credits = min(cred_info->cur_free_credits, ep_dist->seek_cred); + + if (credits >= ep_dist->seek_cred) + goto out; + + /* + * We don't have enough in the free pool, try taking away from + * lower priority services The rule for taking away credits: + * + * 1. Only take from lower priority endpoints + * 2. Only take what is allocated above the minimum (never + * starve an endpoint completely) + * 3. Only take what you need. 
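+ * For example, if seek_cred is 5 and the free pool holds only 2, we
+ * need 3 more and may shrink a lower-priority endpoint only if its
+ * assignment would stay at or above its cred_min.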
+ */ + + list_for_each_entry_reverse(curdist_list, + &cred_info->lowestpri_ep_dist, + list) { + if (curdist_list == ep_dist) + break; + + need = ep_dist->seek_cred - cred_info->cur_free_credits; + + if ((curdist_list->cred_assngd - need) >= + curdist_list->cred_min) { + /* + * The current one has been allocated more than + * it's minimum and it has enough credits assigned + * above it's minimum to fulfill our need try to + * take away just enough to fulfill our need. + */ + ath6kl_credit_reduce(cred_info, curdist_list, + curdist_list->cred_assngd - need); + + if (cred_info->cur_free_credits >= + ep_dist->seek_cred) + break; + } + + if (curdist_list->endpoint == ENDPOINT_0) + break; + } + + credits = min(cred_info->cur_free_credits, ep_dist->seek_cred); + +out: + /* did we find some credits? */ + if (credits) + ath6kl_credit_deposit(cred_info, ep_dist, credits); + + ep_dist->seek_cred = 0; +} + +/* redistribute credits based on activity change */ +static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info, + struct list_head *ep_dist_list) +{ + struct htc_endpoint_credit_dist *curdist_list; + + list_for_each_entry(curdist_list, ep_dist_list, list) { + if (curdist_list->endpoint == ENDPOINT_0) + continue; + + if ((curdist_list->svc_id == WMI_DATA_BK_SVC) || + (curdist_list->svc_id == WMI_DATA_BE_SVC)) + curdist_list->dist_flags |= HTC_EP_ACTIVE; + + if ((curdist_list->svc_id != WMI_CONTROL_SVC) && + !(curdist_list->dist_flags & HTC_EP_ACTIVE)) { + if (curdist_list->txq_depth == 0) + ath6kl_credit_reduce(info, curdist_list, 0); + else + ath6kl_credit_reduce(info, + curdist_list, + curdist_list->cred_min); + } + } +} + +/* + * + * This function is invoked whenever endpoints require credit + * distributions. A lock is held while this function is invoked, this + * function shall NOT block. The ep_dist_list is a list of distribution + * structures in prioritized order as defined by the call to the + * htc_set_credit_dist() api. + */ +static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info, + struct list_head *ep_dist_list, + enum htc_credit_dist_reason reason) +{ + switch (reason) { + case HTC_CREDIT_DIST_SEND_COMPLETE: + ath6kl_credit_update(cred_info, ep_dist_list); + break; + case HTC_CREDIT_DIST_ACTIVITY_CHANGE: + ath6kl_credit_redistribute(cred_info, ep_dist_list); + break; + default: + break; + } + + WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits); + WARN_ON(cred_info->cur_free_credits < 0); +} + +static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len) +{ + u8 *align_addr; + + if (!IS_ALIGNED((unsigned long) *buf, 4)) { + align_addr = PTR_ALIGN(*buf - 4, 4); + memmove(align_addr, *buf, len); + *buf = align_addr; + } +} + +static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags, + int ctrl0, int ctrl1) +{ + struct htc_frame_hdr *hdr; + + packet->buf -= HTC_HDR_LENGTH; + hdr = (struct htc_frame_hdr *)packet->buf; + + /* Endianess? */ + put_unaligned((u16)packet->act_len, &hdr->payld_len); + hdr->flags = flags; + hdr->eid = packet->endpoint; + hdr->ctrl[0] = ctrl0; + hdr->ctrl[1] = ctrl1; +} + +static void htc_reclaim_txctrl_buf(struct htc_target *target, + struct htc_packet *pkt) +{ + spin_lock_bh(&target->htc_lock); + list_add_tail(&pkt->list, &target->free_ctrl_txbuf); + spin_unlock_bh(&target->htc_lock); +} + +static struct htc_packet *htc_get_control_buf(struct htc_target *target, + bool tx) +{ + struct htc_packet *packet = NULL; + struct list_head *buf_list; + + buf_list = tx ? 
&target->free_ctrl_txbuf : &target->free_ctrl_rxbuf; + + spin_lock_bh(&target->htc_lock); + + if (list_empty(buf_list)) { + spin_unlock_bh(&target->htc_lock); + return NULL; + } + + packet = list_first_entry(buf_list, struct htc_packet, list); + list_del(&packet->list); + spin_unlock_bh(&target->htc_lock); + + if (tx) + packet->buf = packet->buf_start + HTC_HDR_LENGTH; + + return packet; +} + +static void htc_tx_comp_update(struct htc_target *target, + struct htc_endpoint *endpoint, + struct htc_packet *packet) +{ + packet->completion = NULL; + packet->buf += HTC_HDR_LENGTH; + + if (!packet->status) + return; + + ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n", + packet->status, packet->endpoint, packet->act_len, + packet->info.tx.cred_used); + + /* on failure to submit, reclaim credits for this packet */ + spin_lock_bh(&target->tx_lock); + endpoint->cred_dist.cred_to_dist += + packet->info.tx.cred_used; + endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq); + + ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n", + target->credit_info, &target->cred_dist_list); + + ath6kl_credit_distribute(target->credit_info, + &target->cred_dist_list, + HTC_CREDIT_DIST_SEND_COMPLETE); + + spin_unlock_bh(&target->tx_lock); +} + +static void htc_tx_complete(struct htc_endpoint *endpoint, + struct list_head *txq) +{ + if (list_empty(txq)) + return; + + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx complete ep %d pkts %d\n", + endpoint->eid, get_queue_depth(txq)); + + ath6kl_tx_complete(endpoint->target, txq); +} + +static void htc_tx_comp_handler(struct htc_target *target, + struct htc_packet *packet) +{ + struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint]; + struct list_head container; + + ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n", + packet->info.tx.seqno); + + htc_tx_comp_update(target, endpoint, packet); + INIT_LIST_HEAD(&container); + list_add_tail(&packet->list, &container); + /* do completion */ + htc_tx_complete(endpoint, &container); +} + +static void htc_async_tx_scat_complete(struct htc_target *target, + struct hif_scatter_req *scat_req) +{ + struct htc_endpoint *endpoint; + struct htc_packet *packet; + struct list_head tx_compq; + int i; + + INIT_LIST_HEAD(&tx_compq); + + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx scat complete len %d entries %d\n", + scat_req->len, scat_req->scat_entries); + + if (scat_req->status) + ath6kl_err("send scatter req failed: %d\n", scat_req->status); + + packet = scat_req->scat_list[0].packet; + endpoint = &target->endpoint[packet->endpoint]; + + /* walk through the scatter list and process */ + for (i = 0; i < scat_req->scat_entries; i++) { + packet = scat_req->scat_list[i].packet; + if (!packet) { + WARN_ON(1); + return; + } + + packet->status = scat_req->status; + htc_tx_comp_update(target, endpoint, packet); + list_add_tail(&packet->list, &tx_compq); + } + + /* free scatter request */ + hif_scatter_req_add(target->dev->ar, scat_req); + + /* complete all packets */ + htc_tx_complete(endpoint, &tx_compq); +} + +static int ath6kl_htc_tx_issue(struct htc_target *target, + struct htc_packet *packet) +{ + int status; + bool sync = false; + u32 padded_len, send_len; + + if (!packet->completion) + sync = true; + + send_len = packet->act_len + HTC_HDR_LENGTH; + + padded_len = CALC_TXRX_PADDED_LEN(target, send_len); + + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n", + send_len, packet->info.tx.seqno, padded_len, + target->dev->ar->mbox_info.htc_addr, + sync ? 
"sync" : "async"); + + if (sync) { + status = hif_read_write_sync(target->dev->ar, + target->dev->ar->mbox_info.htc_addr, + packet->buf, padded_len, + HIF_WR_SYNC_BLOCK_INC); + + packet->status = status; + packet->buf += HTC_HDR_LENGTH; + } else + status = hif_write_async(target->dev->ar, + target->dev->ar->mbox_info.htc_addr, + packet->buf, padded_len, + HIF_WR_ASYNC_BLOCK_INC, packet); + + trace_ath6kl_htc_tx(status, packet->endpoint, packet->buf, send_len); + + return status; +} + +static int htc_check_credits(struct htc_target *target, + struct htc_endpoint *ep, u8 *flags, + enum htc_endpoint_id eid, unsigned int len, + int *req_cred) +{ + *req_cred = (len > target->tgt_cred_sz) ? + DIV_ROUND_UP(len, target->tgt_cred_sz) : 1; + + ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n", + *req_cred, ep->cred_dist.credits); + + if (ep->cred_dist.credits < *req_cred) { + if (eid == ENDPOINT_0) + return -EINVAL; + + /* Seek more credits */ + ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits; + + ath6kl_credit_seek(target->credit_info, &ep->cred_dist); + + ep->cred_dist.seek_cred = 0; + + if (ep->cred_dist.credits < *req_cred) { + ath6kl_dbg(ATH6KL_DBG_CREDIT, + "credit not found for ep %d\n", + eid); + return -EINVAL; + } + } + + ep->cred_dist.credits -= *req_cred; + ep->ep_st.cred_cosumd += *req_cred; + + /* When we are getting low on credits, ask for more */ + if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) { + ep->cred_dist.seek_cred = + ep->cred_dist.cred_per_msg - ep->cred_dist.credits; + + ath6kl_credit_seek(target->credit_info, &ep->cred_dist); + + /* see if we were successful in getting more */ + if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) { + /* tell the target we need credits ASAP! */ + *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE; + ep->ep_st.cred_low_indicate += 1; + ath6kl_dbg(ATH6KL_DBG_CREDIT, + "credit we need credits asap\n"); + } + } + + return 0; +} + +static void ath6kl_htc_tx_pkts_get(struct htc_target *target, + struct htc_endpoint *endpoint, + struct list_head *queue) +{ + int req_cred; + u8 flags; + struct htc_packet *packet; + unsigned int len; + + while (true) { + flags = 0; + + if (list_empty(&endpoint->txq)) + break; + packet = list_first_entry(&endpoint->txq, struct htc_packet, + list); + + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx got packet 0x%p queue depth %d\n", + packet, get_queue_depth(&endpoint->txq)); + + len = CALC_TXRX_PADDED_LEN(target, + packet->act_len + HTC_HDR_LENGTH); + + if (htc_check_credits(target, endpoint, &flags, + packet->endpoint, len, &req_cred)) + break; + + /* now we can fully move onto caller's queue */ + packet = list_first_entry(&endpoint->txq, struct htc_packet, + list); + list_move_tail(&packet->list, queue); + + /* save the number of credits this packet consumed */ + packet->info.tx.cred_used = req_cred; + + /* all TX packets are handled asynchronously */ + packet->completion = htc_tx_comp_handler; + packet->context = target; + endpoint->ep_st.tx_issued += 1; + + /* save send flags */ + packet->info.tx.flags = flags; + packet->info.tx.seqno = endpoint->seqno; + endpoint->seqno++; + } +} + +/* See if the padded tx length falls on a credit boundary */ +static int htc_get_credit_padding(unsigned int cred_sz, int *len, + struct htc_endpoint *ep) +{ + int rem_cred, cred_pad; + + rem_cred = *len % cred_sz; + + /* No padding needed */ + if (!rem_cred) + return 0; + + if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN)) + return -1; + + /* + * The transfer consumes a "partial" credit, this + * packet cannot be 
bundled unless we add
+ * additional "dummy" padding (max 255 bytes) to
+ * consume the entire credit.
+ */
+ cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
+
+ if ((cred_pad > 0) && (cred_pad <= 255))
+ *len += cred_pad;
+ else
+ /* The amount of padding is too large, send as non-bundled */
+ return -1;
+
+ return cred_pad;
+}
+
+static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct hif_scatter_req *scat_req,
+ int n_scat,
+ struct list_head *queue)
+{
+ struct htc_packet *packet;
+ int i, len, rem_scat, cred_pad;
+ int status = 0;
+ u8 flags;
+
+ rem_scat = target->max_tx_bndl_sz;
+
+ for (i = 0; i < n_scat; i++) {
+ scat_req->scat_list[i].packet = NULL;
+
+ if (list_empty(queue))
+ break;
+
+ packet = list_first_entry(queue, struct htc_packet, list);
+ len = CALC_TXRX_PADDED_LEN(target,
+ packet->act_len + HTC_HDR_LENGTH);
+
+ cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
+ &len, endpoint);
+ if (cred_pad < 0 || rem_scat < len) {
+ status = -ENOSPC;
+ break;
+ }
+
+ rem_scat -= len;
+ /* now remove it from the queue */
+ list_del(&packet->list);
+
+ scat_req->scat_list[i].packet = packet;
+ /* prepare packet and flag message as part of a send bundle */
+ flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE;
+ ath6kl_htc_tx_prep_pkt(packet, flags,
+ cred_pad, packet->info.tx.seqno);
+ /* Make sure the buffer is 4-byte aligned */
+ ath6kl_htc_tx_buf_align(&packet->buf,
+ packet->act_len + HTC_HDR_LENGTH);
+ scat_req->scat_list[i].buf = packet->buf;
+ scat_req->scat_list[i].len = len;
+
+ scat_req->len += len;
+ scat_req->scat_entries++;
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
+ i, packet, packet->info.tx.seqno, len, rem_scat);
+ }
+
+ /* Roll back scatter setup in case of any failure */
+ if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
+ for (i = scat_req->scat_entries - 1; i >= 0; i--) {
+ packet = scat_req->scat_list[i].packet;
+ if (packet) {
+ packet->buf += HTC_HDR_LENGTH;
+ list_add(&packet->list, queue);
+ }
+ }
+ return -EAGAIN;
+ }
+
+ return status;
+}
+
+/*
+ * Drain a queue and send as bundles. This function may return without
+ * fully draining the queue when:
+ *
+ * 1. scatter resources are exhausted
+ * 2. a message that will consume a partial credit will stop the
+ * bundling process early
+ * 3.
we drop below the minimum number of messages for a bundle + */ +static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint, + struct list_head *queue, + int *sent_bundle, int *n_bundle_pkts) +{ + struct htc_target *target = endpoint->target; + struct hif_scatter_req *scat_req = NULL; + int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0, i; + struct htc_packet *packet; + int status; + u32 txb_mask; + u8 ac = WMM_NUM_AC; + + if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) && + (WMI_CONTROL_SVC != endpoint->svc_id)) + ac = target->dev->ar->ep2ac_map[endpoint->eid]; + + while (true) { + status = 0; + n_scat = get_queue_depth(queue); + n_scat = min(n_scat, target->msg_per_bndl_max); + + if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE) + /* not enough to bundle */ + break; + + scat_req = hif_scatter_req_get(target->dev->ar); + + if (!scat_req) { + /* no scatter resources */ + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx no more scatter resources\n"); + break; + } + + if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) { + if (WMM_AC_BE == ac) + /* + * BE, BK have priorities and bit + * positions reversed + */ + txb_mask = (1 << WMM_AC_BK); + else + /* + * any AC with priority lower than + * itself + */ + txb_mask = ((1 << ac) - 1); + + /* + * when the scatter request resources drop below a + * certain threshold, disable Tx bundling for all + * AC's with priority lower than the current requesting + * AC. Otherwise re-enable Tx bundling for them + */ + if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS) + target->tx_bndl_mask &= ~txb_mask; + else + target->tx_bndl_mask |= txb_mask; + } + + ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n", + n_scat); + + scat_req->len = 0; + scat_req->scat_entries = 0; + + status = ath6kl_htc_tx_setup_scat_list(target, endpoint, + scat_req, n_scat, + queue); + if (status == -EAGAIN) { + hif_scatter_req_add(target->dev->ar, scat_req); + break; + } + + /* send path is always asynchronous */ + scat_req->complete = htc_async_tx_scat_complete; + n_sent_bundle++; + tot_pkts_bundle += scat_req->scat_entries; + + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx scatter bytes %d entries %d\n", + scat_req->len, scat_req->scat_entries); + + for (i = 0; i < scat_req->scat_entries; i++) { + packet = scat_req->scat_list[i].packet; + trace_ath6kl_htc_tx(packet->status, packet->endpoint, + packet->buf, packet->act_len); + } + + ath6kl_hif_submit_scat_req(target->dev, scat_req, false); + + if (status) + break; + } + + *sent_bundle = n_sent_bundle; + *n_bundle_pkts = tot_pkts_bundle; + ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n", + n_sent_bundle); + + return; +} + +static void ath6kl_htc_tx_from_queue(struct htc_target *target, + struct htc_endpoint *endpoint) +{ + struct list_head txq; + struct htc_packet *packet; + int bundle_sent; + int n_pkts_bundle; + u8 ac = WMM_NUM_AC; + int status; + + spin_lock_bh(&target->tx_lock); + + endpoint->tx_proc_cnt++; + if (endpoint->tx_proc_cnt > 1) { + endpoint->tx_proc_cnt--; + spin_unlock_bh(&target->tx_lock); + ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n"); + return; + } + + /* + * drain the endpoint TX queue for transmission as long + * as we have enough credits. 
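+ *
+ * In short: if the AC's bit is set in tx_bndl_mask and at least
+ * HTC_MIN_HTC_MSGS_TO_BUNDLE packets are queued, a scatter bundle
+ * is attempted first; anything the bundling pass leaves behind is
+ * sent one packet at a time via ath6kl_htc_tx_issue().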
+ */
+ INIT_LIST_HEAD(&txq);
+
+ if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
+ (WMI_CONTROL_SVC != endpoint->svc_id))
+ ac = target->dev->ar->ep2ac_map[endpoint->eid];
+
+ while (true) {
+ if (list_empty(&endpoint->txq))
+ break;
+
+ ath6kl_htc_tx_pkts_get(target, endpoint, &txq);
+
+ if (list_empty(&txq))
+ break;
+
+ spin_unlock_bh(&target->tx_lock);
+
+ bundle_sent = 0;
+ n_pkts_bundle = 0;
+
+ while (true) {
+ /* try to send a bundle on each pass */
+ if ((target->tx_bndl_mask) &&
+ (get_queue_depth(&txq) >=
+ HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
+ int temp1 = 0, temp2 = 0;
+
+ /* check if bundling is enabled for an AC */
+ if (target->tx_bndl_mask & (1 << ac)) {
+ ath6kl_htc_tx_bundle(endpoint, &txq,
+ &temp1, &temp2);
+ bundle_sent += temp1;
+ n_pkts_bundle += temp2;
+ }
+ }
+
+ if (list_empty(&txq))
+ break;
+
+ packet = list_first_entry(&txq, struct htc_packet,
+ list);
+ list_del(&packet->list);
+
+ ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
+ 0, packet->info.tx.seqno);
+ status = ath6kl_htc_tx_issue(target, packet);
+
+ if (status) {
+ packet->status = status;
+ packet->completion(packet->context, packet);
+ }
+ }
+
+ spin_lock_bh(&target->tx_lock);
+
+ endpoint->ep_st.tx_bundles += bundle_sent;
+ endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
+
+ /*
+ * if an AC has bundling disabled and no tx bundling
+ * has occurred continuously for a certain number of TX,
+ * re-enable tx bundling for this AC
+ */
+ if (!bundle_sent) {
+ if (!(target->tx_bndl_mask & (1 << ac)) &&
+ (ac < WMM_NUM_AC)) {
+ if (++target->ac_tx_count[ac] >=
+ TX_RESUME_BUNDLE_THRESHOLD) {
+ target->ac_tx_count[ac] = 0;
+ target->tx_bndl_mask |= (1 << ac);
+ }
+ }
+ } else {
+ /* tx bundling will reset the counter */
+ if (ac < WMM_NUM_AC)
+ target->ac_tx_count[ac] = 0;
+ }
+ }
+
+ endpoint->tx_proc_cnt = 0;
+ spin_unlock_bh(&target->tx_lock);
+}
+
+static bool ath6kl_htc_tx_try(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct htc_packet *tx_pkt)
+{
+ struct htc_ep_callbacks ep_cb;
+ int txq_depth;
+ bool overflow = false;
+
+ ep_cb = endpoint->ep_cb;
+
+ spin_lock_bh(&target->tx_lock);
+ txq_depth = get_queue_depth(&endpoint->txq);
+ spin_unlock_bh(&target->tx_lock);
+
+ if (txq_depth >= endpoint->max_txq_depth)
+ overflow = true;
+
+ if (overflow)
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx overflow ep %d depth %d max %d\n",
+ endpoint->eid, txq_depth,
+ endpoint->max_txq_depth);
+
+ if (overflow && ep_cb.tx_full) {
+ if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
+ HTC_SEND_FULL_DROP) {
+ endpoint->ep_st.tx_dropped += 1;
+ return false;
+ }
+ }
+
+ spin_lock_bh(&target->tx_lock);
+ list_add_tail(&tx_pkt->list, &endpoint->txq);
+ spin_unlock_bh(&target->tx_lock);
+
+ ath6kl_htc_tx_from_queue(target, endpoint);
+
+ return true;
+}
+
+static void htc_chk_ep_txq(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_endpoint_credit_dist *cred_dist;
+
+ /*
+ * Run through the credit distribution list to see if there are
+ * packets queued. NOTE: no locks need to be taken since the
+ * distribution list is not dynamic (cannot be re-ordered) and we
+ * are not modifying any state.
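+ *
+ * (The target's tx_lock is still taken inside the loop below, but
+ * only to sample each endpoint's txq depth; the list structure
+ * itself is never modified after setup.)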
+ */ + list_for_each_entry(cred_dist, &target->cred_dist_list, list) { + endpoint = cred_dist->htc_ep; + + spin_lock_bh(&target->tx_lock); + if (!list_empty(&endpoint->txq)) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc creds ep %d credits %d pkts %d\n", + cred_dist->endpoint, + endpoint->cred_dist.credits, + get_queue_depth(&endpoint->txq)); + spin_unlock_bh(&target->tx_lock); + /* + * Try to start the stalled queue, this list is + * ordered by priority. If there are credits + * available the highest priority queue will get a + * chance to reclaim credits from lower priority + * ones. + */ + ath6kl_htc_tx_from_queue(target, endpoint); + spin_lock_bh(&target->tx_lock); + } + spin_unlock_bh(&target->tx_lock); + } +} + +static int htc_setup_tx_complete(struct htc_target *target) +{ + struct htc_packet *send_pkt = NULL; + int status; + + send_pkt = htc_get_control_buf(target, true); + + if (!send_pkt) + return -ENOMEM; + + if (target->htc_tgt_ver >= HTC_VERSION_2P1) { + struct htc_setup_comp_ext_msg *setup_comp_ext; + u32 flags = 0; + + setup_comp_ext = + (struct htc_setup_comp_ext_msg *)send_pkt->buf; + memset(setup_comp_ext, 0, sizeof(*setup_comp_ext)); + setup_comp_ext->msg_id = + cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID); + + if (target->msg_per_bndl_max > 0) { + /* Indicate HTC bundling to the target */ + flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN; + setup_comp_ext->msg_per_rxbndl = + target->msg_per_bndl_max; + } + + memcpy(&setup_comp_ext->flags, &flags, + sizeof(setup_comp_ext->flags)); + set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext, + sizeof(struct htc_setup_comp_ext_msg), + ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); + + } else { + struct htc_setup_comp_msg *setup_comp; + setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf; + memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg)); + setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID); + set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp, + sizeof(struct htc_setup_comp_msg), + ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); + } + + /* we want synchronous operation */ + send_pkt->completion = NULL; + ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0); + status = ath6kl_htc_tx_issue(target, send_pkt); + htc_reclaim_txctrl_buf(target, send_pkt); + + return status; +} + +static void ath6kl_htc_set_credit_dist(struct htc_target *target, + struct ath6kl_htc_credit_info *credit_info, + u16 srvc_pri_order[], int list_len) +{ + struct htc_endpoint *endpoint; + int i, ep; + + target->credit_info = credit_info; + + list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list, + &target->cred_dist_list); + + for (i = 0; i < list_len; i++) { + for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) { + endpoint = &target->endpoint[ep]; + if (endpoint->svc_id == srvc_pri_order[i]) { + list_add_tail(&endpoint->cred_dist.list, + &target->cred_dist_list); + break; + } + } + if (ep >= ENDPOINT_MAX) { + WARN_ON(1); + return; + } + } +} + +static int ath6kl_htc_mbox_tx(struct htc_target *target, + struct htc_packet *packet) +{ + struct htc_endpoint *endpoint; + struct list_head queue; + + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx ep id %d buf 0x%p len %d\n", + packet->endpoint, packet->buf, packet->act_len); + + if (packet->endpoint >= ENDPOINT_MAX) { + WARN_ON(1); + return -EINVAL; + } + + endpoint = &target->endpoint[packet->endpoint]; + + if (!ath6kl_htc_tx_try(target, endpoint, packet)) { + packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ? 
+ -ECANCELED : -ENOSPC; + INIT_LIST_HEAD(&queue); + list_add(&packet->list, &queue); + htc_tx_complete(endpoint, &queue); + } + + return 0; +} + +/* flush endpoint TX queue */ +static void ath6kl_htc_mbox_flush_txep(struct htc_target *target, + enum htc_endpoint_id eid, u16 tag) +{ + struct htc_packet *packet, *tmp_pkt; + struct list_head discard_q, container; + struct htc_endpoint *endpoint = &target->endpoint[eid]; + + if (!endpoint->svc_id) { + WARN_ON(1); + return; + } + + /* initialize the discard queue */ + INIT_LIST_HEAD(&discard_q); + + spin_lock_bh(&target->tx_lock); + + list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) { + if ((tag == HTC_TX_PACKET_TAG_ALL) || + (tag == packet->info.tx.tag)) + list_move_tail(&packet->list, &discard_q); + } + + spin_unlock_bh(&target->tx_lock); + + list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) { + packet->status = -ECANCELED; + list_del(&packet->list); + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n", + packet, packet->act_len, + packet->endpoint, packet->info.tx.tag); + + INIT_LIST_HEAD(&container); + list_add_tail(&packet->list, &container); + htc_tx_complete(endpoint, &container); + } +} + +static void ath6kl_htc_flush_txep_all(struct htc_target *target) +{ + struct htc_endpoint *endpoint; + int i; + + dump_cred_dist_stats(target); + + for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { + endpoint = &target->endpoint[i]; + if (endpoint->svc_id == 0) + /* not in use.. */ + continue; + ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL); + } +} + +static void ath6kl_htc_mbox_activity_changed(struct htc_target *target, + enum htc_endpoint_id eid, + bool active) +{ + struct htc_endpoint *endpoint = &target->endpoint[eid]; + bool dist = false; + + if (endpoint->svc_id == 0) { + WARN_ON(1); + return; + } + + spin_lock_bh(&target->tx_lock); + + if (active) { + if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) { + endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE; + dist = true; + } + } else { + if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) { + endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE; + dist = true; + } + } + + if (dist) { + endpoint->cred_dist.txq_depth = + get_queue_depth(&endpoint->txq); + + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx activity ctxt 0x%p dist 0x%p\n", + target->credit_info, &target->cred_dist_list); + + ath6kl_credit_distribute(target->credit_info, + &target->cred_dist_list, + HTC_CREDIT_DIST_ACTIVITY_CHANGE); + } + + spin_unlock_bh(&target->tx_lock); + + if (dist && !active) + htc_chk_ep_txq(target); +} + +/* HTC Rx */ + +static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint, + int n_look_ahds) +{ + endpoint->ep_st.rx_pkts++; + if (n_look_ahds == 1) + endpoint->ep_st.rx_lkahds++; + else if (n_look_ahds > 1) + endpoint->ep_st.rx_bundle_lkahd++; +} + +static inline bool htc_valid_rx_frame_len(struct htc_target *target, + enum htc_endpoint_id eid, int len) +{ + return (eid == target->dev->ar->ctrl_ep) ? 
+ len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE; +} + +static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet) +{ + struct list_head queue; + + INIT_LIST_HEAD(&queue); + list_add_tail(&packet->list, &queue); + return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue); +} + +static void htc_reclaim_rxbuf(struct htc_target *target, + struct htc_packet *packet, + struct htc_endpoint *ep) +{ + if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) { + htc_rxpkt_reset(packet); + packet->status = -ECANCELED; + ep->ep_cb.rx(ep->target, packet); + } else { + htc_rxpkt_reset(packet); + htc_add_rxbuf((void *)(target), packet); + } +} + +static void reclaim_rx_ctrl_buf(struct htc_target *target, + struct htc_packet *packet) +{ + spin_lock_bh(&target->htc_lock); + list_add_tail(&packet->list, &target->free_ctrl_rxbuf); + spin_unlock_bh(&target->htc_lock); +} + +static int ath6kl_htc_rx_packet(struct htc_target *target, + struct htc_packet *packet, + u32 rx_len) +{ + struct ath6kl_device *dev = target->dev; + u32 padded_len; + int status; + + padded_len = CALC_TXRX_PADDED_LEN(target, rx_len); + + if (padded_len > packet->buf_len) { + ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n", + padded_len, rx_len, packet->buf_len); + return -ENOMEM; + } + + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n", + packet, packet->info.rx.exp_hdr, + padded_len, dev->ar->mbox_info.htc_addr); + + status = hif_read_write_sync(dev->ar, + dev->ar->mbox_info.htc_addr, + packet->buf, padded_len, + HIF_RD_SYNC_BLOCK_FIX); + + packet->status = status; + + return status; +} + +/* + * optimization for recv packets, we can indicate a + * "hint" that there are more single-packets to fetch + * on this endpoint. + */ +static void ath6kl_htc_rx_set_indicate(u32 lk_ahd, + struct htc_endpoint *endpoint, + struct htc_packet *packet) +{ + struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd; + + if (htc_hdr->eid == packet->endpoint) { + if (!list_empty(&endpoint->rx_bufq)) + packet->info.rx.indicat_flags |= + HTC_RX_FLAGS_INDICATE_MORE_PKTS; + } +} + +static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint) +{ + struct htc_ep_callbacks ep_cb = endpoint->ep_cb; + + if (ep_cb.rx_refill_thresh > 0) { + spin_lock_bh(&endpoint->target->rx_lock); + if (get_queue_depth(&endpoint->rx_bufq) + < ep_cb.rx_refill_thresh) { + spin_unlock_bh(&endpoint->target->rx_lock); + ep_cb.rx_refill(endpoint->target, endpoint->eid); + return; + } + spin_unlock_bh(&endpoint->target->rx_lock); + } +} + +/* This function is called with rx_lock held */ +static int ath6kl_htc_rx_setup(struct htc_target *target, + struct htc_endpoint *ep, + u32 *lk_ahds, struct list_head *queue, int n_msg) +{ + struct htc_packet *packet; + /* FIXME: type of lk_ahds can't be right */ + struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds; + struct htc_ep_callbacks ep_cb; + int status = 0, j, full_len; + bool no_recycle; + + full_len = CALC_TXRX_PADDED_LEN(target, + le16_to_cpu(htc_hdr->payld_len) + + sizeof(*htc_hdr)); + + if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) { + ath6kl_warn("Rx buffer requested with invalid length htc_hdr:eid %d, flags 0x%x, len %d\n", + htc_hdr->eid, htc_hdr->flags, + le16_to_cpu(htc_hdr->payld_len)); + return -EINVAL; + } + + ep_cb = ep->ep_cb; + for (j = 0; j < n_msg; j++) { + /* + * Reset flag, any packets allocated using the + * rx_alloc() API cannot be recycled on + * cleanup,they must be explicitly returned. 
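+ *
+ * Concretely: a packet handed out by ep_cb.rx_allocthresh() is
+ * tagged HTC_RX_PKT_NO_RECYCLE below, so htc_reclaim_rxbuf()
+ * completes it back to its owner with -ECANCELED instead of
+ * returning it to the endpoint's rx_bufq.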
+ */ + no_recycle = false; + + if (ep_cb.rx_allocthresh && + (full_len > ep_cb.rx_alloc_thresh)) { + ep->ep_st.rx_alloc_thresh_hit += 1; + ep->ep_st.rxalloc_thresh_byte += + le16_to_cpu(htc_hdr->payld_len); + + spin_unlock_bh(&target->rx_lock); + no_recycle = true; + + packet = ep_cb.rx_allocthresh(ep->target, ep->eid, + full_len); + spin_lock_bh(&target->rx_lock); + } else { + /* refill handler is being used */ + if (list_empty(&ep->rx_bufq)) { + if (ep_cb.rx_refill) { + spin_unlock_bh(&target->rx_lock); + ep_cb.rx_refill(ep->target, ep->eid); + spin_lock_bh(&target->rx_lock); + } + } + + if (list_empty(&ep->rx_bufq)) { + packet = NULL; + } else { + packet = list_first_entry(&ep->rx_bufq, + struct htc_packet, list); + list_del(&packet->list); + } + } + + if (!packet) { + target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS; + target->ep_waiting = ep->eid; + return -ENOSPC; + } + + /* clear flags */ + packet->info.rx.rx_flags = 0; + packet->info.rx.indicat_flags = 0; + packet->status = 0; + + if (no_recycle) + /* + * flag that these packets cannot be + * recycled, they have to be returned to + * the user + */ + packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE; + + /* Caller needs to free this upon any failure */ + list_add_tail(&packet->list, queue); + + if (target->htc_flags & HTC_OP_STATE_STOPPING) { + status = -ECANCELED; + break; + } + + if (j) { + packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR; + packet->info.rx.exp_hdr = 0xFFFFFFFF; + } else + /* set expected look ahead */ + packet->info.rx.exp_hdr = *lk_ahds; + + packet->act_len = le16_to_cpu(htc_hdr->payld_len) + + HTC_HDR_LENGTH; + } + + return status; +} + +static int ath6kl_htc_rx_alloc(struct htc_target *target, + u32 lk_ahds[], int msg, + struct htc_endpoint *endpoint, + struct list_head *queue) +{ + int status = 0; + struct htc_packet *packet, *tmp_pkt; + struct htc_frame_hdr *htc_hdr; + int i, n_msg; + + spin_lock_bh(&target->rx_lock); + + for (i = 0; i < msg; i++) { + htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i]; + + if (htc_hdr->eid >= ENDPOINT_MAX) { + ath6kl_err("invalid ep in look-ahead: %d\n", + htc_hdr->eid); + status = -ENOMEM; + break; + } + + if (htc_hdr->eid != endpoint->eid) { + ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n", + htc_hdr->eid, endpoint->eid, i); + status = -ENOMEM; + break; + } + + if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) { + ath6kl_err("payload len %d exceeds max htc : %d !\n", + htc_hdr->payld_len, + (u32) HTC_MAX_PAYLOAD_LENGTH); + status = -ENOMEM; + break; + } + + if (endpoint->svc_id == 0) { + ath6kl_err("ep %d is not connected !\n", htc_hdr->eid); + status = -ENOMEM; + break; + } + + if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) { + /* + * HTC header indicates that every packet to follow + * has the same padded length so that it can be + * optimally fetched as a full bundle. + */ + n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >> + HTC_FLG_RX_BNDL_CNT_S; + + /* the count doesn't include the starter frame */ + n_msg++; + if (n_msg > target->msg_per_bndl_max) { + status = -ENOMEM; + break; + } + + endpoint->ep_st.rx_bundle_from_hdr += 1; + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx bundle pkts %d\n", + n_msg); + } else + /* HTC header only indicates 1 message to fetch */ + n_msg = 1; + + /* Setup packet buffers for each message */ + status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i], + queue, n_msg); + + /* + * This is due to unavailability of buffers to rx entire data. 
+ * Return no error so that free buffers from queue can be used + * to receive partial data. + */ + if (status == -ENOSPC) { + spin_unlock_bh(&target->rx_lock); + return 0; + } + + if (status) + break; + } + + spin_unlock_bh(&target->rx_lock); + + if (status) { + list_for_each_entry_safe(packet, tmp_pkt, queue, list) { + list_del(&packet->list); + htc_reclaim_rxbuf(target, packet, + &target->endpoint[packet->endpoint]); + } + } + + return status; +} + +static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets) +{ + if (packets->endpoint != ENDPOINT_0) { + WARN_ON(1); + return; + } + + if (packets->status == -ECANCELED) { + reclaim_rx_ctrl_buf(context, packets); + return; + } + + if (packets->act_len > 0) { + ath6kl_err("htc_ctrl_rx, got message with len:%zu\n", + packets->act_len + HTC_HDR_LENGTH); + + ath6kl_dbg_dump(ATH6KL_DBG_HTC, + "htc rx unexpected endpoint 0 message", "", + packets->buf - HTC_HDR_LENGTH, + packets->act_len + HTC_HDR_LENGTH); + } + + htc_reclaim_rxbuf(context, packets, &context->endpoint[0]); +} + +static void htc_proc_cred_rpt(struct htc_target *target, + struct htc_credit_report *rpt, + int n_entries, + enum htc_endpoint_id from_ep) +{ + struct htc_endpoint *endpoint; + int tot_credits = 0, i; + bool dist = false; + + spin_lock_bh(&target->tx_lock); + + for (i = 0; i < n_entries; i++, rpt++) { + if (rpt->eid >= ENDPOINT_MAX) { + WARN_ON(1); + spin_unlock_bh(&target->tx_lock); + return; + } + + endpoint = &target->endpoint[rpt->eid]; + + ath6kl_dbg(ATH6KL_DBG_CREDIT, + "credit report ep %d credits %d\n", + rpt->eid, rpt->credits); + + endpoint->ep_st.tx_cred_rpt += 1; + endpoint->ep_st.cred_retnd += rpt->credits; + + if (from_ep == rpt->eid) { + /* + * This credit report arrived on the same endpoint + * indicating it arrived in an RX packet. + */ + endpoint->ep_st.cred_from_rx += rpt->credits; + endpoint->ep_st.cred_rpt_from_rx += 1; + } else if (from_ep == ENDPOINT_0) { + /* credit arrived on endpoint 0 as a NULL message */ + endpoint->ep_st.cred_from_ep0 += rpt->credits; + endpoint->ep_st.cred_rpt_ep0 += 1; + } else { + endpoint->ep_st.cred_from_other += rpt->credits; + endpoint->ep_st.cred_rpt_from_other += 1; + } + + if (rpt->eid == ENDPOINT_0) + /* always give endpoint 0 credits back */ + endpoint->cred_dist.credits += rpt->credits; + else { + endpoint->cred_dist.cred_to_dist += rpt->credits; + dist = true; + } + + /* + * Refresh tx depth for distribution function that will + * recover these credits NOTE: this is only valid when + * there are credits to recover! 
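+ *
+ * Illustration: a report of, say, 3 credits for a data endpoint
+ * accumulates in cred_to_dist here; the SEND_COMPLETE
+ * distribution pass below folds them into the endpoint's credits
+ * and trims anything above cred_norm back into the free pool.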
+ */ + endpoint->cred_dist.txq_depth = + get_queue_depth(&endpoint->txq); + + tot_credits += rpt->credits; + } + + if (dist) { + /* + * This was a credit return based on a completed send + * operations note, this is done with the lock held + */ + ath6kl_credit_distribute(target->credit_info, + &target->cred_dist_list, + HTC_CREDIT_DIST_SEND_COMPLETE); + } + + spin_unlock_bh(&target->tx_lock); + + if (tot_credits) + htc_chk_ep_txq(target); +} + +static int htc_parse_trailer(struct htc_target *target, + struct htc_record_hdr *record, + u8 *record_buf, u32 *next_lk_ahds, + enum htc_endpoint_id endpoint, + int *n_lk_ahds) +{ + struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt; + struct htc_lookahead_report *lk_ahd; + int len; + + switch (record->rec_id) { + case HTC_RECORD_CREDITS: + len = record->len / sizeof(struct htc_credit_report); + if (!len) { + WARN_ON(1); + return -EINVAL; + } + + htc_proc_cred_rpt(target, + (struct htc_credit_report *) record_buf, + len, endpoint); + break; + case HTC_RECORD_LOOKAHEAD: + len = record->len / sizeof(*lk_ahd); + if (!len) { + WARN_ON(1); + return -EINVAL; + } + + lk_ahd = (struct htc_lookahead_report *) record_buf; + if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) && + next_lk_ahds) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n", + lk_ahd->pre_valid, lk_ahd->post_valid); + + /* look ahead bytes are valid, copy them over */ + memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4); + + ath6kl_dbg_dump(ATH6KL_DBG_HTC, + "htc rx next look ahead", + "", next_lk_ahds, 4); + + *n_lk_ahds = 1; + } + break; + case HTC_RECORD_LOOKAHEAD_BUNDLE: + len = record->len / sizeof(*bundle_lkahd_rpt); + if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) { + WARN_ON(1); + return -EINVAL; + } + + if (next_lk_ahds) { + int i; + + bundle_lkahd_rpt = + (struct htc_bundle_lkahd_rpt *) record_buf; + + ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd", + "", record_buf, record->len); + + for (i = 0; i < len; i++) { + memcpy((u8 *)&next_lk_ahds[i], + bundle_lkahd_rpt->lk_ahd, 4); + bundle_lkahd_rpt++; + } + + *n_lk_ahds = i; + } + break; + default: + ath6kl_err("unhandled record: id:%d len:%d\n", + record->rec_id, record->len); + break; + } + + return 0; +} + +static int htc_proc_trailer(struct htc_target *target, + u8 *buf, int len, u32 *next_lk_ahds, + int *n_lk_ahds, enum htc_endpoint_id endpoint) +{ + struct htc_record_hdr *record; + int orig_len; + int status; + u8 *record_buf; + u8 *orig_buf; + + ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len); + ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len); + + orig_buf = buf; + orig_len = len; + status = 0; + + while (len > 0) { + if (len < sizeof(struct htc_record_hdr)) { + status = -ENOMEM; + break; + } + /* these are byte aligned structs */ + record = (struct htc_record_hdr *) buf; + len -= sizeof(struct htc_record_hdr); + buf += sizeof(struct htc_record_hdr); + + if (record->len > len) { + ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n", + record->len, record->rec_id, len); + status = -ENOMEM; + break; + } + record_buf = buf; + + status = htc_parse_trailer(target, record, record_buf, + next_lk_ahds, endpoint, n_lk_ahds); + + if (status) + break; + + /* advance buffer past this record for next time around */ + buf += record->len; + len -= record->len; + } + + if (status) + ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer", + "", orig_buf, orig_len); + + return status; +} + +static int ath6kl_htc_rx_process_hdr(struct htc_target *target, + struct 
htc_packet *packet, + u32 *next_lkahds, int *n_lkahds) +{ + int status = 0; + u16 payload_len; + u32 lk_ahd; + struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf; + + if (n_lkahds != NULL) + *n_lkahds = 0; + + /* + * NOTE: we cannot assume the alignment of buf, so we use the safe + * macros to retrieve 16 bit fields. + */ + payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len)); + + memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd)); + + if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) { + /* + * Refresh the expected header and the actual length as it + * was unknown when this packet was grabbed as part of the + * bundle. + */ + packet->info.rx.exp_hdr = lk_ahd; + packet->act_len = payload_len + HTC_HDR_LENGTH; + + /* validate the actual header that was refreshed */ + if (packet->act_len > packet->buf_len) { + ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n", + payload_len, lk_ahd); + /* + * Limit this to max buffer just to print out some + * of the buffer. + */ + packet->act_len = min(packet->act_len, packet->buf_len); + status = -ENOMEM; + goto fail_rx; + } + + if (packet->endpoint != htc_hdr->eid) { + ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n", + htc_hdr->eid, packet->endpoint); + status = -ENOMEM; + goto fail_rx; + } + } + + if (lk_ahd != packet->info.rx.exp_hdr) { + ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n", + __func__, packet, packet->info.rx.rx_flags); + ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd", + "", &packet->info.rx.exp_hdr, 4); + ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header", + "", (u8 *)&lk_ahd, sizeof(lk_ahd)); + status = -ENOMEM; + goto fail_rx; + } + + if (htc_hdr->flags & HTC_FLG_RX_TRAILER) { + if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) || + htc_hdr->ctrl[0] > payload_len) { + ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n", + __func__, payload_len, htc_hdr->ctrl[0]); + status = -ENOMEM; + goto fail_rx; + } + + if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) { + next_lkahds = NULL; + n_lkahds = NULL; + } + + status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH + + payload_len - htc_hdr->ctrl[0], + htc_hdr->ctrl[0], next_lkahds, + n_lkahds, packet->endpoint); + + if (status) + goto fail_rx; + + packet->act_len -= htc_hdr->ctrl[0]; + } + + packet->buf += HTC_HDR_LENGTH; + packet->act_len -= HTC_HDR_LENGTH; + +fail_rx: + if (status) + ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet", + "", packet->buf, packet->act_len); + + return status; +} + +static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint, + struct htc_packet *packet) +{ + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx complete ep %d packet 0x%p\n", + endpoint->eid, packet); + + endpoint->ep_cb.rx(endpoint->target, packet); +} + +static int ath6kl_htc_rx_bundle(struct htc_target *target, + struct list_head *rxq, + struct list_head *sync_compq, + int *n_pkt_fetched, bool part_bundle) +{ + struct hif_scatter_req *scat_req; + struct htc_packet *packet; + int rem_space = target->max_rx_bndl_sz; + int n_scat_pkt, status = 0, i, len; + + n_scat_pkt = get_queue_depth(rxq); + n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max); + + if ((get_queue_depth(rxq) - n_scat_pkt) > 0) { + /* + * We were forced to split this bundle receive operation + * all packets in this partial bundle must have their + * lookaheads ignored. + */ + part_bundle = true; + + /* + * This would only happen if the target ignored our max + * bundle limit. 
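+ *
+ * Numeric illustration: with msg_per_bndl_max = 8 and 10 packets
+ * indicated, only 8 are fetched in this pass; the bundle is
+ * flagged partial and every packet's lookahead is ignored,
+ * forcing a fresh lookahead read before the remaining 2 packets
+ * are fetched.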
+ */
+ ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
+ __func__, get_queue_depth(rxq), n_scat_pkt);
+ }
+
+ len = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx bundle depth %d pkts %d\n",
+ get_queue_depth(rxq), n_scat_pkt);
+
+ scat_req = hif_scatter_req_get(target->dev->ar);
+
+ if (scat_req == NULL)
+ goto fail_rx_pkt;
+
+ for (i = 0; i < n_scat_pkt; i++) {
+ int pad_len;
+
+ packet = list_first_entry(rxq, struct htc_packet, list);
+ list_del(&packet->list);
+
+ pad_len = CALC_TXRX_PADDED_LEN(target,
+ packet->act_len);
+
+ if ((rem_space - pad_len) < 0) {
+ list_add(&packet->list, rxq);
+ break;
+ }
+
+ rem_space -= pad_len;
+
+ if (part_bundle || (i < (n_scat_pkt - 1)))
+ /*
+ * Packets 0..n-1 cannot be checked for look-aheads
+ * since we are fetching a bundle; the last packet,
+ * however, can have its lookahead used.
+ */
+ packet->info.rx.rx_flags |=
+ HTC_RX_PKT_IGNORE_LOOKAHEAD;
+
+ /* NOTE: 1 HTC packet per scatter entry */
+ scat_req->scat_list[i].buf = packet->buf;
+ scat_req->scat_list[i].len = pad_len;
+
+ packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;
+
+ list_add_tail(&packet->list, sync_compq);
+
+ WARN_ON(!scat_req->scat_list[i].len);
+ len += scat_req->scat_list[i].len;
+ }
+
+ scat_req->len = len;
+ scat_req->scat_entries = i;
+
+ status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);
+
+ if (!status)
+ *n_pkt_fetched = i;
+
+ /* free scatter request */
+ hif_scatter_req_add(target->dev->ar, scat_req);
+
+fail_rx_pkt:
+
+ return status;
+}
+
+static int ath6kl_htc_rx_process_packets(struct htc_target *target,
+ struct list_head *comp_pktq,
+ u32 lk_ahds[],
+ int *n_lk_ahd)
+{
+ struct htc_packet *packet, *tmp_pkt;
+ struct htc_endpoint *ep;
+ int status = 0;
+
+ list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
+ ep = &target->endpoint[packet->endpoint];
+
+ trace_ath6kl_htc_rx(packet->status, packet->endpoint,
+ packet->buf, packet->act_len);
+
+ /* process the header of each received packet */
+ status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
+ n_lk_ahd);
+ if (status)
+ return status;
+
+ list_del(&packet->list);
+
+ if (list_empty(comp_pktq)) {
+ /*
+ * Last packet's more packet flag is set
+ * based on the lookahead.
+ */
+ if (*n_lk_ahd > 0)
+ ath6kl_htc_rx_set_indicate(lk_ahds[0],
+ ep, packet);
+ } else
+ /*
+ * Packets in a bundle automatically have
+ * this flag set.
+ */
+ packet->info.rx.indicat_flags |=
+ HTC_RX_FLAGS_INDICATE_MORE_PKTS;
+
+ ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);
+
+ if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
+ ep->ep_st.rx_bundl += 1;
+
+ ath6kl_htc_rx_complete(ep, packet);
+ }
+
+ return status;
+}
+
+static int ath6kl_htc_rx_fetch(struct htc_target *target,
+ struct list_head *rx_pktq,
+ struct list_head *comp_pktq)
+{
+ int fetched_pkts;
+ bool part_bundle = false;
+ int status = 0;
+ struct list_head tmp_rxq;
+ struct htc_packet *packet, *tmp_pkt;
+
+ /* now go fetch the list of HTC packets */
+ while (!list_empty(rx_pktq)) {
+ fetched_pkts = 0;
+
+ INIT_LIST_HEAD(&tmp_rxq);
+
+ if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
+ /*
+ * There are enough packets to attempt a
+ * bundle transfer and recv bundling is
+ * allowed.
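+ *
+ * (rx_bndl_enable is set at setup time only when
+ * max_rx_bndl_sz is non-zero; a queue depth above one
+ * then makes a single scatter fetch cheaper than one
+ * synchronous mbox read per packet.)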
+ */ + status = ath6kl_htc_rx_bundle(target, rx_pktq, + &tmp_rxq, + &fetched_pkts, + part_bundle); + if (status) + goto fail_rx; + + if (!list_empty(rx_pktq)) + part_bundle = true; + + list_splice_tail_init(&tmp_rxq, comp_pktq); + } + + if (!fetched_pkts) { + packet = list_first_entry(rx_pktq, struct htc_packet, + list); + + /* fully synchronous */ + packet->completion = NULL; + + if (!list_is_singular(rx_pktq)) + /* + * look_aheads in all packet + * except the last one in the + * bundle must be ignored + */ + packet->info.rx.rx_flags |= + HTC_RX_PKT_IGNORE_LOOKAHEAD; + + /* go fetch the packet */ + status = ath6kl_htc_rx_packet(target, packet, + packet->act_len); + + list_move_tail(&packet->list, &tmp_rxq); + + if (status) + goto fail_rx; + + list_splice_tail_init(&tmp_rxq, comp_pktq); + } + } + + return 0; + +fail_rx: + + /* + * Cleanup any packets we allocated but didn't use to + * actually fetch any packets. + */ + + list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) { + list_del(&packet->list); + htc_reclaim_rxbuf(target, packet, + &target->endpoint[packet->endpoint]); + } + + list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) { + list_del(&packet->list); + htc_reclaim_rxbuf(target, packet, + &target->endpoint[packet->endpoint]); + } + + return status; +} + +int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, + u32 msg_look_ahead, int *num_pkts) +{ + struct htc_packet *packets, *tmp_pkt; + struct htc_endpoint *endpoint; + struct list_head rx_pktq, comp_pktq; + int status = 0; + u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE]; + int num_look_ahead = 1; + enum htc_endpoint_id id; + int n_fetched = 0; + + INIT_LIST_HEAD(&comp_pktq); + *num_pkts = 0; + + /* + * On first entry copy the look_aheads into our temp array for + * processing + */ + look_aheads[0] = msg_look_ahead; + + while (true) { + /* + * First lookahead sets the expected endpoint IDs for all + * packets in a bundle. + */ + id = ((struct htc_frame_hdr *)&look_aheads[0])->eid; + endpoint = &target->endpoint[id]; + + if (id >= ENDPOINT_MAX) { + ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n", + id); + status = -ENOMEM; + break; + } + + INIT_LIST_HEAD(&rx_pktq); + INIT_LIST_HEAD(&comp_pktq); + + /* + * Try to allocate as many HTC RX packets indicated by the + * look_aheads. + */ + status = ath6kl_htc_rx_alloc(target, look_aheads, + num_look_ahead, endpoint, + &rx_pktq); + if (status) + break; + + if (get_queue_depth(&rx_pktq) >= 2) + /* + * A recv bundle was detected, force IRQ status + * re-check again + */ + target->chk_irq_status_cnt = 1; + + n_fetched += get_queue_depth(&rx_pktq); + + num_look_ahead = 0; + + status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq); + + if (!status) + ath6kl_htc_rx_chk_water_mark(endpoint); + + /* Process fetched packets */ + status = ath6kl_htc_rx_process_packets(target, &comp_pktq, + look_aheads, + &num_look_ahead); + + if (!num_look_ahead || status) + break; + + /* + * For SYNCH processing, if we get here, we are running + * through the loop again due to a detected lookahead. Set + * flag that we should re-check IRQ status registers again + * before leaving IRQ processing, this can net better + * performance in high throughput situations. 
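+ *
+ * (A lookahead recovered from a trailer means the target already
+ * has more RX data queued, so one extra status poll here is
+ * cheaper than taking another interrupt for it.)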
+ */ + target->chk_irq_status_cnt = 1; + } + + if (status) { + if (status != -ECANCELED) + ath6kl_err("failed to get pending recv messages: %d\n", + status); + + /* cleanup any packets in sync completion queue */ + list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) { + list_del(&packets->list); + htc_reclaim_rxbuf(target, packets, + &target->endpoint[packets->endpoint]); + } + + if (target->htc_flags & HTC_OP_STATE_STOPPING) { + ath6kl_warn("host is going to stop blocking receiver for htc_stop\n"); + ath6kl_hif_rx_control(target->dev, false); + } + } + + /* + * Before leaving, check to see if host ran out of buffers and + * needs to stop the receiver. + */ + if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) { + ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n"); + ath6kl_hif_rx_control(target->dev, false); + } + *num_pkts = n_fetched; + + return status; +} + +/* + * Synchronously wait for a control message from the target, + * This function is used at initialization time ONLY. At init messages + * on ENDPOINT 0 are expected. + */ +static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target) +{ + struct htc_packet *packet = NULL; + struct htc_frame_look_ahead look_ahead; + + if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead.word, + HTC_TARGET_RESPONSE_TIMEOUT)) + return NULL; + + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx wait ctrl look_ahead 0x%X\n", look_ahead.word); + + if (look_ahead.eid != ENDPOINT_0) + return NULL; + + packet = htc_get_control_buf(target, false); + + if (!packet) + return NULL; + + packet->info.rx.rx_flags = 0; + packet->info.rx.exp_hdr = look_ahead.word; + packet->act_len = le16_to_cpu(look_ahead.payld_len) + HTC_HDR_LENGTH; + + if (packet->act_len > packet->buf_len) + goto fail_ctrl_rx; + + /* we want synchronous operation */ + packet->completion = NULL; + + /* get the message from the device, this will block */ + if (ath6kl_htc_rx_packet(target, packet, packet->act_len)) + goto fail_ctrl_rx; + + trace_ath6kl_htc_rx(packet->status, packet->endpoint, + packet->buf, packet->act_len); + + /* process receive header */ + packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL); + + if (packet->status) { + ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n", + packet->status); + goto fail_ctrl_rx; + } + + return packet; + +fail_ctrl_rx: + if (packet != NULL) { + htc_rxpkt_reset(packet); + reclaim_rx_ctrl_buf(target, packet); + } + + return NULL; +} + +static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target, + struct list_head *pkt_queue) +{ + struct htc_endpoint *endpoint; + struct htc_packet *first_pkt; + bool rx_unblock = false; + int status = 0, depth; + + if (list_empty(pkt_queue)) + return -ENOMEM; + + first_pkt = list_first_entry(pkt_queue, struct htc_packet, list); + + if (first_pkt->endpoint >= ENDPOINT_MAX) + return status; + + depth = get_queue_depth(pkt_queue); + + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx add multiple ep id %d cnt %d len %d\n", + first_pkt->endpoint, depth, first_pkt->buf_len); + + endpoint = &target->endpoint[first_pkt->endpoint]; + + if (target->htc_flags & HTC_OP_STATE_STOPPING) { + struct htc_packet *packet, *tmp_pkt; + + /* walk through queue and mark each one canceled */ + list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) { + packet->status = -ECANCELED; + list_del(&packet->list); + ath6kl_htc_rx_complete(endpoint, packet); + } + + return status; + } + + spin_lock_bh(&target->rx_lock); + + list_splice_tail_init(pkt_queue, 
&endpoint->rx_bufq); + + /* check if we are blocked waiting for a new buffer */ + if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) { + if (target->ep_waiting == first_pkt->endpoint) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx blocked on ep %d, unblocking\n", + target->ep_waiting); + target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS; + target->ep_waiting = ENDPOINT_MAX; + rx_unblock = true; + } + } + + spin_unlock_bh(&target->rx_lock); + + if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING)) + /* TODO : implement a buffer threshold count? */ + ath6kl_hif_rx_control(target->dev, true); + + return status; +} + +static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target) +{ + struct htc_endpoint *endpoint; + struct htc_packet *packet, *tmp_pkt; + int i; + + for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { + endpoint = &target->endpoint[i]; + if (!endpoint->svc_id) + /* not in use.. */ + continue; + + spin_lock_bh(&target->rx_lock); + list_for_each_entry_safe(packet, tmp_pkt, + &endpoint->rx_bufq, list) { + list_del(&packet->list); + spin_unlock_bh(&target->rx_lock); + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx flush pkt 0x%p len %d ep %d\n", + packet, packet->buf_len, + packet->endpoint); + /* + * packets in rx_bufq of endpoint 0 have originally + * been queued from target->free_ctrl_rxbuf where + * packet and packet->buf_start are allocated + * separately using kmalloc(). For other endpoint + * rx_bufq, it is allocated as skb where packet is + * skb->head. Take care of this difference while freeing + * the memory. + */ + if (packet->endpoint == ENDPOINT_0) { + kfree(packet->buf_start); + kfree(packet); + } else { + dev_kfree_skb(packet->pkt_cntxt); + } + spin_lock_bh(&target->rx_lock); + } + spin_unlock_bh(&target->rx_lock); + } +} + +static int ath6kl_htc_mbox_conn_service(struct htc_target *target, + struct htc_service_connect_req *conn_req, + struct htc_service_connect_resp *conn_resp) +{ + struct htc_packet *rx_pkt = NULL; + struct htc_packet *tx_pkt = NULL; + struct htc_conn_service_resp *resp_msg; + struct htc_conn_service_msg *conn_msg; + struct htc_endpoint *endpoint; + enum htc_endpoint_id assigned_ep = ENDPOINT_MAX; + unsigned int max_msg_sz = 0; + int status = 0; + u16 msg_id; + + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc connect service target 0x%p service id 0x%x\n", + target, conn_req->svc_id); + + if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) { + /* special case for pseudo control service */ + assigned_ep = ENDPOINT_0; + max_msg_sz = HTC_MAX_CTRL_MSG_LEN; + } else { + /* allocate a packet to send to the target */ + tx_pkt = htc_get_control_buf(target, true); + + if (!tx_pkt) + return -ENOMEM; + + conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf; + memset(conn_msg, 0, sizeof(*conn_msg)); + conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID); + conn_msg->svc_id = cpu_to_le16(conn_req->svc_id); + conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags); + + set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg, + sizeof(*conn_msg) + conn_msg->svc_meta_len, + ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); + + /* we want synchronous operation */ + tx_pkt->completion = NULL; + ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0); + status = ath6kl_htc_tx_issue(target, tx_pkt); + + if (status) + goto fail_tx; + + /* wait for response */ + rx_pkt = htc_wait_for_ctrl_msg(target); + + if (!rx_pkt) { + status = -ENOMEM; + goto fail_tx; + } + + resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf; + msg_id = le16_to_cpu(resp_msg->msg_id); + + if ((msg_id != HTC_MSG_CONN_SVC_RESP_ID) || + (rx_pkt->act_len < 
sizeof(*resp_msg))) { + status = -ENOMEM; + goto fail_tx; + } + + conn_resp->resp_code = resp_msg->status; + /* check response status */ + if (resp_msg->status != HTC_SERVICE_SUCCESS) { + ath6kl_err("target failed service 0x%X connect request (status:%d)\n", + resp_msg->svc_id, resp_msg->status); + status = -ENOMEM; + goto fail_tx; + } + + assigned_ep = (enum htc_endpoint_id)resp_msg->eid; + max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz); + } + + if (WARN_ON_ONCE(assigned_ep == ENDPOINT_UNUSED || + assigned_ep >= ENDPOINT_MAX || !max_msg_sz)) { + status = -ENOMEM; + goto fail_tx; + } + + endpoint = &target->endpoint[assigned_ep]; + endpoint->eid = assigned_ep; + if (endpoint->svc_id) { + status = -ENOMEM; + goto fail_tx; + } + + /* return assigned endpoint to caller */ + conn_resp->endpoint = assigned_ep; + conn_resp->len_max = max_msg_sz; + + /* setup the endpoint */ + + /* this marks the endpoint in use */ + endpoint->svc_id = conn_req->svc_id; + + endpoint->max_txq_depth = conn_req->max_txq_depth; + endpoint->len_max = max_msg_sz; + endpoint->ep_cb = conn_req->ep_cb; + endpoint->cred_dist.svc_id = conn_req->svc_id; + endpoint->cred_dist.htc_ep = endpoint; + endpoint->cred_dist.endpoint = assigned_ep; + endpoint->cred_dist.cred_sz = target->tgt_cred_sz; + + switch (endpoint->svc_id) { + case WMI_DATA_BK_SVC: + endpoint->tx_drop_packet_threshold = MAX_DEF_COOKIE_NUM / 3; + break; + default: + endpoint->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM; + break; + } + + if (conn_req->max_rxmsg_sz) { + /* + * Override cred_per_msg calculation, this optimizes + * the credit-low indications since the host will actually + * issue smaller messages in the Send path. + */ + if (conn_req->max_rxmsg_sz > max_msg_sz) { + status = -ENOMEM; + goto fail_tx; + } + endpoint->cred_dist.cred_per_msg = + conn_req->max_rxmsg_sz / target->tgt_cred_sz; + } else + endpoint->cred_dist.cred_per_msg = + max_msg_sz / target->tgt_cred_sz; + + if (!endpoint->cred_dist.cred_per_msg) + endpoint->cred_dist.cred_per_msg = 1; + + /* save local connection flags */ + endpoint->conn_flags = conn_req->flags; + +fail_tx: + if (tx_pkt) + htc_reclaim_txctrl_buf(target, tx_pkt); + + if (rx_pkt) { + htc_rxpkt_reset(rx_pkt); + reclaim_rx_ctrl_buf(target, rx_pkt); + } + + return status; +} + +static void reset_ep_state(struct htc_target *target) +{ + struct htc_endpoint *endpoint; + int i; + + for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { + endpoint = &target->endpoint[i]; + memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist)); + endpoint->svc_id = 0; + endpoint->len_max = 0; + endpoint->max_txq_depth = 0; + memset(&endpoint->ep_st, 0, + sizeof(endpoint->ep_st)); + INIT_LIST_HEAD(&endpoint->rx_bufq); + INIT_LIST_HEAD(&endpoint->txq); + endpoint->target = target; + } + + /* reset distribution list */ + /* FIXME: free existing entries */ + INIT_LIST_HEAD(&target->cred_dist_list); +} + +static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target, + enum htc_endpoint_id endpoint) +{ + int num; + + spin_lock_bh(&target->rx_lock); + num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq)); + spin_unlock_bh(&target->rx_lock); + return num; +} + +static void htc_setup_msg_bndl(struct htc_target *target) +{ + /* limit what HTC can handle */ + target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE, + target->msg_per_bndl_max); + + if (ath6kl_hif_enable_scatter(target->dev->ar)) { + target->msg_per_bndl_max = 0; + return; + } + + /* limit bundle what the device layer can handle */ + target->msg_per_bndl_max = 
min(target->max_scat_entries,
+ target->msg_per_bndl_max);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "htc bundling allowed msg_per_bndl_max %d\n",
+ target->msg_per_bndl_max);
+
+ /* Max rx bundle size is limited by the max tx bundle size */
+ target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
+ /* Max tx bundle size is limited by the extended mbox address range */
+ target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
+ target->max_xfer_szper_scatreq);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
+ target->max_rx_bndl_sz, target->max_tx_bndl_sz);
+
+ if (target->max_tx_bndl_sz)
+ /* tx_bndl_mask is enabled per AC, each has 1 bit */
+ target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1;
+
+ if (target->max_rx_bndl_sz)
+ target->rx_bndl_enable = true;
+
+ if ((target->tgt_cred_sz % target->block_sz) != 0) {
+ ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
+ target->tgt_cred_sz);
+
+ /*
+ * Disallow send bundling since the credit size is
+ * not aligned to a block size; the I/O block
+ * padding will spill into the next credit buffer,
+ * which is fatal.
+ */
+ target->tx_bndl_mask = 0;
+ }
+}
+
+static int ath6kl_htc_mbox_wait_target(struct htc_target *target)
+{
+ struct htc_packet *packet = NULL;
+ struct htc_ready_ext_msg *rdy_msg;
+ struct htc_service_connect_req connect;
+ struct htc_service_connect_resp resp;
+ int status;
+
+ /* we should be getting 1 control message that the target is ready */
+ packet = htc_wait_for_ctrl_msg(target);
+
+ if (!packet)
+ return -ENOMEM;
+
+ /* we controlled the buffer creation so it's properly aligned */
+ rdy_msg = (struct htc_ready_ext_msg *)packet->buf;
+
+ if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
+ (packet->act_len < sizeof(struct htc_ready_msg))) {
+ status = -ENOMEM;
+ goto fail_wait_target;
+ }
+
+ if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
+ status = -ENOMEM;
+ goto fail_wait_target;
+ }
+
+ target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
+ target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "htc target ready credits %d size %d\n",
+ target->tgt_creds, target->tgt_cred_sz);
+
+ /* check if this is an extended ready message */
+ if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
+ /* this is an extended message */
+ target->htc_tgt_ver = rdy_msg->htc_ver;
+ target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
+ } else {
+ /* legacy */
+ target->htc_tgt_ver = HTC_VERSION_2P0;
+ target->msg_per_bndl_max = 0;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
+ (target->htc_tgt_ver == HTC_VERSION_2P0) ?
"2.0" : ">= 2.1", + target->htc_tgt_ver); + + if (target->msg_per_bndl_max > 0) + htc_setup_msg_bndl(target); + + /* setup our pseudo HTC control endpoint connection */ + memset(&connect, 0, sizeof(connect)); + memset(&resp, 0, sizeof(resp)); + connect.ep_cb.rx = htc_ctrl_rx; + connect.ep_cb.rx_refill = NULL; + connect.ep_cb.tx_full = NULL; + connect.max_txq_depth = NUM_CONTROL_BUFFERS; + connect.svc_id = HTC_CTRL_RSVD_SVC; + + /* connect fake service */ + status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp); + + if (status) + /* + * FIXME: this call doesn't make sense, the caller should + * call ath6kl_htc_mbox_cleanup() when it wants remove htc + */ + ath6kl_hif_cleanup_scatter(target->dev->ar); + +fail_wait_target: + if (packet) { + htc_rxpkt_reset(packet); + reclaim_rx_ctrl_buf(target, packet); + } + + return status; +} + +/* + * Start HTC, enable interrupts and let the target know + * host has finished setup. + */ +static int ath6kl_htc_mbox_start(struct htc_target *target) +{ + struct htc_packet *packet; + int status; + + memset(&target->dev->irq_proc_reg, 0, + sizeof(target->dev->irq_proc_reg)); + + /* Disable interrupts at the chip level */ + ath6kl_hif_disable_intrs(target->dev); + + target->htc_flags = 0; + target->rx_st_flags = 0; + + /* Push control receive buffers into htc control endpoint */ + while ((packet = htc_get_control_buf(target, false)) != NULL) { + status = htc_add_rxbuf(target, packet); + if (status) + return status; + } + + /* NOTE: the first entry in the distribution list is ENDPOINT_0 */ + ath6kl_credit_init(target->credit_info, &target->cred_dist_list, + target->tgt_creds); + + dump_cred_dist_stats(target); + + /* Indicate to the target of the setup completion */ + status = htc_setup_tx_complete(target); + + if (status) + return status; + + /* unmask interrupts */ + status = ath6kl_hif_unmask_intrs(target->dev); + + if (status) + ath6kl_htc_mbox_stop(target); + + return status; +} + +static int ath6kl_htc_reset(struct htc_target *target) +{ + u32 block_size, ctrl_bufsz; + struct htc_packet *packet; + int i; + + reset_ep_state(target); + + block_size = target->dev->ar->mbox_info.block_size; + + ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ? + (block_size + HTC_HDR_LENGTH) : + (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH); + + for (i = 0; i < NUM_CONTROL_BUFFERS; i++) { + packet = kzalloc(sizeof(*packet), GFP_KERNEL); + if (!packet) + return -ENOMEM; + + packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL); + if (!packet->buf_start) { + kfree(packet); + return -ENOMEM; + } + + packet->buf_len = ctrl_bufsz; + if (i < NUM_CONTROL_RX_BUFFERS) { + packet->act_len = 0; + packet->buf = packet->buf_start; + packet->endpoint = ENDPOINT_0; + list_add_tail(&packet->list, &target->free_ctrl_rxbuf); + } else { + list_add_tail(&packet->list, &target->free_ctrl_txbuf); + } + } + + return 0; +} + +/* htc_stop: stop interrupt reception, and flush all queued buffers */ +static void ath6kl_htc_mbox_stop(struct htc_target *target) +{ + spin_lock_bh(&target->htc_lock); + target->htc_flags |= HTC_OP_STATE_STOPPING; + spin_unlock_bh(&target->htc_lock); + + /* + * Masking interrupts is a synchronous operation, when this + * function returns all pending HIF I/O has completed, we can + * safely flush the queues. 
+ */ + ath6kl_hif_mask_intrs(target->dev); + + ath6kl_htc_flush_txep_all(target); + + ath6kl_htc_mbox_flush_rx_buf(target); + + ath6kl_htc_reset(target); +} + +static void *ath6kl_htc_mbox_create(struct ath6kl *ar) +{ + struct htc_target *target = NULL; + int status = 0; + + target = kzalloc(sizeof(*target), GFP_KERNEL); + if (!target) { + ath6kl_err("unable to allocate memory\n"); + return NULL; + } + + target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL); + if (!target->dev) { + ath6kl_err("unable to allocate memory\n"); + kfree(target); + return NULL; + } + + spin_lock_init(&target->htc_lock); + spin_lock_init(&target->rx_lock); + spin_lock_init(&target->tx_lock); + + INIT_LIST_HEAD(&target->free_ctrl_txbuf); + INIT_LIST_HEAD(&target->free_ctrl_rxbuf); + INIT_LIST_HEAD(&target->cred_dist_list); + + target->dev->ar = ar; + target->dev->htc_cnxt = target; + target->ep_waiting = ENDPOINT_MAX; + + status = ath6kl_hif_setup(target->dev); + if (status) + goto err_htc_cleanup; + + status = ath6kl_htc_reset(target); + if (status) + goto err_htc_cleanup; + + return target; + +err_htc_cleanup: + ath6kl_htc_mbox_cleanup(target); + + return NULL; +} + +/* cleanup the HTC instance */ +static void ath6kl_htc_mbox_cleanup(struct htc_target *target) +{ + struct htc_packet *packet, *tmp_packet; + + ath6kl_hif_cleanup_scatter(target->dev->ar); + + list_for_each_entry_safe(packet, tmp_packet, + &target->free_ctrl_txbuf, list) { + list_del(&packet->list); + kfree(packet->buf_start); + kfree(packet); + } + + list_for_each_entry_safe(packet, tmp_packet, + &target->free_ctrl_rxbuf, list) { + list_del(&packet->list); + kfree(packet->buf_start); + kfree(packet); + } + + kfree(target->dev); + kfree(target); +} + +static const struct ath6kl_htc_ops ath6kl_htc_mbox_ops = { + .create = ath6kl_htc_mbox_create, + .wait_target = ath6kl_htc_mbox_wait_target, + .start = ath6kl_htc_mbox_start, + .conn_service = ath6kl_htc_mbox_conn_service, + .tx = ath6kl_htc_mbox_tx, + .stop = ath6kl_htc_mbox_stop, + .cleanup = ath6kl_htc_mbox_cleanup, + .flush_txep = ath6kl_htc_mbox_flush_txep, + .flush_rx_buf = ath6kl_htc_mbox_flush_rx_buf, + .activity_changed = ath6kl_htc_mbox_activity_changed, + .get_rxbuf_num = ath6kl_htc_mbox_get_rxbuf_num, + .add_rxbuf_multiple = ath6kl_htc_mbox_add_rxbuf_multiple, + .credit_setup = ath6kl_htc_mbox_credit_setup, +}; + +void ath6kl_htc_mbox_attach(struct ath6kl *ar) +{ + ar->htc_ops = &ath6kl_htc_mbox_ops; +} diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c new file mode 100644 index 000000000..9b88d96bf --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c @@ -0,0 +1,1725 @@ +/* + * Copyright (c) 2007-2011 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "core.h" +#include "debug.h" +#include "hif-ops.h" + +#define HTC_PACKET_CONTAINER_ALLOCATION 32 +#define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH) + +static int ath6kl_htc_pipe_tx(struct htc_target *handle, + struct htc_packet *packet); +static void ath6kl_htc_pipe_cleanup(struct htc_target *handle); + +/* htc pipe tx path */ +static inline void restore_tx_packet(struct htc_packet *packet) +{ + if (packet->info.tx.flags & HTC_FLAGS_TX_FIXUP_NETBUF) { + skb_pull(packet->skb, sizeof(struct htc_frame_hdr)); + packet->info.tx.flags &= ~HTC_FLAGS_TX_FIXUP_NETBUF; + } +} + +static void do_send_completion(struct htc_endpoint *ep, + struct list_head *queue_to_indicate) +{ + struct htc_packet *packet; + + if (list_empty(queue_to_indicate)) { + /* nothing to indicate */ + return; + } + + if (ep->ep_cb.tx_comp_multi != NULL) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: calling ep %d, send complete multiple callback (%d pkts)\n", + __func__, ep->eid, + get_queue_depth(queue_to_indicate)); + /* + * a multiple send complete handler is being used, + * pass the queue to the handler + */ + ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate); + /* + * all packets are now owned by the callback, + * reset queue to be safe + */ + INIT_LIST_HEAD(queue_to_indicate); + } else { + /* using legacy EpTxComplete */ + do { + packet = list_first_entry(queue_to_indicate, + struct htc_packet, list); + + list_del(&packet->list); + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: calling ep %d send complete callback on packet 0x%p\n", + __func__, ep->eid, packet); + ep->ep_cb.tx_complete(ep->target, packet); + } while (!list_empty(queue_to_indicate)); + } +} + +static void send_packet_completion(struct htc_target *target, + struct htc_packet *packet) +{ + struct htc_endpoint *ep = &target->endpoint[packet->endpoint]; + struct list_head container; + + restore_tx_packet(packet); + INIT_LIST_HEAD(&container); + list_add_tail(&packet->list, &container); + + /* do completion */ + do_send_completion(ep, &container); +} + +static void get_htc_packet_credit_based(struct htc_target *target, + struct htc_endpoint *ep, + struct list_head *queue) +{ + int credits_required; + int remainder; + u8 send_flags; + struct htc_packet *packet; + unsigned int transfer_len; + + /* NOTE : the TX lock is held when this function is called */ + + /* loop until we can grab as many packets out of the queue as we can */ + while (true) { + send_flags = 0; + if (list_empty(&ep->txq)) + break; + + /* get packet at head, but don't remove it */ + packet = list_first_entry(&ep->txq, struct htc_packet, list); + + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: got head packet:0x%p , queue depth: %d\n", + __func__, packet, get_queue_depth(&ep->txq)); + + transfer_len = packet->act_len + HTC_HDR_LENGTH; + + if (transfer_len <= target->tgt_cred_sz) { + credits_required = 1; + } else { + /* figure out how many credits this message requires */ + credits_required = transfer_len / target->tgt_cred_sz; + remainder = transfer_len % target->tgt_cred_sz; + + if (remainder) + credits_required++; + } + + ath6kl_dbg(ATH6KL_DBG_HTC, "%s: creds required:%d got:%d\n", + __func__, credits_required, ep->cred_dist.credits); + + if (ep->eid == ENDPOINT_0) { + /* + * endpoint 0 is special, it always has a credit and + * does not require credit based flow control + */ + credits_required = 0; + + } else { + if (ep->cred_dist.credits < credits_required) + break; + + ep->cred_dist.credits -= credits_required; + ep->ep_st.cred_cosumd += credits_required; + + /* check if we need 
credits back from the target */ + if (ep->cred_dist.credits < + ep->cred_dist.cred_per_msg) { + /* tell the target we need credits ASAP! */ + send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE; + ep->ep_st.cred_low_indicate += 1; + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: host needs credits\n", + __func__); + } + } + + /* now we can fully dequeue */ + packet = list_first_entry(&ep->txq, struct htc_packet, list); + + list_del(&packet->list); + /* save the number of credits this packet consumed */ + packet->info.tx.cred_used = credits_required; + /* save send flags */ + packet->info.tx.flags = send_flags; + packet->info.tx.seqno = ep->seqno; + ep->seqno++; + /* queue this packet into the caller's queue */ + list_add_tail(&packet->list, queue); + } +} + +static void get_htc_packet(struct htc_target *target, + struct htc_endpoint *ep, + struct list_head *queue, int resources) +{ + struct htc_packet *packet; + + /* NOTE : the TX lock is held when this function is called */ + + /* loop until we can grab as many packets out of the queue as we can */ + while (resources) { + if (list_empty(&ep->txq)) + break; + + packet = list_first_entry(&ep->txq, struct htc_packet, list); + list_del(&packet->list); + + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: got packet:0x%p , new queue depth: %d\n", + __func__, packet, get_queue_depth(&ep->txq)); + packet->info.tx.seqno = ep->seqno; + packet->info.tx.flags = 0; + packet->info.tx.cred_used = 0; + ep->seqno++; + + /* queue this packet into the caller's queue */ + list_add_tail(&packet->list, queue); + resources--; + } +} + +static int htc_issue_packets(struct htc_target *target, + struct htc_endpoint *ep, + struct list_head *pkt_queue) +{ + int status = 0; + u16 payload_len; + struct sk_buff *skb; + struct htc_frame_hdr *htc_hdr; + struct htc_packet *packet; + + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: queue: 0x%p, pkts %d\n", __func__, + pkt_queue, get_queue_depth(pkt_queue)); + + while (!list_empty(pkt_queue)) { + packet = list_first_entry(pkt_queue, struct htc_packet, list); + list_del(&packet->list); + + skb = packet->skb; + if (!skb) { + WARN_ON_ONCE(1); + status = -EINVAL; + break; + } + + payload_len = packet->act_len; + + /* setup HTC frame header */ + htc_hdr = skb_push(skb, sizeof(*htc_hdr)); + if (!htc_hdr) { + WARN_ON_ONCE(1); + status = -EINVAL; + break; + } + + packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF; + + /* Endianness? */ + put_unaligned((u16) payload_len, &htc_hdr->payld_len); + htc_hdr->flags = packet->info.tx.flags; + htc_hdr->eid = (u8) packet->endpoint; + htc_hdr->ctrl[0] = 0; + htc_hdr->ctrl[1] = (u8) packet->info.tx.seqno; + + spin_lock_bh(&target->tx_lock); + + /* store in lookup queue to match completions */ + list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue); + ep->ep_st.tx_issued += 1; + spin_unlock_bh(&target->tx_lock); + + status = ath6kl_hif_pipe_send(target->dev->ar, + ep->pipe.pipeid_ul, NULL, skb); + + if (status != 0) { + if (status != -ENOMEM) { + /* TODO: if more than 1 endpoint maps to the + * same PipeID, it is possible to run out of + * resources in the HIF layer. + * Don't emit the error. + */
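+ /* + * (Sketch of the assumption above, not code from this + * driver: a USB HIF with a fixed per-pipe URB pool might + * do + * + * if (atomic_read(&pipe->urb_cnt) == 0) + * return -ENOMEM; + * + * so -ENOMEM here means backpressure, not a fault, and is + * deliberately not logged as an error.) + */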
+ ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: failed status:%d\n", + __func__, status); + } + spin_lock_bh(&target->tx_lock); + list_del(&packet->list); + + /* reclaim credits */ + ep->cred_dist.credits += packet->info.tx.cred_used; + spin_unlock_bh(&target->tx_lock); + + /* put it back into the caller's queue */ + list_add(&packet->list, pkt_queue); + break; + } + } + + if (status != 0) { + while (!list_empty(pkt_queue)) { + if (status != -ENOMEM) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: failed pkt:0x%p status:%d\n", + __func__, packet, status); + } + + packet = list_first_entry(pkt_queue, + struct htc_packet, list); + list_del(&packet->list); + packet->status = status; + send_packet_completion(target, packet); + } + } + + return status; +} + +static enum htc_send_queue_result htc_try_send(struct htc_target *target, + struct htc_endpoint *ep, + struct list_head *txq) +{ + struct list_head send_queue; /* temp queue to hold packets */ + struct htc_packet *packet, *tmp_pkt; + struct ath6kl *ar = target->dev->ar; + enum htc_send_full_action action; + int tx_resources, overflow, txqueue_depth, i, good_pkts; + u8 pipeid; + + ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n", + __func__, txq, + (txq == NULL) ? 0 : get_queue_depth(txq)); + + /* init the local send queue */ + INIT_LIST_HEAD(&send_queue); + + /* + * A NULL txq means the caller didn't provide a queue and + * just wants us to check the queues and send. + */ + if (txq != NULL) { + if (list_empty(txq)) { + /* empty queue */ + return HTC_SEND_QUEUE_DROP; + } + + spin_lock_bh(&target->tx_lock); + txqueue_depth = get_queue_depth(&ep->txq); + spin_unlock_bh(&target->tx_lock); + + if (txqueue_depth >= ep->max_txq_depth) { + /* we've already overflowed */ + overflow = get_queue_depth(txq); + } else { + /* get how much we will overflow by */ + overflow = txqueue_depth; + overflow += get_queue_depth(txq); + /* get how much we will overflow the TX queue by */ + overflow -= ep->max_txq_depth; + } + + /* if overflow is negative or zero, we are okay */ + if (overflow > 0) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n", + __func__, ep->eid, overflow, txqueue_depth, + ep->max_txq_depth); + } + if ((overflow <= 0) || + (ep->ep_cb.tx_full == NULL)) { + /* + * all packets will fit or the caller did not provide a + * send-full indication handler -- just move all of them + * to the local send_queue object + */ + list_splice_tail_init(txq, &send_queue); + } else { + good_pkts = get_queue_depth(txq) - overflow; + if (good_pkts < 0) { + WARN_ON_ONCE(1); + return HTC_SEND_QUEUE_DROP; + } + + /* we have overflowed, and a callback is provided */ + /* dequeue all non-overflow packets to the sendqueue */ + for (i = 0; i < good_pkts; i++) { + /* pop off caller's queue */ + packet = list_first_entry(txq, + struct htc_packet, + list); + /* move to local queue */ + list_move_tail(&packet->list, &send_queue); + } + + /* + * the caller's queue now has all the packets that won't + * fit; walk through it and indicate each one to the + * send-full handler + */ + list_for_each_entry_safe(packet, tmp_pkt, + txq, list) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: Indicate overflowed TX pkts: %p\n", + __func__, packet); + action = ep->ep_cb.tx_full(ep->target, packet); + if (action == HTC_SEND_FULL_DROP) { + /* callback wants the packet dropped */ + ep->ep_st.tx_dropped += 1; + + /* leave this one in the caller's queue + * for cleanup */ + } else { + /* callback wants to keep this packet, + * move from
caller's queue to the send + * queue */ + list_move_tail(&packet->list, + &send_queue); + } + } + + if (list_empty(&send_queue)) { + /* no packets made it in, caller will cleanup */ + return HTC_SEND_QUEUE_DROP; + } + } + } + + if (!ep->pipe.tx_credit_flow_enabled) { + tx_resources = + ath6kl_hif_pipe_get_free_queue_number(ar, + ep->pipe.pipeid_ul); + } else { + tx_resources = 0; + } + + spin_lock_bh(&target->tx_lock); + if (!list_empty(&send_queue)) { + /* transfer packets to tail */ + list_splice_tail_init(&send_queue, &ep->txq); + if (!list_empty(&send_queue)) { + WARN_ON_ONCE(1); + spin_unlock_bh(&target->tx_lock); + return HTC_SEND_QUEUE_DROP; + } + INIT_LIST_HEAD(&send_queue); + } + + /* increment tx processing count on entry */ + ep->tx_proc_cnt++; + + if (ep->tx_proc_cnt > 1) { + /* + * Another thread or task is draining the TX queues on this + * endpoint; that thread will reset the tx processing count + * when the queue is drained. + */ + ep->tx_proc_cnt--; + spin_unlock_bh(&target->tx_lock); + return HTC_SEND_QUEUE_OK; + } + + /***** beyond this point only 1 thread may enter ******/ + + /* + * Now drain the endpoint TX queue for transmission as long as we have + * enough transmit resources. + */ + while (true) { + if (get_queue_depth(&ep->txq) == 0) + break; + + if (ep->pipe.tx_credit_flow_enabled) { + /* + * The credit based mechanism provides flow control + * based on target transmit resource availability; we + * assume that the HIF layer will always have + * bus resources greater than target transmit + * resources. + */ + get_htc_packet_credit_based(target, ep, &send_queue); + } else { + /* + * Get all packets for this endpoint that we can + * for this pass. + */ + get_htc_packet(target, ep, &send_queue, tx_resources); + } + + if (get_queue_depth(&send_queue) == 0) { + /* + * Didn't get any packets because we ran out of + * resources or because the TX queue was drained.
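+ * + * Worked example for the credit path (numbers assumed, not + * from this file): with tgt_cred_sz = 128, a 1500 byte + * payload gives transfer_len = 1500 + HTC_HDR_LENGTH and + * + * credits_required = DIV_ROUND_UP(1500 + 6, 128) = 12 + * + * so the head packet stays queued in ep->txq until twelve + * credits have been reported back.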
+ */ + break; + } + + spin_unlock_bh(&target->tx_lock); + + /* send what we can */ + htc_issue_packets(target, ep, &send_queue); + + if (!ep->pipe.tx_credit_flow_enabled) { + pipeid = ep->pipe.pipeid_ul; + tx_resources = + ath6kl_hif_pipe_get_free_queue_number(ar, pipeid); + } + + spin_lock_bh(&target->tx_lock); + } + + /* done with this endpoint, we can clear the count */ + ep->tx_proc_cnt = 0; + spin_unlock_bh(&target->tx_lock); + + return HTC_SEND_QUEUE_OK; +} + +/* htc control packet manipulation */ +static void destroy_htc_txctrl_packet(struct htc_packet *packet) +{ + struct sk_buff *skb; + skb = packet->skb; + dev_kfree_skb(skb); + kfree(packet); +} + +static struct htc_packet *build_htc_txctrl_packet(void) +{ + struct htc_packet *packet = NULL; + struct sk_buff *skb; + + packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL); + if (packet == NULL) + return NULL; + + skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL); + + if (skb == NULL) { + kfree(packet); + return NULL; + } + packet->skb = skb; + + return packet; +} + +static void htc_free_txctrl_packet(struct htc_target *target, + struct htc_packet *packet) +{ + destroy_htc_txctrl_packet(packet); +} + +static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target) +{ + return build_htc_txctrl_packet(); +} + +static void htc_txctrl_complete(struct htc_target *target, + struct htc_packet *packet) +{ + htc_free_txctrl_packet(target, packet); +} + +#define MAX_MESSAGE_SIZE 1536 + +static int htc_setup_target_buffer_assignments(struct htc_target *target) +{ + int status, credits, credit_per_maxmsg, i; + struct htc_pipe_txcredit_alloc *entry; + unsigned int hif_usbaudioclass = 0; + + credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz; + if (MAX_MESSAGE_SIZE % target->tgt_cred_sz) + credit_per_maxmsg++; + + /* TODO, this should be configured by the caller! 
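+ * + * With the defaults below (numbers assumed for illustration): given + * 40 target credits and credit_per_maxmsg = 1, the split works out + * to VI = 40 / 4 = 10, VO = 30 / 4 = 7, CTRL = 1, BK = 1, and the + * remaining 21 credits go to BE as the leftover allocation.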
*/ + + credits = target->tgt_creds; + entry = &target->pipe.txcredit_alloc[0]; + + status = -ENOMEM; + + /* FIXME: hif_usbaudioclass is always zero */ + if (hif_usbaudioclass) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: For USB Audio Class- Total:%d\n", + __func__, credits); + entry++; + entry++; + /* Setup VO Service To have Max Credits */ + entry->service_id = WMI_DATA_VO_SVC; + entry->credit_alloc = (credits - 6); + if (entry->credit_alloc == 0) + entry->credit_alloc++; + + credits -= (int) entry->credit_alloc; + if (credits <= 0) + return status; + + entry++; + entry->service_id = WMI_CONTROL_SVC; + entry->credit_alloc = credit_per_maxmsg; + credits -= (int) entry->credit_alloc; + if (credits <= 0) + return status; + + /* leftovers go to best effort */ + entry++; + entry++; + entry->service_id = WMI_DATA_BE_SVC; + entry->credit_alloc = (u8) credits; + status = 0; + } else { + entry++; + entry->service_id = WMI_DATA_VI_SVC; + entry->credit_alloc = credits / 4; + if (entry->credit_alloc == 0) + entry->credit_alloc++; + + credits -= (int) entry->credit_alloc; + if (credits <= 0) + return status; + + entry++; + entry->service_id = WMI_DATA_VO_SVC; + entry->credit_alloc = credits / 4; + if (entry->credit_alloc == 0) + entry->credit_alloc++; + + credits -= (int) entry->credit_alloc; + if (credits <= 0) + return status; + + entry++; + entry->service_id = WMI_CONTROL_SVC; + entry->credit_alloc = credit_per_maxmsg; + credits -= (int) entry->credit_alloc; + if (credits <= 0) + return status; + + entry++; + entry->service_id = WMI_DATA_BK_SVC; + entry->credit_alloc = credit_per_maxmsg; + credits -= (int) entry->credit_alloc; + if (credits <= 0) + return status; + + /* leftovers go to best effort */ + entry++; + entry->service_id = WMI_DATA_BE_SVC; + entry->credit_alloc = (u8) credits; + status = 0; + } + + if (status == 0) { + for (i = 0; i < ENDPOINT_MAX; i++) { + if (target->pipe.txcredit_alloc[i].service_id != 0) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n", + i, + target->pipe.txcredit_alloc[i]. + service_id, + target->pipe.txcredit_alloc[i]. 
+ credit_alloc); + } + } + } + return status; +} + +/* process credit reports and call distribution function */ +static void htc_process_credit_report(struct htc_target *target, + struct htc_credit_report *rpt, + int num_entries, + enum htc_endpoint_id from_ep) +{ + int total_credits = 0, i; + struct htc_endpoint *ep; + + /* lock out TX while we update credits */ + spin_lock_bh(&target->tx_lock); + + for (i = 0; i < num_entries; i++, rpt++) { + if (rpt->eid >= ENDPOINT_MAX) { + WARN_ON_ONCE(1); + spin_unlock_bh(&target->tx_lock); + return; + } + + ep = &target->endpoint[rpt->eid]; + ep->cred_dist.credits += rpt->credits; + + if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) { + spin_unlock_bh(&target->tx_lock); + htc_try_send(target, ep, NULL); + spin_lock_bh(&target->tx_lock); + } + + total_credits += rpt->credits; + } + ath6kl_dbg(ATH6KL_DBG_HTC, + "Report indicated %d credits to distribute\n", + total_credits); + + spin_unlock_bh(&target->tx_lock); +} + +/* flush endpoint TX queue */ +static void htc_flush_tx_endpoint(struct htc_target *target, + struct htc_endpoint *ep, u16 tag) +{ + struct htc_packet *packet; + + spin_lock_bh(&target->tx_lock); + while (get_queue_depth(&ep->txq)) { + packet = list_first_entry(&ep->txq, struct htc_packet, list); + list_del(&packet->list); + packet->status = 0; + send_packet_completion(target, packet); + } + spin_unlock_bh(&target->tx_lock); +} + +/* + * In the adapted HIF layer, struct sk_buff * are passed between HIF and HTC; + * since the upper layers expect struct htc_packet containers, we take the + * completed skb and look up its corresponding HTC packet buffer in a lookup + * list. This is extra overhead that can be fixed by re-aligning HIF + * interfaces with HTC. + */ +static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target, + struct htc_endpoint *ep, + struct sk_buff *skb) +{ + struct htc_packet *packet, *tmp_pkt, *found_packet = NULL; + + spin_lock_bh(&target->tx_lock); + + /* + * iterate from the front of the tx lookup queue; this lookup should + * be fast since the lower layers complete in order, so the completed + * packet should generally be at the head of the list + */ + list_for_each_entry_safe(packet, tmp_pkt, &ep->pipe.tx_lookup_queue, + list) { + /* check for removal */ + if (skb == packet->skb) { + /* found it */ + list_del(&packet->list); + found_packet = packet; + break; + } + } + + spin_unlock_bh(&target->tx_lock); + + return found_packet; +} + +static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb) +{ + struct htc_target *target = ar->htc_target; + struct htc_frame_hdr *htc_hdr; + struct htc_endpoint *ep; + struct htc_packet *packet; + u8 ep_id, *netdata; + + netdata = skb->data; + + htc_hdr = (struct htc_frame_hdr *) netdata; + + ep_id = htc_hdr->eid; + ep = &target->endpoint[ep_id]; + + packet = htc_lookup_tx_packet(target, ep, skb); + if (packet == NULL) { + /* may have already been flushed and freed */ + ath6kl_err("HTC TX lookup failed!\n"); + } else { + /* will be giving this buffer back to upper layers */ + packet->status = 0; + send_packet_completion(target, packet); + } + skb = NULL; + + if (!ep->pipe.tx_credit_flow_enabled) { + /* + * note: when using TX credit flow, the re-checking of queues + * happens when credits flow back from the target.
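+ * (specifically, htc_process_credit_report() above calls + * htc_try_send() for every endpoint that received credits and + * still has queued TX);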
in the + * non-TX credit case, we recheck after the packet completes + */ + htc_try_send(target, ep, NULL); + } + + return 0; +} + +static int htc_send_packets_multiple(struct htc_target *target, + struct list_head *pkt_queue) +{ + struct htc_endpoint *ep; + struct htc_packet *packet, *tmp_pkt; + + if (list_empty(pkt_queue)) + return -EINVAL; + + /* get first packet to find out which ep the packets will go into */ + packet = list_first_entry(pkt_queue, struct htc_packet, list); + + if (packet->endpoint >= ENDPOINT_MAX) { + WARN_ON_ONCE(1); + return -EINVAL; + } + ep = &target->endpoint[packet->endpoint]; + + htc_try_send(target, ep, pkt_queue); + + /* do completion on any packets that couldn't get in */ + if (!list_empty(pkt_queue)) { + list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) { + packet->status = -ENOMEM; + } + + do_send_completion(ep, pkt_queue); + } + + return 0; +} + +/* htc pipe rx path */ +static struct htc_packet *alloc_htc_packet_container(struct htc_target *target) +{ + struct htc_packet *packet; + spin_lock_bh(&target->rx_lock); + + if (target->pipe.htc_packet_pool == NULL) { + spin_unlock_bh(&target->rx_lock); + return NULL; + } + + packet = target->pipe.htc_packet_pool; + target->pipe.htc_packet_pool = (struct htc_packet *) packet->list.next; + + spin_unlock_bh(&target->rx_lock); + + packet->list.next = NULL; + return packet; +} + +static void free_htc_packet_container(struct htc_target *target, + struct htc_packet *packet) +{ + struct list_head *lh; + + spin_lock_bh(&target->rx_lock); + + if (target->pipe.htc_packet_pool == NULL) { + target->pipe.htc_packet_pool = packet; + packet->list.next = NULL; + } else { + lh = (struct list_head *) target->pipe.htc_packet_pool; + packet->list.next = lh; + target->pipe.htc_packet_pool = packet; + } + + spin_unlock_bh(&target->rx_lock); +} + +static int htc_process_trailer(struct htc_target *target, u8 *buffer, + int len, enum htc_endpoint_id from_ep) +{ + struct htc_credit_report *report; + struct htc_record_hdr *record; + u8 *record_buf; + int status = 0; + + while (len > 0) { + if (len < sizeof(struct htc_record_hdr)) { + status = -EINVAL; + break; + } + + /* these are byte aligned structs */ + record = (struct htc_record_hdr *) buffer; + len -= sizeof(struct htc_record_hdr); + buffer += sizeof(struct htc_record_hdr); + + if (record->len > len) { + /* no room left in buffer for record */ + ath6kl_dbg(ATH6KL_DBG_HTC, + "invalid length: %d (id:%d) buffer has: %d bytes left\n", + record->len, record->rec_id, len); + status = -EINVAL; + break; + } + + /* start of record follows the header */ + record_buf = buffer; + + switch (record->rec_id) { + case HTC_RECORD_CREDITS: + if (record->len < sizeof(struct htc_credit_report)) { + WARN_ON_ONCE(1); + return -EINVAL; + } + + report = (struct htc_credit_report *) record_buf; + htc_process_credit_report(target, report, + record->len / sizeof(*report), + from_ep); + break; + default: + ath6kl_dbg(ATH6KL_DBG_HTC, + "unhandled record: id:%d length:%d\n", + record->rec_id, record->len); + break; + } + + /* advance buffer past this record for next time around */ + buffer += record->len; + len -= record->len; + } + + return status; +} + +static void do_recv_completion(struct htc_endpoint *ep, + struct list_head *queue_to_indicate) +{ + struct htc_packet *packet; + + if (list_empty(queue_to_indicate)) { + /* nothing to indicate */ + return; + } + + /* using legacy EpRecv */ + while (!list_empty(queue_to_indicate)) { + packet = list_first_entry(queue_to_indicate, + struct htc_packet, 
list); + list_del(&packet->list); + ep->ep_cb.rx(ep->target, packet); + } + + return; +} + +static void recv_packet_completion(struct htc_target *target, + struct htc_endpoint *ep, + struct htc_packet *packet) +{ + struct list_head container; + INIT_LIST_HEAD(&container); + list_add_tail(&packet->list, &container); + + /* do completion */ + do_recv_completion(ep, &container); +} + +static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb, + u8 pipeid) +{ + struct htc_target *target = ar->htc_target; + u8 *netdata, *trailer, hdr_info; + struct htc_frame_hdr *htc_hdr; + u32 netlen, trailerlen = 0; + struct htc_packet *packet; + struct htc_endpoint *ep; + u16 payload_len; + int status = 0; + + /* + * ar->htc_target can be NULL due to a race condition that can occur + * during driver initialization (we do 'ath6kl_hif_power_on' before + * initializing 'ar->htc_target' via 'ath6kl_htc_create'). + * 'ath6kl_hif_power_on' assigns 'ath6kl_recv_complete' as + * usb_complete_t/callback function for 'usb_fill_bulk_urb'. + * Thus ar->htc_target can be NULL when this is reached + * via ath6kl_recv_complete -> ath6kl_usb_io_comp_work. + */ + if (!target) { + ath6kl_dbg(ATH6KL_DBG_HTC, "Target not yet initialized\n"); + status = -EINVAL; + goto free_skb; + } + + netdata = skb->data; + netlen = skb->len; + + htc_hdr = (struct htc_frame_hdr *) netdata; + + if (htc_hdr->eid >= ENDPOINT_MAX) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "HTC Rx: invalid EndpointID=%d\n", + htc_hdr->eid); + status = -EINVAL; + goto free_skb; + } + ep = &target->endpoint[htc_hdr->eid]; + + payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len)); + + if (netlen < (payload_len + HTC_HDR_LENGTH)) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "HTC Rx: insufficient length, got:%d expected =%zu\n", + netlen, payload_len + HTC_HDR_LENGTH); + status = -EINVAL; + goto free_skb; + } + + /* get flags to check for trailer */ + hdr_info = htc_hdr->flags; + if (hdr_info & HTC_FLG_RX_TRAILER) { + /* extract the trailer length */ + hdr_info = htc_hdr->ctrl[0]; + if ((hdr_info < sizeof(struct htc_record_hdr)) || + (hdr_info > payload_len)) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "invalid header: payloadlen should be %d, CB[0]: %d\n", + payload_len, hdr_info); + status = -EINVAL; + goto free_skb; + } + + trailerlen = hdr_info; + /* process trailer after hdr/apps payload */ + trailer = (u8 *) htc_hdr + HTC_HDR_LENGTH + + payload_len - hdr_info; + status = htc_process_trailer(target, trailer, hdr_info, + htc_hdr->eid); + if (status != 0) + goto free_skb; + } + + if (((int) payload_len - (int) trailerlen) <= 0) { + /* zero length packet with trailer, just drop these */ + goto free_skb; + } + + if (htc_hdr->eid == ENDPOINT_0) { + /* handle HTC control message */ + if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) { + /* + * fatal: target should not send unsolicited + * messages on endpoint 0 + */ + ath6kl_dbg(ATH6KL_DBG_HTC, + "HTC ignores Rx Ctrl after setup complete\n"); + status = -EINVAL; + goto free_skb; + } + + /* remove HTC header */ + skb_pull(skb, HTC_HDR_LENGTH); + + netdata = skb->data; + netlen = skb->len; + + spin_lock_bh(&target->rx_lock); + + target->pipe.ctrl_response_valid = true; + target->pipe.ctrl_response_len = min_t(int, netlen, + HTC_MAX_CTRL_MSG_LEN); + memcpy(target->pipe.ctrl_response_buf, netdata, + target->pipe.ctrl_response_len); + + spin_unlock_bh(&target->rx_lock); + + dev_kfree_skb(skb); + skb = NULL; + + goto free_skb; + } + + /* + * TODO: the message based HIF architecture allocates net bufs + * for recv packets since
it bridges that HIF to the upper layers, + * which expect HTC packets, so we form the packets here + */ + packet = alloc_htc_packet_container(target); + if (packet == NULL) { + status = -ENOMEM; + goto free_skb; + } + + packet->status = 0; + packet->endpoint = htc_hdr->eid; + packet->pkt_cntxt = skb; + + /* TODO: for backwards compatibility */ + packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH; + packet->act_len = netlen - HTC_HDR_LENGTH - trailerlen; + + /* + * TODO: this is a hack because the driver layer will set the + * actual len of the skb again which will just double the len + */ + skb_trim(skb, 0); + + recv_packet_completion(target, ep, packet); + + /* recover the packet container */ + free_htc_packet_container(target, packet); + skb = NULL; + +free_skb: + dev_kfree_skb(skb); + + return status; +} + +static void htc_flush_rx_queue(struct htc_target *target, + struct htc_endpoint *ep) +{ + struct list_head container; + struct htc_packet *packet; + + spin_lock_bh(&target->rx_lock); + + while (1) { + if (list_empty(&ep->rx_bufq)) + break; + + packet = list_first_entry(&ep->rx_bufq, + struct htc_packet, list); + list_del(&packet->list); + + spin_unlock_bh(&target->rx_lock); + packet->status = -ECANCELED; + packet->act_len = 0; + + ath6kl_dbg(ATH6KL_DBG_HTC, + "Flushing RX packet:0x%p, length:%d, ep:%d\n", + packet, packet->buf_len, + packet->endpoint); + + INIT_LIST_HEAD(&container); + list_add_tail(&packet->list, &container); + + /* give the packet back */ + do_recv_completion(ep, &container); + spin_lock_bh(&target->rx_lock); + } + + spin_unlock_bh(&target->rx_lock); +} + +/* polling routine to wait for a control packet to be received */ +static int htc_wait_recv_ctrl_message(struct htc_target *target) +{ + int count = HTC_TARGET_RESPONSE_POLL_COUNT; + + while (count > 0) { + spin_lock_bh(&target->rx_lock); + + if (target->pipe.ctrl_response_valid) { + target->pipe.ctrl_response_valid = false; + spin_unlock_bh(&target->rx_lock); + break; + } + + spin_unlock_bh(&target->rx_lock); + + count--; + + msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT); + } + + if (count <= 0) { + ath6kl_warn("htc pipe control receive timeout!\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void htc_rxctrl_complete(struct htc_target *context, + struct htc_packet *packet) +{ + struct sk_buff *skb = packet->skb; + + if (packet->endpoint == ENDPOINT_0 && + packet->status == -ECANCELED && + skb != NULL) + dev_kfree_skb(skb); +} + +/* htc pipe initialization */ +static void reset_endpoint_states(struct htc_target *target) +{ + struct htc_endpoint *ep; + int i; + + for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { + ep = &target->endpoint[i]; + ep->svc_id = 0; + ep->len_max = 0; + ep->max_txq_depth = 0; + ep->eid = i; + INIT_LIST_HEAD(&ep->txq); + INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue); + INIT_LIST_HEAD(&ep->rx_bufq); + ep->target = target; + ep->pipe.tx_credit_flow_enabled = true; + } +} + +/* start HTC, this is called after all services are connected */ +static int htc_config_target_hif_pipe(struct htc_target *target) +{ + return 0; +} + +/* htc service functions */ +static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id) +{ + u8 allocation = 0; + int i; + + for (i = 0; i < ENDPOINT_MAX; i++) { + if (target->pipe.txcredit_alloc[i].service_id == service_id) + allocation = + target->pipe.txcredit_alloc[i].credit_alloc; + } + + if (allocation == 0) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "HTC Service TX : 0x%2.2X : allocation is zero!\n", + service_id); + } + + return allocation; +} + +static int
ath6kl_htc_pipe_conn_service(struct htc_target *target, + struct htc_service_connect_req *conn_req, + struct htc_service_connect_resp *conn_resp) +{ + struct ath6kl *ar = target->dev->ar; + struct htc_packet *packet = NULL; + struct htc_conn_service_resp *resp_msg; + struct htc_conn_service_msg *conn_msg; + enum htc_endpoint_id assigned_epid = ENDPOINT_MAX; + bool disable_credit_flowctrl = false; + unsigned int max_msg_size = 0; + struct htc_endpoint *ep; + int length, status = 0; + struct sk_buff *skb; + u8 tx_alloc; + u16 flags; + + if (conn_req->svc_id == 0) { + WARN_ON_ONCE(1); + status = -EINVAL; + goto free_packet; + } + + if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) { + /* special case for pseudo control service */ + assigned_epid = ENDPOINT_0; + max_msg_size = HTC_MAX_CTRL_MSG_LEN; + tx_alloc = 0; + + } else { + tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id); + if (tx_alloc == 0) { + status = -ENOMEM; + goto free_packet; + } + + /* allocate a packet to send to the target */ + packet = htc_alloc_txctrl_packet(target); + + if (packet == NULL) { + WARN_ON_ONCE(1); + status = -ENOMEM; + goto free_packet; + } + + skb = packet->skb; + length = sizeof(struct htc_conn_service_msg); + + /* assemble connect service message */ + conn_msg = skb_put(skb, length); + if (conn_msg == NULL) { + WARN_ON_ONCE(1); + status = -EINVAL; + goto free_packet; + } + + memset(conn_msg, 0, + sizeof(struct htc_conn_service_msg)); + conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID); + conn_msg->svc_id = cpu_to_le16(conn_req->svc_id); + conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags & + ~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK); + + /* tell target desired recv alloc for this ep */ + flags = tx_alloc << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT; + conn_msg->conn_flags |= cpu_to_le16(flags); + + if (conn_req->conn_flags & + HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL) { + disable_credit_flowctrl = true; + } + + set_htc_pkt_info(packet, NULL, (u8 *) conn_msg, + length, + ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); + + status = ath6kl_htc_pipe_tx(target, packet); + + /* we don't own it anymore */ + packet = NULL; + if (status != 0) + goto free_packet; + + /* wait for response */ + status = htc_wait_recv_ctrl_message(target); + if (status != 0) + goto free_packet; + + /* we controlled the buffer creation so it has to be + * properly aligned + */ + resp_msg = (struct htc_conn_service_resp *) + target->pipe.ctrl_response_buf; + + if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) || + (target->pipe.ctrl_response_len < sizeof(*resp_msg))) { + /* this message is not valid */ + WARN_ON_ONCE(1); + status = -EINVAL; + goto free_packet; + } + + ath6kl_dbg(ATH6KL_DBG_TRC, + "%s: service 0x%X conn resp: status: %d ep: %d\n", + __func__, resp_msg->svc_id, resp_msg->status, + resp_msg->eid); + + conn_resp->resp_code = resp_msg->status; + /* check response status */ + if (resp_msg->status != HTC_SERVICE_SUCCESS) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "Target failed service 0x%X connect request (status:%d)\n", + resp_msg->svc_id, resp_msg->status); + status = -EINVAL; + goto free_packet; + } + + assigned_epid = (enum htc_endpoint_id) resp_msg->eid; + max_msg_size = le16_to_cpu(resp_msg->max_msg_sz); + } + + /* the rest are parameter checks so set the error status */ + status = -EINVAL; + + if (assigned_epid >= ENDPOINT_MAX) { + WARN_ON_ONCE(1); + goto free_packet; + } + + if (max_msg_size == 0) { + WARN_ON_ONCE(1); + goto free_packet; + } + + ep = &target->endpoint[assigned_epid]; + ep->eid = assigned_epid; + if (ep->svc_id != 0) 
{ + /* endpoint already in use! */ + WARN_ON_ONCE(1); + goto free_packet; + } + + /* return assigned endpoint to caller */ + conn_resp->endpoint = assigned_epid; + conn_resp->len_max = max_msg_size; + + /* setup the endpoint */ + ep->svc_id = conn_req->svc_id; /* this marks ep in use */ + ep->max_txq_depth = conn_req->max_txq_depth; + ep->len_max = max_msg_size; + ep->cred_dist.credits = tx_alloc; + ep->cred_dist.cred_sz = target->tgt_cred_sz; + ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz; + if (max_msg_size % target->tgt_cred_sz) + ep->cred_dist.cred_per_msg++; + + /* copy all the callbacks */ + ep->ep_cb = conn_req->ep_cb; + + /* initialize tx_drop_packet_threshold */ + ep->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM; + + status = ath6kl_hif_pipe_map_service(ar, ep->svc_id, + &ep->pipe.pipeid_ul, + &ep->pipe.pipeid_dl); + if (status != 0) + goto free_packet; + + ath6kl_dbg(ATH6KL_DBG_HTC, + "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n", + ep->svc_id, ep->pipe.pipeid_ul, + ep->pipe.pipeid_dl, ep->eid); + + if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) { + ep->pipe.tx_credit_flow_enabled = false; + ath6kl_dbg(ATH6KL_DBG_HTC, + "SVC: 0x%4.4X ep:%d TX flow control off\n", + ep->svc_id, assigned_epid); + } + +free_packet: + if (packet != NULL) + htc_free_txctrl_packet(target, packet); + return status; +} + +/* htc export functions */ +static void *ath6kl_htc_pipe_create(struct ath6kl *ar) +{ + int status = 0; + struct htc_endpoint *ep = NULL; + struct htc_target *target = NULL; + struct htc_packet *packet; + int i; + + target = kzalloc(sizeof(struct htc_target), GFP_KERNEL); + if (target == NULL) { + ath6kl_err("htc create unable to allocate memory\n"); + status = -ENOMEM; + goto fail_htc_create; + } + + spin_lock_init(&target->htc_lock); + spin_lock_init(&target->rx_lock); + spin_lock_init(&target->tx_lock); + + reset_endpoint_states(target); + + for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) { + packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL); + + if (packet != NULL) + free_htc_packet_container(target, packet); + } + + target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL); + if (!target->dev) { + ath6kl_err("unable to allocate memory\n"); + status = -ENOMEM; + goto fail_htc_create; + } + target->dev->ar = ar; + target->dev->htc_cnxt = target; + + /* Get HIF default pipe for HTC message exchange */ + ep = &target->endpoint[ENDPOINT_0]; + + ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul, + &ep->pipe.pipeid_dl); + + return target; + +fail_htc_create: + if (status != 0) { + if (target != NULL) + ath6kl_htc_pipe_cleanup(target); + + target = NULL; + } + return target; +} + +/* cleanup the HTC instance */ +static void ath6kl_htc_pipe_cleanup(struct htc_target *target) +{ + struct htc_packet *packet; + + while (true) { + packet = alloc_htc_packet_container(target); + if (packet == NULL) + break; + kfree(packet); + } + + kfree(target->dev); + + /* kfree our instance */ + kfree(target); +} + +static int ath6kl_htc_pipe_start(struct htc_target *target) +{ + struct sk_buff *skb; + struct htc_setup_comp_ext_msg *setup; + struct htc_packet *packet; + + htc_config_target_hif_pipe(target); + + /* allocate a buffer to send */ + packet = htc_alloc_txctrl_packet(target); + if (packet == NULL) { + WARN_ON_ONCE(1); + return -ENOMEM; + } + + skb = packet->skb; + + /* assemble setup complete message */ + setup = skb_put(skb, sizeof(*setup)); + memset(setup, 0, sizeof(struct htc_setup_comp_ext_msg)); + setup->msg_id = 
cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID); + + ath6kl_dbg(ATH6KL_DBG_HTC, "HTC using TX credit flow control\n"); + + set_htc_pkt_info(packet, NULL, (u8 *) setup, + sizeof(struct htc_setup_comp_ext_msg), + ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); + + target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE; + + return ath6kl_htc_pipe_tx(target, packet); +} + +static void ath6kl_htc_pipe_stop(struct htc_target *target) +{ + int i; + struct htc_endpoint *ep; + + /* cleanup endpoints */ + for (i = 0; i < ENDPOINT_MAX; i++) { + ep = &target->endpoint[i]; + htc_flush_rx_queue(target, ep); + htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL); + } + + reset_endpoint_states(target); + target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE; +} + +static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target, + enum htc_endpoint_id endpoint) +{ + int num; + + spin_lock_bh(&target->rx_lock); + num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq)); + spin_unlock_bh(&target->rx_lock); + + return num; +} + +static int ath6kl_htc_pipe_tx(struct htc_target *target, + struct htc_packet *packet) +{ + struct list_head queue; + + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: endPointId: %d, buffer: 0x%p, length: %d\n", + __func__, packet->endpoint, packet->buf, + packet->act_len); + + INIT_LIST_HEAD(&queue); + list_add_tail(&packet->list, &queue); + + return htc_send_packets_multiple(target, &queue); +} + +static int ath6kl_htc_pipe_wait_target(struct htc_target *target) +{ + struct htc_ready_ext_msg *ready_msg; + struct htc_service_connect_req connect; + struct htc_service_connect_resp resp; + int status = 0; + + status = htc_wait_recv_ctrl_message(target); + + if (status != 0) + return status; + + if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) { + ath6kl_warn("invalid htc pipe ready msg len: %d\n", + target->pipe.ctrl_response_len); + return -ECOMM; + } + + ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf; + + if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) { + ath6kl_warn("invalid htc pipe ready msg: 0x%x\n", + ready_msg->ver2_0_info.msg_id); + return -ECOMM; + } + + ath6kl_dbg(ATH6KL_DBG_HTC, + "Target Ready! : transmit resources : %d size:%d\n", + ready_msg->ver2_0_info.cred_cnt, + ready_msg->ver2_0_info.cred_sz); + + target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt); + target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz); + + if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0)) + return -ECOMM; + + htc_setup_target_buffer_assignments(target); + + /* setup our pseudo HTC control endpoint connection */ + memset(&connect, 0, sizeof(connect)); + memset(&resp, 0, sizeof(resp)); + connect.ep_cb.tx_complete = htc_txctrl_complete; + connect.ep_cb.rx = htc_rxctrl_complete; + connect.max_txq_depth = NUM_CONTROL_TX_BUFFERS; + connect.svc_id = HTC_CTRL_RSVD_SVC; + + /* connect fake service */ + status = ath6kl_htc_pipe_conn_service(target, &connect, &resp); + + return status; +} + +static void ath6kl_htc_pipe_flush_txep(struct htc_target *target, + enum htc_endpoint_id endpoint, u16 tag) +{ + struct htc_endpoint *ep = &target->endpoint[endpoint]; + + if (ep->svc_id == 0) { + WARN_ON_ONCE(1); + /* not in use.. 
*/ + return; + } + + htc_flush_tx_endpoint(target, ep, tag); +} + +static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target, + struct list_head *pkt_queue) +{ + struct htc_packet *packet, *tmp_pkt, *first; + struct htc_endpoint *ep; + int status = 0; + + if (list_empty(pkt_queue)) + return -EINVAL; + + first = list_first_entry(pkt_queue, struct htc_packet, list); + + if (first->endpoint >= ENDPOINT_MAX) { + WARN_ON_ONCE(1); + return -EINVAL; + } + + ath6kl_dbg(ATH6KL_DBG_HTC, "%s: epid: %d, cnt:%d, len: %d\n", + __func__, first->endpoint, get_queue_depth(pkt_queue), + first->buf_len); + + ep = &target->endpoint[first->endpoint]; + + spin_lock_bh(&target->rx_lock); + + /* store receive packets */ + list_splice_tail_init(pkt_queue, &ep->rx_bufq); + + spin_unlock_bh(&target->rx_lock); + + if (status != 0) { + /* walk through queue and mark each one canceled */ + list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) { + packet->status = -ECANCELED; + } + + do_recv_completion(ep, pkt_queue); + } + + return status; +} + +static void ath6kl_htc_pipe_activity_changed(struct htc_target *target, + enum htc_endpoint_id ep, + bool active) +{ + /* TODO */ +} + +static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target) +{ + struct htc_endpoint *endpoint; + struct htc_packet *packet, *tmp_pkt; + int i; + + for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { + endpoint = &target->endpoint[i]; + + spin_lock_bh(&target->rx_lock); + + list_for_each_entry_safe(packet, tmp_pkt, + &endpoint->rx_bufq, list) { + list_del(&packet->list); + spin_unlock_bh(&target->rx_lock); + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx flush pkt 0x%p len %d ep %d\n", + packet, packet->buf_len, + packet->endpoint); + dev_kfree_skb(packet->pkt_cntxt); + spin_lock_bh(&target->rx_lock); + } + + spin_unlock_bh(&target->rx_lock); + } +} + +static int ath6kl_htc_pipe_credit_setup(struct htc_target *target, + struct ath6kl_htc_credit_info *info) +{ + return 0; +} + +static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops = { + .create = ath6kl_htc_pipe_create, + .wait_target = ath6kl_htc_pipe_wait_target, + .start = ath6kl_htc_pipe_start, + .conn_service = ath6kl_htc_pipe_conn_service, + .tx = ath6kl_htc_pipe_tx, + .stop = ath6kl_htc_pipe_stop, + .cleanup = ath6kl_htc_pipe_cleanup, + .flush_txep = ath6kl_htc_pipe_flush_txep, + .flush_rx_buf = ath6kl_htc_pipe_flush_rx_buf, + .activity_changed = ath6kl_htc_pipe_activity_changed, + .get_rxbuf_num = ath6kl_htc_pipe_get_rxbuf_num, + .add_rxbuf_multiple = ath6kl_htc_pipe_add_rxbuf_multiple, + .credit_setup = ath6kl_htc_pipe_credit_setup, + .tx_complete = ath6kl_htc_pipe_tx_complete, + .rx_complete = ath6kl_htc_pipe_rx_complete, +}; + +void ath6kl_htc_pipe_attach(struct ath6kl *ar) +{ + ar->htc_ops = &ath6kl_htc_pipe_ops; +} diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c new file mode 100644 index 000000000..201e45554 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -0,0 +1,1945 @@ + +/* + * Copyright (c) 2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/moduleparam.h> +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/of.h> +#include <linux/mmc/sdio_func.h> +#include <linux/vmalloc.h> + +#include "core.h" +#include "cfg80211.h" +#include "target.h" +#include "debug.h" +#include "hif-ops.h" +#include "htc-ops.h" + +static const struct ath6kl_hw hw_list[] = { + { + .id = AR6003_HW_2_0_VERSION, + .name = "ar6003 hw 2.0", + .dataset_patch_addr = 0x57e884, + .app_load_addr = 0x543180, + .board_ext_data_addr = 0x57e500, + .reserved_ram_size = 6912, + .refclk_hz = 26000000, + .uarttx_pin = 8, + .flags = ATH6KL_HW_SDIO_CRC_ERROR_WAR, + + /* hw2.0 needs override address hardcoded */ + .app_start_override_addr = 0x944C00, + + .fw = { + .dir = AR6003_HW_2_0_FW_DIR, + .otp = AR6003_HW_2_0_OTP_FILE, + .fw = AR6003_HW_2_0_FIRMWARE_FILE, + .tcmd = AR6003_HW_2_0_TCMD_FIRMWARE_FILE, + .patch = AR6003_HW_2_0_PATCH_FILE, + }, + + .fw_board = AR6003_HW_2_0_BOARD_DATA_FILE, + .fw_default_board = AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE, + }, + { + .id = AR6003_HW_2_1_1_VERSION, + .name = "ar6003 hw 2.1.1", + .dataset_patch_addr = 0x57ff74, + .app_load_addr = 0x1234, + .board_ext_data_addr = 0x542330, + .reserved_ram_size = 512, + .refclk_hz = 26000000, + .uarttx_pin = 8, + .testscript_addr = 0x57ef74, + .flags = ATH6KL_HW_SDIO_CRC_ERROR_WAR, + + .fw = { + .dir = AR6003_HW_2_1_1_FW_DIR, + .otp = AR6003_HW_2_1_1_OTP_FILE, + .fw = AR6003_HW_2_1_1_FIRMWARE_FILE, + .tcmd = AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE, + .patch = AR6003_HW_2_1_1_PATCH_FILE, + .utf = AR6003_HW_2_1_1_UTF_FIRMWARE_FILE, + .testscript = AR6003_HW_2_1_1_TESTSCRIPT_FILE, + }, + + .fw_board = AR6003_HW_2_1_1_BOARD_DATA_FILE, + .fw_default_board = AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE, + }, + { + .id = AR6004_HW_1_0_VERSION, + .name = "ar6004 hw 1.0", + .dataset_patch_addr = 0x57e884, + .app_load_addr = 0x1234, + .board_ext_data_addr = 0x437000, + .reserved_ram_size = 19456, + .board_addr = 0x433900, + .refclk_hz = 26000000, + .uarttx_pin = 11, + .flags = 0, + + .fw = { + .dir = AR6004_HW_1_0_FW_DIR, + .fw = AR6004_HW_1_0_FIRMWARE_FILE, + }, + + .fw_board = AR6004_HW_1_0_BOARD_DATA_FILE, + .fw_default_board = AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE, + }, + { + .id = AR6004_HW_1_1_VERSION, + .name = "ar6004 hw 1.1", + .dataset_patch_addr = 0x57e884, + .app_load_addr = 0x1234, + .board_ext_data_addr = 0x437000, + .reserved_ram_size = 11264, + .board_addr = 0x43d400, + .refclk_hz = 40000000, + .uarttx_pin = 11, + .flags = 0, + .fw = { + .dir = AR6004_HW_1_1_FW_DIR, + .fw = AR6004_HW_1_1_FIRMWARE_FILE, + }, + + .fw_board = AR6004_HW_1_1_BOARD_DATA_FILE, + .fw_default_board = AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE, + }, + { + .id = AR6004_HW_1_2_VERSION, + .name = "ar6004 hw 1.2", + .dataset_patch_addr = 0x436ecc, + .app_load_addr = 0x1234, + .board_ext_data_addr = 0x437000, + .reserved_ram_size = 9216, + .board_addr = 0x435c00, + .refclk_hz = 40000000, + .uarttx_pin = 11, + .flags = 0, + + .fw = { + .dir = AR6004_HW_1_2_FW_DIR, + .fw = AR6004_HW_1_2_FIRMWARE_FILE, + }, + .fw_board = AR6004_HW_1_2_BOARD_DATA_FILE, + .fw_default_board = AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE, + }, + { + .id = 
AR6004_HW_1_3_VERSION, + .name = "ar6004 hw 1.3", + .dataset_patch_addr = 0x437860, + .app_load_addr = 0x1234, + .board_ext_data_addr = 0x437000, + .reserved_ram_size = 7168, + .board_addr = 0x436400, + .refclk_hz = 0, + .uarttx_pin = 11, + .flags = 0, + + .fw = { + .dir = AR6004_HW_1_3_FW_DIR, + .fw = AR6004_HW_1_3_FIRMWARE_FILE, + .tcmd = AR6004_HW_1_3_TCMD_FIRMWARE_FILE, + .utf = AR6004_HW_1_3_UTF_FIRMWARE_FILE, + .testscript = AR6004_HW_1_3_TESTSCRIPT_FILE, + }, + + .fw_board = AR6004_HW_1_3_BOARD_DATA_FILE, + .fw_default_board = AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE, + }, + { + .id = AR6004_HW_3_0_VERSION, + .name = "ar6004 hw 3.0", + .dataset_patch_addr = 0, + .app_load_addr = 0x1234, + .board_ext_data_addr = 0, + .reserved_ram_size = 7168, + .board_addr = 0x436400, + .testscript_addr = 0, + .uarttx_pin = 11, + .flags = 0, + + .fw = { + .dir = AR6004_HW_3_0_FW_DIR, + .fw = AR6004_HW_3_0_FIRMWARE_FILE, + .tcmd = AR6004_HW_3_0_TCMD_FIRMWARE_FILE, + .utf = AR6004_HW_3_0_UTF_FIRMWARE_FILE, + .testscript = AR6004_HW_3_0_TESTSCRIPT_FILE, + }, + + .fw_board = AR6004_HW_3_0_BOARD_DATA_FILE, + .fw_default_board = AR6004_HW_3_0_DEFAULT_BOARD_DATA_FILE, + }, +}; + +/* + * Include definitions here that can be used to tune the WLAN module + * behavior. Different customers can tune the behavior here as per + * their needs. + */ + +/* + * This configuration item enables/disables keepalive support. + * Keepalive support: In the absence of any data traffic to the AP, null + * frames will be sent to the AP at a periodic interval, to keep the + * association active. This configuration item defines the periodic + * interval. Use a value of zero to disable keepalive support. + * Default: 60 seconds + */ +#define WLAN_CONFIG_KEEP_ALIVE_INTERVAL 60 + +/* + * This configuration item sets the disconnect timeout. Firmware delays + * sending the disconnect event to the host for this timeout after it + * gets disconnected from the current AP.
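+ * (The value is sent to the firmware in + * ath6kl_target_config_wlan_params() below via + * ath6kl_wmi_disctimeout_cmd().)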
+ * If the firmware successfully roams within the disconnect timeout, + * it sends a new connect event. + */ +#define WLAN_CONFIG_DISCONNECT_TIMEOUT 10 + + +#define ATH6KL_DATA_OFFSET 64 +struct sk_buff *ath6kl_buf_alloc(int size) +{ + struct sk_buff *skb; + u16 reserved; + + /* Add cacheline space at front and back of buffer */ + reserved = roundup((2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET + + sizeof(struct htc_packet) + ATH6KL_HTC_ALIGN_BYTES, 4); + skb = dev_alloc_skb(size + reserved); + + if (skb) + skb_reserve(skb, reserved - L1_CACHE_BYTES); + return skb; +} + +void ath6kl_init_profile_info(struct ath6kl_vif *vif) +{ + vif->ssid_len = 0; + memset(vif->ssid, 0, sizeof(vif->ssid)); + + vif->dot11_auth_mode = OPEN_AUTH; + vif->auth_mode = NONE_AUTH; + vif->prwise_crypto = NONE_CRYPT; + vif->prwise_crypto_len = 0; + vif->grp_crypto = NONE_CRYPT; + vif->grp_crypto_len = 0; + memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list)); + memset(vif->req_bssid, 0, sizeof(vif->req_bssid)); + memset(vif->bssid, 0, sizeof(vif->bssid)); + vif->bss_ch = 0; +} + +static int ath6kl_set_host_app_area(struct ath6kl *ar) +{ + u32 address, data; + struct host_app_area host_app_area; + + /* Fetch the address of the host_app_area_s + * instance in the host interest area */ + address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_app_host_interest)); + address = TARG_VTOP(ar->target_type, address); + + if (ath6kl_diag_read32(ar, address, &data)) + return -EIO; + + address = TARG_VTOP(ar->target_type, data); + host_app_area.wmi_protocol_ver = cpu_to_le32(WMI_PROTOCOL_VERSION); + if (ath6kl_diag_write(ar, address, (u8 *) &host_app_area, + sizeof(struct host_app_area))) + return -EIO; + + return 0; +} + +static inline void set_ac2_ep_map(struct ath6kl *ar, + u8 ac, + enum htc_endpoint_id ep) +{ + ar->ac2ep_map[ac] = ep; + ar->ep2ac_map[ep] = ac; +} + +/* connect to a service */ +static int ath6kl_connectservice(struct ath6kl *ar, + struct htc_service_connect_req *con_req, + char *desc) +{ + int status; + struct htc_service_connect_resp response; + + memset(&response, 0, sizeof(response)); + + status = ath6kl_htc_conn_service(ar->htc_target, con_req, &response); + if (status) { + ath6kl_err("failed to connect to %s service status:%d\n", + desc, status); + return status; + } + + switch (con_req->svc_id) { + case WMI_CONTROL_SVC: + if (test_bit(WMI_ENABLED, &ar->flag)) + ath6kl_wmi_set_control_ep(ar->wmi, response.endpoint); + ar->ctrl_ep = response.endpoint; + break; + case WMI_DATA_BE_SVC: + set_ac2_ep_map(ar, WMM_AC_BE, response.endpoint); + break; + case WMI_DATA_BK_SVC: + set_ac2_ep_map(ar, WMM_AC_BK, response.endpoint); + break; + case WMI_DATA_VI_SVC: + set_ac2_ep_map(ar, WMM_AC_VI, response.endpoint); + break; + case WMI_DATA_VO_SVC: + set_ac2_ep_map(ar, WMM_AC_VO, response.endpoint); + break; + default: + ath6kl_err("service id is not mapped %d\n", con_req->svc_id); + return -EINVAL; + } + + return 0; +} + +static int ath6kl_init_service_ep(struct ath6kl *ar) +{ + struct htc_service_connect_req connect; + + memset(&connect, 0, sizeof(connect)); + + /* these fields are the same for all service endpoints */ + connect.ep_cb.tx_comp_multi = ath6kl_tx_complete; + connect.ep_cb.rx = ath6kl_rx; + connect.ep_cb.rx_refill = ath6kl_rx_refill; + connect.ep_cb.tx_full = ath6kl_tx_queue_full; + + /* + * Set the max queue depth so that our ath6kl_tx_queue_full handler + * gets called.
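+ * For each overflowing packet, the handler returns an + * enum htc_send_full_action - HTC_SEND_FULL_KEEP or + * HTC_SEND_FULL_DROP - telling the HTC layer whether to keep + * queuing the packet or drop it (see htc_try_send() above for the + * pipe variant of this logic).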
+	 */
+	connect.max_txq_depth = MAX_DEFAULT_SEND_QUEUE_DEPTH;
+	connect.ep_cb.rx_refill_thresh = ATH6KL_MAX_RX_BUFFERS / 4;
+	if (!connect.ep_cb.rx_refill_thresh)
+		connect.ep_cb.rx_refill_thresh++;
+
+	/* connect to control service */
+	connect.svc_id = WMI_CONTROL_SVC;
+	if (ath6kl_connectservice(ar, &connect, "WMI CONTROL"))
+		return -EIO;
+
+	connect.flags |= HTC_FLGS_TX_BNDL_PAD_EN;
+
+	/*
+	 * Limit the HTC message size on the send path: although we can
+	 * receive A-MSDU frames of up to 4K, we will only send
+	 * ethernet-sized (802.3) frames on the send path.
+	 */
+	connect.max_rxmsg_sz = WMI_MAX_TX_DATA_FRAME_LENGTH;
+
+	/*
+	 * To reduce the amount of committed memory for larger A_MSDU
+	 * frames, use the recv-alloc threshold mechanism for larger
+	 * packets.
+	 */
+	connect.ep_cb.rx_alloc_thresh = ATH6KL_BUFFER_SIZE;
+	connect.ep_cb.rx_allocthresh = ath6kl_alloc_amsdu_rxbuf;
+
+	/*
+	 * For the remaining data services set the connection flag to
+	 * reduce dribbling, if configured to do so.
+	 */
+	connect.conn_flags |= HTC_CONN_FLGS_REDUCE_CRED_DRIB;
+	connect.conn_flags &= ~HTC_CONN_FLGS_THRESH_MASK;
+	connect.conn_flags |= HTC_CONN_FLGS_THRESH_LVL_HALF;
+
+	connect.svc_id = WMI_DATA_BE_SVC;
+
+	if (ath6kl_connectservice(ar, &connect, "WMI DATA BE"))
+		return -EIO;
+
+	/* connect to the background service, map this to WMI LOW_PRI */
+	connect.svc_id = WMI_DATA_BK_SVC;
+	if (ath6kl_connectservice(ar, &connect, "WMI DATA BK"))
+		return -EIO;
+
+	/* connect to the Video service, map this to HI PRI */
+	connect.svc_id = WMI_DATA_VI_SVC;
+	if (ath6kl_connectservice(ar, &connect, "WMI DATA VI"))
+		return -EIO;
+
+	/*
+	 * Connect to the VO service; this is currently not mapped to a WMI
+	 * priority stream due to historical reasons. WMI originally
+	 * defined 3 priorities over 3 mailboxes. We can change this when
+	 * WMI is reworked so that priorities are not dependent on
+	 * mailboxes.
+	 */
+	connect.svc_id = WMI_DATA_VO_SVC;
+	if (ath6kl_connectservice(ar, &connect, "WMI DATA VO"))
+		return -EIO;
+
+	return 0;
+}
+
+void ath6kl_init_control_info(struct ath6kl_vif *vif)
+{
+	ath6kl_init_profile_info(vif);
+	vif->def_txkey_index = 0;
+	memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+	vif->ch_hint = 0;
+}
+
+/*
+ * Set HTC/Mbox operational parameters; this can only be called when the
+ * target is in the BMI phase.
+ */
+static int ath6kl_set_htc_params(struct ath6kl *ar, u32 mbox_isr_yield_val,
+				 u8 htc_ctrl_buf)
+{
+	int status;
+	u32 blk_size;
+
+	blk_size = ar->mbox_info.block_size;
+
+	if (htc_ctrl_buf)
+		blk_size |= ((u32)htc_ctrl_buf) << 16;
+
+	/* set the host interest area for the block size */
+	status = ath6kl_bmi_write_hi32(ar, hi_mbox_io_block_sz, blk_size);
+	if (status) {
+		ath6kl_err("bmi_write_memory for IO block size failed\n");
+		goto out;
+	}
+
+	ath6kl_dbg(ATH6KL_DBG_TRC, "block size set: %d (target addr:0x%X)\n",
+		   blk_size,
+		   ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_mbox_io_block_sz)));
+
+	if (mbox_isr_yield_val) {
+		/* set the host interest area for the mbox ISR yield limit */
+		status = ath6kl_bmi_write_hi32(ar, hi_mbox_isr_yield_limit,
+					       mbox_isr_yield_val);
+		if (status) {
+			ath6kl_err("bmi_write_memory for yield limit failed\n");
+			goto out;
+		}
+	}
+
+out:
+	return status;
+}
+
+static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
+{
+	int ret;
+
+	/*
+	 * Configure the device for rx dot11 header rules. "0,0" are the
+	 * default values. Required if checksum offload is needed. Set
+	 * RxMetaVersion to 2.
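+	 * (The two trailing zeros in the call below are assumed to keep the
+	 * dot11-header and host-defragmentation options at their defaults.)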
+	 */
+	ret = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx,
+						 ar->rx_meta_ver, 0, 0);
+	if (ret) {
+		ath6kl_err("unable to set the rx frame format: %d\n", ret);
+		return ret;
+	}
+
+	if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN) {
+		ret = ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1,
+					      IGNORE_PS_FAIL_DURING_SCAN);
+		if (ret) {
+			ath6kl_err("unable to set power save fail event policy: %d\n",
+				   ret);
+			return ret;
+		}
+	}
+
+	if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER)) {
+		ret = ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0,
+						   WMI_FOLLOW_BARKER_IN_ERP);
+		if (ret) {
+			ath6kl_err("unable to set barker preamble policy: %d\n",
+				   ret);
+			return ret;
+		}
+	}
+
+	ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx,
+					   WLAN_CONFIG_KEEP_ALIVE_INTERVAL);
+	if (ret) {
+		ath6kl_err("unable to set keep alive interval: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, idx,
+					 WLAN_CONFIG_DISCONNECT_TIMEOUT);
+	if (ret) {
+		ath6kl_err("unable to set disconnect timeout: %d\n", ret);
+		return ret;
+	}
+
+	if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST)) {
+		ret = ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED);
+		if (ret) {
+			ath6kl_err("unable to set txop bursting: %d\n", ret);
+			return ret;
+		}
+	}
+
+	if (ar->p2p && (ar->vif_max == 1 || idx)) {
+		ret = ath6kl_wmi_info_req_cmd(ar->wmi, idx,
+					      P2P_FLAG_CAPABILITIES_REQ |
+					      P2P_FLAG_MACADDR_REQ |
+					      P2P_FLAG_HMODEL_REQ);
+		if (ret) {
+			ath6kl_dbg(ATH6KL_DBG_TRC,
+				   "failed to request P2P capabilities (%d) - assuming P2P not supported\n",
+				   ret);
+			ar->p2p = false;
+		}
+	}
+
+	if (ar->p2p && (ar->vif_max == 1 || idx)) {
+		/* Enable Probe Request reporting for P2P */
+		ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, idx, true);
+		if (ret) {
+			ath6kl_dbg(ATH6KL_DBG_TRC,
+				   "failed to enable Probe Request reporting (%d)\n",
+				   ret);
+		}
+	}
+
+	return ret;
+}
+
+int ath6kl_configure_target(struct ath6kl *ar)
+{
+	u32 param, ram_reserved_size;
+	u8 fw_iftype, fw_mode = 0, fw_submode = 0;
+	int i, status;
+
+	param = !!(ar->conf_flags & ATH6KL_CONF_UART_DEBUG);
+	if (ath6kl_bmi_write_hi32(ar, hi_serial_enable, param)) {
+		ath6kl_err("bmi_write_memory for uart debug failed\n");
+		return -EIO;
+	}
+
+	/*
+	 * Note: Even though the firmware interface type is chosen as
+	 * BSS_STA for all three interfaces, each can be configured as
+	 * IBSS/AP as long as the firmware submode remains normal mode
+	 * (0 - AP, STA and IBSS). But due to a target assert in the
+	 * firmware, only one interface is configured for now.
+	 */
+	fw_iftype = HI_OPTION_FW_MODE_BSS_STA;
+
+	for (i = 0; i < ar->vif_max; i++)
+		fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS);
+
+	/*
+	 * Submodes when the firmware does not support dynamic interface
+	 * switching:
+	 *   vif[0] - AP/STA/IBSS
+	 *   vif[1] - "P2P dev"/"P2P GO"/"P2P Client"
+	 *   vif[2] - "P2P dev"/"P2P GO"/"P2P Client"
+	 * Otherwise, all the interfaces are initialized as P2P dev.
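+	 * For example, with vif_max = 3 the per-vif submode fields pack as
+	 * vif0 | vif1 << SUBMODE_BITS | vif2 << (2 * SUBMODE_BITS), where
+	 * SUBMODE_BITS stands for HI_OPTION_FW_SUBMODE_BITS (illustrative).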
+ */ + + if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, + ar->fw_capabilities)) { + for (i = 0; i < ar->vif_max; i++) + fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV << + (i * HI_OPTION_FW_SUBMODE_BITS); + } else { + for (i = 0; i < ar->max_norm_iface; i++) + fw_submode |= HI_OPTION_FW_SUBMODE_NONE << + (i * HI_OPTION_FW_SUBMODE_BITS); + + for (i = ar->max_norm_iface; i < ar->vif_max; i++) + fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV << + (i * HI_OPTION_FW_SUBMODE_BITS); + + if (ar->p2p && ar->vif_max == 1) + fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV; + } + + if (ath6kl_bmi_write_hi32(ar, hi_app_host_interest, + HTC_PROTOCOL_VERSION) != 0) { + ath6kl_err("bmi_write_memory for htc version failed\n"); + return -EIO; + } + + /* set the firmware mode to STA/IBSS/AP */ + param = 0; + + if (ath6kl_bmi_read_hi32(ar, hi_option_flag, ¶m) != 0) { + ath6kl_err("bmi_read_memory for setting fwmode failed\n"); + return -EIO; + } + + param |= (ar->vif_max << HI_OPTION_NUM_DEV_SHIFT); + param |= fw_mode << HI_OPTION_FW_MODE_SHIFT; + param |= fw_submode << HI_OPTION_FW_SUBMODE_SHIFT; + + param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT); + param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT); + + if (ath6kl_bmi_write_hi32(ar, hi_option_flag, param) != 0) { + ath6kl_err("bmi_write_memory for setting fwmode failed\n"); + return -EIO; + } + + ath6kl_dbg(ATH6KL_DBG_TRC, "firmware mode set\n"); + + /* + * Hardcode the address use for the extended board data + * Ideally this should be pre-allocate by the OS at boot time + * But since it is a new feature and board data is loaded + * at init time, we have to workaround this from host. + * It is difficult to patch the firmware boot code, + * but possible in theory. + */ + + if ((ar->target_type == TARGET_TYPE_AR6003) || + (ar->version.target_ver == AR6004_HW_1_3_VERSION) || + (ar->version.target_ver == AR6004_HW_3_0_VERSION)) { + param = ar->hw.board_ext_data_addr; + ram_reserved_size = ar->hw.reserved_ram_size; + + if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) { + ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n"); + return -EIO; + } + + if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz, + ram_reserved_size) != 0) { + ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n"); + return -EIO; + } + } + + /* set the block size for the target */ + if (ath6kl_set_htc_params(ar, MBOX_YIELD_LIMIT, 0)) + /* use default number of control buffers */ + return -EIO; + + /* Configure GPIO AR600x UART */ + status = ath6kl_bmi_write_hi32(ar, hi_dbg_uart_txpin, + ar->hw.uarttx_pin); + if (status) + return status; + + /* Only set the baud rate if we're actually doing debug */ + if (ar->conf_flags & ATH6KL_CONF_UART_DEBUG) { + status = ath6kl_bmi_write_hi32(ar, hi_desired_baud_rate, + ar->hw.uarttx_rate); + if (status) + return status; + } + + /* Configure target refclk_hz */ + if (ar->hw.refclk_hz != 0) { + status = ath6kl_bmi_write_hi32(ar, hi_refclk_hz, + ar->hw.refclk_hz); + if (status) + return status; + } + + return 0; +} + +/* firmware upload */ +static int ath6kl_get_fw(struct ath6kl *ar, const char *filename, + u8 **fw, size_t *fw_len) +{ + const struct firmware *fw_entry; + int ret; + + ret = request_firmware(&fw_entry, filename, ar->dev); + if (ret) + return ret; + + *fw_len = fw_entry->size; + *fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL); + + if (*fw == NULL) + ret = -ENOMEM; + + release_firmware(fw_entry); + + return ret; +} + +#ifdef CONFIG_OF +/* + * Check the device tree for a board-id and use it to construct + * the pathname to 
the firmware file. Used (for now) to find a
+ * fallback to the "bdata.bin" file--typically a symlink to the
+ * appropriate board-specific file.
+ */
+static bool check_device_tree(struct ath6kl *ar)
+{
+	static const char *board_id_prop = "atheros,board-id";
+	struct device_node *node;
+	char board_filename[64];
+	const char *board_id;
+	int ret;
+
+	for_each_compatible_node(node, NULL, "atheros,ath6kl") {
+		board_id = of_get_property(node, board_id_prop, NULL);
+		if (board_id == NULL) {
+			ath6kl_warn("No \"%s\" property on %pOFn node.\n",
+				    board_id_prop, node);
+			continue;
+		}
+		snprintf(board_filename, sizeof(board_filename),
+			 "%s/bdata.%s.bin", ar->hw.fw.dir, board_id);
+
+		ret = ath6kl_get_fw(ar, board_filename, &ar->fw_board,
+				    &ar->fw_board_len);
+		if (ret) {
+			ath6kl_err("Failed to get DT board file %s: %d\n",
+				   board_filename, ret);
+			continue;
+		}
+		of_node_put(node);
+		return true;
+	}
+	return false;
+}
+#else
+static bool check_device_tree(struct ath6kl *ar)
+{
+	return false;
+}
+#endif /* CONFIG_OF */
+
+static int ath6kl_fetch_board_file(struct ath6kl *ar)
+{
+	const char *filename;
+	int ret;
+
+	if (ar->fw_board != NULL)
+		return 0;
+
+	if (WARN_ON(ar->hw.fw_board == NULL))
+		return -EINVAL;
+
+	filename = ar->hw.fw_board;
+
+	ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
+			    &ar->fw_board_len);
+	if (ret == 0) {
+		/* managed to get proper board file */
+		return 0;
+	}
+
+	if (check_device_tree(ar)) {
+		/* got board file from device tree */
+		return 0;
+	}
+
+	/* there was no proper board file, try to use default instead */
+	ath6kl_warn("Failed to get board file %s (%d), trying to find default board file.\n",
+		    filename, ret);
+
+	filename = ar->hw.fw_default_board;
+
+	ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
+			    &ar->fw_board_len);
+	if (ret) {
+		ath6kl_err("Failed to get default board file %s: %d\n",
+			   filename, ret);
+		return ret;
+	}
+
+	ath6kl_warn("WARNING! No proper board file was found, using a default board file instead.\n");
+	ath6kl_warn("Most likely your hardware won't work as specified. 
Install correct board file!\n"); + + return 0; +} + +static int ath6kl_fetch_otp_file(struct ath6kl *ar) +{ + char filename[100]; + int ret; + + if (ar->fw_otp != NULL) + return 0; + + if (ar->hw.fw.otp == NULL) { + ath6kl_dbg(ATH6KL_DBG_BOOT, + "no OTP file configured for this hw\n"); + return 0; + } + + snprintf(filename, sizeof(filename), "%s/%s", + ar->hw.fw.dir, ar->hw.fw.otp); + + ret = ath6kl_get_fw(ar, filename, &ar->fw_otp, + &ar->fw_otp_len); + if (ret) { + ath6kl_err("Failed to get OTP file %s: %d\n", + filename, ret); + return ret; + } + + return 0; +} + +static int ath6kl_fetch_testmode_file(struct ath6kl *ar) +{ + char filename[100]; + int ret; + + if (ar->testmode == 0) + return 0; + + ath6kl_dbg(ATH6KL_DBG_BOOT, "testmode %d\n", ar->testmode); + + if (ar->testmode == 2) { + if (ar->hw.fw.utf == NULL) { + ath6kl_warn("testmode 2 not supported\n"); + return -EOPNOTSUPP; + } + + snprintf(filename, sizeof(filename), "%s/%s", + ar->hw.fw.dir, ar->hw.fw.utf); + } else { + if (ar->hw.fw.tcmd == NULL) { + ath6kl_warn("testmode 1 not supported\n"); + return -EOPNOTSUPP; + } + + snprintf(filename, sizeof(filename), "%s/%s", + ar->hw.fw.dir, ar->hw.fw.tcmd); + } + + set_bit(TESTMODE, &ar->flag); + + ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len); + if (ret) { + ath6kl_err("Failed to get testmode %d firmware file %s: %d\n", + ar->testmode, filename, ret); + return ret; + } + + return 0; +} + +static int ath6kl_fetch_fw_file(struct ath6kl *ar) +{ + char filename[100]; + int ret; + + if (ar->fw != NULL) + return 0; + + /* FIXME: remove WARN_ON() as we won't support FW API 1 for long */ + if (WARN_ON(ar->hw.fw.fw == NULL)) + return -EINVAL; + + snprintf(filename, sizeof(filename), "%s/%s", + ar->hw.fw.dir, ar->hw.fw.fw); + + ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len); + if (ret) { + ath6kl_err("Failed to get firmware file %s: %d\n", + filename, ret); + return ret; + } + + return 0; +} + +static int ath6kl_fetch_patch_file(struct ath6kl *ar) +{ + char filename[100]; + int ret; + + if (ar->fw_patch != NULL) + return 0; + + if (ar->hw.fw.patch == NULL) + return 0; + + snprintf(filename, sizeof(filename), "%s/%s", + ar->hw.fw.dir, ar->hw.fw.patch); + + ret = ath6kl_get_fw(ar, filename, &ar->fw_patch, + &ar->fw_patch_len); + if (ret) { + ath6kl_err("Failed to get patch file %s: %d\n", + filename, ret); + return ret; + } + + return 0; +} + +static int ath6kl_fetch_testscript_file(struct ath6kl *ar) +{ + char filename[100]; + int ret; + + if (ar->testmode != 2) + return 0; + + if (ar->fw_testscript != NULL) + return 0; + + if (ar->hw.fw.testscript == NULL) + return 0; + + snprintf(filename, sizeof(filename), "%s/%s", + ar->hw.fw.dir, ar->hw.fw.testscript); + + ret = ath6kl_get_fw(ar, filename, &ar->fw_testscript, + &ar->fw_testscript_len); + if (ret) { + ath6kl_err("Failed to get testscript file %s: %d\n", + filename, ret); + return ret; + } + + return 0; +} + +static int ath6kl_fetch_fw_api1(struct ath6kl *ar) +{ + int ret; + + ret = ath6kl_fetch_otp_file(ar); + if (ret) + return ret; + + ret = ath6kl_fetch_fw_file(ar); + if (ret) + return ret; + + ret = ath6kl_fetch_patch_file(ar); + if (ret) + return ret; + + ret = ath6kl_fetch_testscript_file(ar); + if (ret) + return ret; + + return 0; +} + +static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name) +{ + size_t magic_len, len, ie_len; + const struct firmware *fw; + struct ath6kl_fw_ie *hdr; + char filename[100]; + const u8 *data; + int ret, ie_id, i, index, bit; + __le32 *val; + + snprintf(filename, 
sizeof(filename), "%s/%s", ar->hw.fw.dir, name); + + ret = request_firmware(&fw, filename, ar->dev); + if (ret) { + ath6kl_err("Failed request firmware, rv: %d\n", ret); + return ret; + } + + data = fw->data; + len = fw->size; + + /* magic also includes the null byte, check that as well */ + magic_len = strlen(ATH6KL_FIRMWARE_MAGIC) + 1; + + if (len < magic_len) { + ath6kl_err("Magic length is invalid, len: %zd magic_len: %zd\n", + len, magic_len); + ret = -EINVAL; + goto out; + } + + if (memcmp(data, ATH6KL_FIRMWARE_MAGIC, magic_len) != 0) { + ath6kl_err("Magic is invalid, magic_len: %zd\n", + magic_len); + ret = -EINVAL; + goto out; + } + + len -= magic_len; + data += magic_len; + + /* loop elements */ + while (len > sizeof(struct ath6kl_fw_ie)) { + /* hdr is unaligned! */ + hdr = (struct ath6kl_fw_ie *) data; + + ie_id = le32_to_cpup(&hdr->id); + ie_len = le32_to_cpup(&hdr->len); + + len -= sizeof(*hdr); + data += sizeof(*hdr); + + ath6kl_dbg(ATH6KL_DBG_BOOT, "ie-id: %d len: %zd (0x%zx)\n", + ie_id, ie_len, ie_len); + + if (len < ie_len) { + ath6kl_err("IE len is invalid, len: %zd ie_len: %zd ie-id: %d\n", + len, ie_len, ie_id); + ret = -EINVAL; + goto out; + } + + switch (ie_id) { + case ATH6KL_FW_IE_FW_VERSION: + strscpy(ar->wiphy->fw_version, data, + min(sizeof(ar->wiphy->fw_version), ie_len+1)); + + ath6kl_dbg(ATH6KL_DBG_BOOT, + "found fw version %s\n", + ar->wiphy->fw_version); + break; + case ATH6KL_FW_IE_OTP_IMAGE: + ath6kl_dbg(ATH6KL_DBG_BOOT, "found otp image ie (%zd B)\n", + ie_len); + + ar->fw_otp = kmemdup(data, ie_len, GFP_KERNEL); + + if (ar->fw_otp == NULL) { + ath6kl_err("fw_otp cannot be allocated\n"); + ret = -ENOMEM; + goto out; + } + + ar->fw_otp_len = ie_len; + break; + case ATH6KL_FW_IE_FW_IMAGE: + ath6kl_dbg(ATH6KL_DBG_BOOT, "found fw image ie (%zd B)\n", + ie_len); + + /* in testmode we already might have a fw file */ + if (ar->fw != NULL) + break; + + ar->fw = vmalloc(ie_len); + + if (ar->fw == NULL) { + ath6kl_err("fw storage cannot be allocated, len: %zd\n", ie_len); + ret = -ENOMEM; + goto out; + } + + memcpy(ar->fw, data, ie_len); + ar->fw_len = ie_len; + break; + case ATH6KL_FW_IE_PATCH_IMAGE: + ath6kl_dbg(ATH6KL_DBG_BOOT, "found patch image ie (%zd B)\n", + ie_len); + + ar->fw_patch = kmemdup(data, ie_len, GFP_KERNEL); + + if (ar->fw_patch == NULL) { + ath6kl_err("fw_patch storage cannot be allocated, len: %zd\n", ie_len); + ret = -ENOMEM; + goto out; + } + + ar->fw_patch_len = ie_len; + break; + case ATH6KL_FW_IE_RESERVED_RAM_SIZE: + val = (__le32 *) data; + ar->hw.reserved_ram_size = le32_to_cpup(val); + + ath6kl_dbg(ATH6KL_DBG_BOOT, + "found reserved ram size ie %d\n", + ar->hw.reserved_ram_size); + break; + case ATH6KL_FW_IE_CAPABILITIES: + ath6kl_dbg(ATH6KL_DBG_BOOT, + "found firmware capabilities ie (%zd B)\n", + ie_len); + + for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) { + index = i / 8; + bit = i % 8; + + if (index == ie_len) + break; + + if (data[index] & (1 << bit)) + __set_bit(i, ar->fw_capabilities); + } + + ath6kl_dbg_dump(ATH6KL_DBG_BOOT, "capabilities", "", + ar->fw_capabilities, + sizeof(ar->fw_capabilities)); + break; + case ATH6KL_FW_IE_PATCH_ADDR: + if (ie_len != sizeof(*val)) + break; + + val = (__le32 *) data; + ar->hw.dataset_patch_addr = le32_to_cpup(val); + + ath6kl_dbg(ATH6KL_DBG_BOOT, + "found patch address ie 0x%x\n", + ar->hw.dataset_patch_addr); + break; + case ATH6KL_FW_IE_BOARD_ADDR: + if (ie_len != sizeof(*val)) + break; + + val = (__le32 *) data; + ar->hw.board_addr = le32_to_cpup(val); + + ath6kl_dbg(ATH6KL_DBG_BOOT, + 
"found board address ie 0x%x\n", + ar->hw.board_addr); + break; + case ATH6KL_FW_IE_VIF_MAX: + if (ie_len != sizeof(*val)) + break; + + val = (__le32 *) data; + ar->vif_max = min_t(unsigned int, le32_to_cpup(val), + ATH6KL_VIF_MAX); + + if (ar->vif_max > 1 && !ar->p2p) + ar->max_norm_iface = 2; + + ath6kl_dbg(ATH6KL_DBG_BOOT, + "found vif max ie %d\n", ar->vif_max); + break; + default: + ath6kl_dbg(ATH6KL_DBG_BOOT, "Unknown fw ie: %u\n", + le32_to_cpup(&hdr->id)); + break; + } + + len -= ie_len; + data += ie_len; + } + + ret = 0; +out: + release_firmware(fw); + + return ret; +} + +int ath6kl_init_fetch_firmwares(struct ath6kl *ar) +{ + int ret; + + ret = ath6kl_fetch_board_file(ar); + if (ret) + return ret; + + ret = ath6kl_fetch_testmode_file(ar); + if (ret) + return ret; + + ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API5_FILE); + if (ret == 0) { + ar->fw_api = 5; + goto out; + } + + ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API4_FILE); + if (ret == 0) { + ar->fw_api = 4; + goto out; + } + + ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API3_FILE); + if (ret == 0) { + ar->fw_api = 3; + goto out; + } + + ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API2_FILE); + if (ret == 0) { + ar->fw_api = 2; + goto out; + } + + ret = ath6kl_fetch_fw_api1(ar); + if (ret) + return ret; + + ar->fw_api = 1; + +out: + ath6kl_dbg(ATH6KL_DBG_BOOT, "using fw api %d\n", ar->fw_api); + + return 0; +} + +static int ath6kl_upload_board_file(struct ath6kl *ar) +{ + u32 board_address, board_ext_address, param; + u32 board_data_size, board_ext_data_size; + int ret; + + if (WARN_ON(ar->fw_board == NULL)) + return -ENOENT; + + /* + * Determine where in Target RAM to write Board Data. + * For AR6004, host determine Target RAM address for + * writing board data. + */ + if (ar->hw.board_addr != 0) { + board_address = ar->hw.board_addr; + ath6kl_bmi_write_hi32(ar, hi_board_data, + board_address); + } else { + ret = ath6kl_bmi_read_hi32(ar, hi_board_data, &board_address); + if (ret) { + ath6kl_err("Failed to get board file target address.\n"); + return ret; + } + } + + /* determine where in target ram to write extended board data */ + ret = ath6kl_bmi_read_hi32(ar, hi_board_ext_data, &board_ext_address); + if (ret) { + ath6kl_err("Failed to get extended board file target address.\n"); + return ret; + } + + if (ar->target_type == TARGET_TYPE_AR6003 && + board_ext_address == 0) { + ath6kl_err("Failed to get board file target address.\n"); + return -EINVAL; + } + + switch (ar->target_type) { + case TARGET_TYPE_AR6003: + board_data_size = AR6003_BOARD_DATA_SZ; + board_ext_data_size = AR6003_BOARD_EXT_DATA_SZ; + if (ar->fw_board_len > (board_data_size + board_ext_data_size)) + board_ext_data_size = AR6003_BOARD_EXT_DATA_SZ_V2; + break; + case TARGET_TYPE_AR6004: + board_data_size = AR6004_BOARD_DATA_SZ; + board_ext_data_size = AR6004_BOARD_EXT_DATA_SZ; + break; + default: + WARN_ON(1); + return -EINVAL; + } + + if (board_ext_address && + ar->fw_board_len == (board_data_size + board_ext_data_size)) { + /* write extended board data */ + ath6kl_dbg(ATH6KL_DBG_BOOT, + "writing extended board data to 0x%x (%d B)\n", + board_ext_address, board_ext_data_size); + + ret = ath6kl_bmi_write(ar, board_ext_address, + ar->fw_board + board_data_size, + board_ext_data_size); + if (ret) { + ath6kl_err("Failed to write extended board data: %d\n", + ret); + return ret; + } + + /* record that extended board data is initialized */ + param = (board_ext_data_size << 16) | 1; + + ath6kl_bmi_write_hi32(ar, hi_board_ext_data_config, param); + } + + if 
(ar->fw_board_len < board_data_size) { + ath6kl_err("Too small board file: %zu\n", ar->fw_board_len); + ret = -EINVAL; + return ret; + } + + ath6kl_dbg(ATH6KL_DBG_BOOT, "writing board file to 0x%x (%d B)\n", + board_address, board_data_size); + + ret = ath6kl_bmi_write(ar, board_address, ar->fw_board, + board_data_size); + + if (ret) { + ath6kl_err("Board file bmi write failed: %d\n", ret); + return ret; + } + + /* record the fact that Board Data IS initialized */ + if ((ar->version.target_ver == AR6004_HW_1_3_VERSION) || + (ar->version.target_ver == AR6004_HW_3_0_VERSION)) + param = board_data_size; + else + param = 1; + + ath6kl_bmi_write_hi32(ar, hi_board_data_initialized, param); + + return ret; +} + +static int ath6kl_upload_otp(struct ath6kl *ar) +{ + u32 address, param; + bool from_hw = false; + int ret; + + if (ar->fw_otp == NULL) + return 0; + + address = ar->hw.app_load_addr; + + ath6kl_dbg(ATH6KL_DBG_BOOT, "writing otp to 0x%x (%zd B)\n", address, + ar->fw_otp_len); + + ret = ath6kl_bmi_fast_download(ar, address, ar->fw_otp, + ar->fw_otp_len); + if (ret) { + ath6kl_err("Failed to upload OTP file: %d\n", ret); + return ret; + } + + /* read firmware start address */ + ret = ath6kl_bmi_read_hi32(ar, hi_app_start, &address); + + if (ret) { + ath6kl_err("Failed to read hi_app_start: %d\n", ret); + return ret; + } + + if (ar->hw.app_start_override_addr == 0) { + ar->hw.app_start_override_addr = address; + from_hw = true; + } + + ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr%s 0x%x\n", + from_hw ? " (from hw)" : "", + ar->hw.app_start_override_addr); + + /* execute the OTP code */ + ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n", + ar->hw.app_start_override_addr); + param = 0; + ath6kl_bmi_execute(ar, ar->hw.app_start_override_addr, ¶m); + + return ret; +} + +static int ath6kl_upload_firmware(struct ath6kl *ar) +{ + u32 address; + int ret; + + if (WARN_ON(ar->fw == NULL)) + return 0; + + address = ar->hw.app_load_addr; + + ath6kl_dbg(ATH6KL_DBG_BOOT, "writing firmware to 0x%x (%zd B)\n", + address, ar->fw_len); + + ret = ath6kl_bmi_fast_download(ar, address, ar->fw, ar->fw_len); + + if (ret) { + ath6kl_err("Failed to write firmware: %d\n", ret); + return ret; + } + + /* + * Set starting address for firmware + * Don't need to setup app_start override addr on AR6004 + */ + if (ar->target_type != TARGET_TYPE_AR6004) { + address = ar->hw.app_start_override_addr; + ath6kl_bmi_set_app_start(ar, address); + } + return ret; +} + +static int ath6kl_upload_patch(struct ath6kl *ar) +{ + u32 address; + int ret; + + if (ar->fw_patch == NULL) + return 0; + + address = ar->hw.dataset_patch_addr; + + ath6kl_dbg(ATH6KL_DBG_BOOT, "writing patch to 0x%x (%zd B)\n", + address, ar->fw_patch_len); + + ret = ath6kl_bmi_write(ar, address, ar->fw_patch, ar->fw_patch_len); + if (ret) { + ath6kl_err("Failed to write patch file: %d\n", ret); + return ret; + } + + ath6kl_bmi_write_hi32(ar, hi_dset_list_head, address); + + return 0; +} + +static int ath6kl_upload_testscript(struct ath6kl *ar) +{ + u32 address; + int ret; + + if (ar->testmode != 2) + return 0; + + if (ar->fw_testscript == NULL) + return 0; + + address = ar->hw.testscript_addr; + + ath6kl_dbg(ATH6KL_DBG_BOOT, "writing testscript to 0x%x (%zd B)\n", + address, ar->fw_testscript_len); + + ret = ath6kl_bmi_write(ar, address, ar->fw_testscript, + ar->fw_testscript_len); + if (ret) { + ath6kl_err("Failed to write testscript file: %d\n", ret); + return ret; + } + + ath6kl_bmi_write_hi32(ar, hi_ota_testscript, address); + + if 
((ar->version.target_ver != AR6004_HW_1_3_VERSION) && + (ar->version.target_ver != AR6004_HW_3_0_VERSION)) + ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz, 4096); + + ath6kl_bmi_write_hi32(ar, hi_test_apps_related, 1); + + return 0; +} + +static int ath6kl_init_upload(struct ath6kl *ar) +{ + u32 param, options, sleep, address; + int status = 0; + + if (ar->target_type != TARGET_TYPE_AR6003 && + ar->target_type != TARGET_TYPE_AR6004) + return -EINVAL; + + /* temporarily disable system sleep */ + address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS; + status = ath6kl_bmi_reg_read(ar, address, ¶m); + if (status) + return status; + + options = param; + + param |= ATH6KL_OPTION_SLEEP_DISABLE; + status = ath6kl_bmi_reg_write(ar, address, param); + if (status) + return status; + + address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS; + status = ath6kl_bmi_reg_read(ar, address, ¶m); + if (status) + return status; + + sleep = param; + + param |= SM(SYSTEM_SLEEP_DISABLE, 1); + status = ath6kl_bmi_reg_write(ar, address, param); + if (status) + return status; + + ath6kl_dbg(ATH6KL_DBG_TRC, "old options: %d, old sleep: %d\n", + options, sleep); + + /* program analog PLL register */ + /* no need to control 40/44MHz clock on AR6004 */ + if (ar->target_type != TARGET_TYPE_AR6004) { + status = ath6kl_bmi_reg_write(ar, ATH6KL_ANALOG_PLL_REGISTER, + 0xF9104001); + + if (status) + return status; + + /* Run at 80/88MHz by default */ + param = SM(CPU_CLOCK_STANDARD, 1); + + address = RTC_BASE_ADDRESS + CPU_CLOCK_ADDRESS; + status = ath6kl_bmi_reg_write(ar, address, param); + if (status) + return status; + } + + param = 0; + address = RTC_BASE_ADDRESS + LPO_CAL_ADDRESS; + param = SM(LPO_CAL_ENABLE, 1); + status = ath6kl_bmi_reg_write(ar, address, param); + if (status) + return status; + + /* WAR to avoid SDIO CRC err */ + if (ar->hw.flags & ATH6KL_HW_SDIO_CRC_ERROR_WAR) { + ath6kl_err("temporary war to avoid sdio crc error\n"); + + param = 0x28; + address = GPIO_BASE_ADDRESS + GPIO_PIN9_ADDRESS; + status = ath6kl_bmi_reg_write(ar, address, param); + if (status) + return status; + + param = 0x20; + + address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS; + status = ath6kl_bmi_reg_write(ar, address, param); + if (status) + return status; + + address = GPIO_BASE_ADDRESS + GPIO_PIN11_ADDRESS; + status = ath6kl_bmi_reg_write(ar, address, param); + if (status) + return status; + + address = GPIO_BASE_ADDRESS + GPIO_PIN12_ADDRESS; + status = ath6kl_bmi_reg_write(ar, address, param); + if (status) + return status; + + address = GPIO_BASE_ADDRESS + GPIO_PIN13_ADDRESS; + status = ath6kl_bmi_reg_write(ar, address, param); + if (status) + return status; + } + + /* write EEPROM data to Target RAM */ + status = ath6kl_upload_board_file(ar); + if (status) + return status; + + /* transfer One time Programmable data */ + status = ath6kl_upload_otp(ar); + if (status) + return status; + + /* Download Target firmware */ + status = ath6kl_upload_firmware(ar); + if (status) + return status; + + status = ath6kl_upload_patch(ar); + if (status) + return status; + + /* Download the test script */ + status = ath6kl_upload_testscript(ar); + if (status) + return status; + + /* Restore system sleep */ + address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS; + status = ath6kl_bmi_reg_write(ar, address, sleep); + if (status) + return status; + + address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS; + param = options | 0x20; + status = ath6kl_bmi_reg_write(ar, address, param); + if (status) + return status; + + return status; +} + +int 
ath6kl_init_hw_params(struct ath6kl *ar) +{ + const struct ath6kl_hw *hw; + int i; + + for (i = 0; i < ARRAY_SIZE(hw_list); i++) { + hw = &hw_list[i]; + + if (hw->id == ar->version.target_ver) + break; + } + + if (i == ARRAY_SIZE(hw_list)) { + ath6kl_err("Unsupported hardware version: 0x%x\n", + ar->version.target_ver); + return -EINVAL; + } + + ar->hw = *hw; + + ath6kl_dbg(ATH6KL_DBG_BOOT, + "target_ver 0x%x target_type 0x%x dataset_patch 0x%x app_load_addr 0x%x\n", + ar->version.target_ver, ar->target_type, + ar->hw.dataset_patch_addr, ar->hw.app_load_addr); + ath6kl_dbg(ATH6KL_DBG_BOOT, + "app_start_override_addr 0x%x board_ext_data_addr 0x%x reserved_ram_size 0x%x", + ar->hw.app_start_override_addr, ar->hw.board_ext_data_addr, + ar->hw.reserved_ram_size); + ath6kl_dbg(ATH6KL_DBG_BOOT, + "refclk_hz %d uarttx_pin %d", + ar->hw.refclk_hz, ar->hw.uarttx_pin); + + return 0; +} + +static const char *ath6kl_init_get_hif_name(enum ath6kl_hif_type type) +{ + switch (type) { + case ATH6KL_HIF_TYPE_SDIO: + return "sdio"; + case ATH6KL_HIF_TYPE_USB: + return "usb"; + } + + return NULL; +} + + +static const struct fw_capa_str_map { + int id; + const char *name; +} fw_capa_map[] = { + { ATH6KL_FW_CAPABILITY_HOST_P2P, "host-p2p" }, + { ATH6KL_FW_CAPABILITY_SCHED_SCAN, "sched-scan" }, + { ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, "sta-p2pdev-duplex" }, + { ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT, "inactivity-timeout" }, + { ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, "rsn-cap-override" }, + { ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER, "wow-mc-filter" }, + { ATH6KL_FW_CAPABILITY_BMISS_ENHANCE, "bmiss-enhance" }, + { ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST, "sscan-match-list" }, + { ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD, "rssi-scan-thold" }, + { ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR, "custom-mac-addr" }, + { ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, "tx-err-notify" }, + { ATH6KL_FW_CAPABILITY_REGDOMAIN, "regdomain" }, + { ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, "sched-scan-v2" }, + { ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL, "hb-poll" }, + { ATH6KL_FW_CAPABILITY_64BIT_RATES, "64bit-rates" }, + { ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS, "ap-inactivity-mins" }, + { ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT, "map-lp-endpoint" }, + { ATH6KL_FW_CAPABILITY_RATETABLE_MCS15, "ratetable-mcs15" }, + { ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM, "no-ip-checksum" }, +}; + +static const char *ath6kl_init_get_fw_capa_name(unsigned int id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(fw_capa_map); i++) { + if (fw_capa_map[i].id == id) + return fw_capa_map[i].name; + } + + return "<unknown>"; +} + +static void ath6kl_init_get_fwcaps(struct ath6kl *ar, char *buf, size_t buf_len) +{ + u8 *data = (u8 *) ar->fw_capabilities; + size_t trunc_len, len = 0; + int i, index, bit; + char *trunc = "..."; + + for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) { + index = i / 8; + bit = i % 8; + + if (index >= sizeof(ar->fw_capabilities) * 4) + break; + + if (buf_len - len < 4) { + ath6kl_warn("firmware capability buffer too small!\n"); + + /* add "..." 
to the end of string */ + trunc_len = strlen(trunc) + 1; + strncpy(buf + buf_len - trunc_len, trunc, trunc_len); + + return; + } + + if (data[index] & (1 << bit)) { + len += scnprintf(buf + len, buf_len - len, "%s,", + ath6kl_init_get_fw_capa_name(i)); + } + } + + /* overwrite the last comma */ + if (len > 0) + len--; + + buf[len] = '\0'; +} + +static int ath6kl_init_hw_reset(struct ath6kl *ar) +{ + ath6kl_dbg(ATH6KL_DBG_BOOT, "cold resetting the device"); + + return ath6kl_diag_write32(ar, RESET_CONTROL_ADDRESS, + cpu_to_le32(RESET_CONTROL_COLD_RST)); +} + +static int __ath6kl_init_hw_start(struct ath6kl *ar) +{ + long timeleft; + int ret, i; + char buf[200]; + + ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n"); + + ret = ath6kl_hif_power_on(ar); + if (ret) + return ret; + + ret = ath6kl_configure_target(ar); + if (ret) + goto err_power_off; + + ret = ath6kl_init_upload(ar); + if (ret) + goto err_power_off; + + /* Do we need to finish the BMI phase */ + ret = ath6kl_bmi_done(ar); + if (ret) + goto err_power_off; + + /* + * The reason we have to wait for the target here is that the + * driver layer has to init BMI in order to set the host block + * size. + */ + ret = ath6kl_htc_wait_target(ar->htc_target); + + if (ret == -ETIMEDOUT) { + /* + * Most likely USB target is in odd state after reboot and + * needs a reset. A cold reset makes the whole device + * disappear from USB bus and initialisation starts from + * beginning. + */ + ath6kl_warn("htc wait target timed out, resetting device\n"); + ath6kl_init_hw_reset(ar); + goto err_power_off; + } else if (ret) { + ath6kl_err("htc wait target failed: %d\n", ret); + goto err_power_off; + } + + ret = ath6kl_init_service_ep(ar); + if (ret) { + ath6kl_err("Endpoint service initialization failed: %d\n", ret); + goto err_cleanup_scatter; + } + + /* setup credit distribution */ + ath6kl_htc_credit_setup(ar->htc_target, &ar->credit_state_info); + + /* start HTC */ + ret = ath6kl_htc_start(ar->htc_target); + if (ret) { + /* FIXME: call this */ + ath6kl_cookie_cleanup(ar); + goto err_cleanup_scatter; + } + + /* Wait for Wmi event to be ready */ + timeleft = wait_event_interruptible_timeout(ar->event_wq, + test_bit(WMI_READY, + &ar->flag), + WMI_TIMEOUT); + if (timeleft <= 0) { + clear_bit(WMI_READY, &ar->flag); + ath6kl_err("wmi is not ready or wait was interrupted: %ld\n", + timeleft); + ret = -EIO; + goto err_htc_stop; + } + + ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n"); + + if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) { + ath6kl_info("%s %s fw %s api %d%s\n", + ar->hw.name, + ath6kl_init_get_hif_name(ar->hif_type), + ar->wiphy->fw_version, + ar->fw_api, + test_bit(TESTMODE, &ar->flag) ? 
" testmode" : ""); + ath6kl_init_get_fwcaps(ar, buf, sizeof(buf)); + ath6kl_info("firmware supports: %s\n", buf); + } + + if (ar->version.abi_ver != ATH6KL_ABI_VERSION) { + ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n", + ATH6KL_ABI_VERSION, ar->version.abi_ver); + ret = -EIO; + goto err_htc_stop; + } + + ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__); + + /* communicate the wmi protocol verision to the target */ + /* FIXME: return error */ + if ((ath6kl_set_host_app_area(ar)) != 0) + ath6kl_err("unable to set the host app area\n"); + + for (i = 0; i < ar->vif_max; i++) { + ret = ath6kl_target_config_wlan_params(ar, i); + if (ret) + goto err_htc_stop; + } + + return 0; + +err_htc_stop: + ath6kl_htc_stop(ar->htc_target); +err_cleanup_scatter: + ath6kl_hif_cleanup_scatter(ar); +err_power_off: + ath6kl_hif_power_off(ar); + + return ret; +} + +int ath6kl_init_hw_start(struct ath6kl *ar) +{ + int err; + + err = __ath6kl_init_hw_start(ar); + if (err) + return err; + ar->state = ATH6KL_STATE_ON; + return 0; +} + +static int __ath6kl_init_hw_stop(struct ath6kl *ar) +{ + int ret; + + ath6kl_dbg(ATH6KL_DBG_BOOT, "hw stop\n"); + + ath6kl_htc_stop(ar->htc_target); + + ath6kl_hif_stop(ar); + + ath6kl_bmi_reset(ar); + + ret = ath6kl_hif_power_off(ar); + if (ret) + ath6kl_warn("failed to power off hif: %d\n", ret); + + return 0; +} + +int ath6kl_init_hw_stop(struct ath6kl *ar) +{ + int err; + + err = __ath6kl_init_hw_stop(ar); + if (err) + return err; + ar->state = ATH6KL_STATE_OFF; + return 0; +} + +void ath6kl_init_hw_restart(struct ath6kl *ar) +{ + clear_bit(WMI_READY, &ar->flag); + + ath6kl_cfg80211_stop_all(ar); + + if (__ath6kl_init_hw_stop(ar)) { + ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Failed to stop during fw error recovery\n"); + return; + } + + if (__ath6kl_init_hw_start(ar)) { + ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Failed to restart during fw error recovery\n"); + return; + } +} + +void ath6kl_stop_txrx(struct ath6kl *ar) +{ + struct ath6kl_vif *vif, *tmp_vif; + int i; + + set_bit(DESTROY_IN_PROGRESS, &ar->flag); + + if (down_interruptible(&ar->sem)) { + ath6kl_err("down_interruptible failed\n"); + return; + } + + for (i = 0; i < AP_MAX_NUM_STA; i++) + aggr_reset_state(ar->sta_list[i].aggr_conn); + + spin_lock_bh(&ar->list_lock); + list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) { + list_del(&vif->list); + spin_unlock_bh(&ar->list_lock); + ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag)); + rtnl_lock(); + wiphy_lock(ar->wiphy); + ath6kl_cfg80211_vif_cleanup(vif); + wiphy_unlock(ar->wiphy); + rtnl_unlock(); + spin_lock_bh(&ar->list_lock); + } + spin_unlock_bh(&ar->list_lock); + + clear_bit(WMI_READY, &ar->flag); + + if (ar->fw_recovery.enable) + del_timer_sync(&ar->fw_recovery.hb_timer); + + /* + * After wmi_shudown all WMI events will be dropped. We + * need to cleanup the buffers allocated in AP mode and + * give disconnect notification to stack, which usually + * happens in the disconnect_event. Simulate the disconnect + * event by calling the function directly. Sometimes + * disconnect_event will be received when the debug logs + * are collected. + */ + ath6kl_wmi_shutdown(ar->wmi); + + clear_bit(WMI_ENABLED, &ar->flag); + if (ar->htc_target) { + ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__); + ath6kl_htc_stop(ar->htc_target); + } + + /* + * Try to reset the device if we can. The driver may have been + * configure NOT to reset the target during a debug session. 
+ */ + ath6kl_init_hw_reset(ar); + + up(&ar->sem); +} +EXPORT_SYMBOL(ath6kl_stop_txrx); diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c new file mode 100644 index 000000000..d3aa9e7a3 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -0,0 +1,1311 @@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "core.h" +#include "hif-ops.h" +#include "cfg80211.h" +#include "target.h" +#include "debug.h" + +struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr) +{ + struct ath6kl *ar = vif->ar; + struct ath6kl_sta *conn = NULL; + u8 i, max_conn; + + if (is_zero_ether_addr(node_addr)) + return NULL; + + max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0; + + for (i = 0; i < max_conn; i++) { + if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) { + conn = &ar->sta_list[i]; + break; + } + } + + return conn; +} + +struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid) +{ + struct ath6kl_sta *conn = NULL; + u8 ctr; + + for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) { + if (ar->sta_list[ctr].aid == aid) { + conn = &ar->sta_list[ctr]; + break; + } + } + return conn; +} + +static void ath6kl_add_new_sta(struct ath6kl_vif *vif, u8 *mac, u16 aid, + u8 *wpaie, size_t ielen, u8 keymgmt, + u8 ucipher, u8 auth, u8 apsd_info) +{ + struct ath6kl *ar = vif->ar; + struct ath6kl_sta *sta; + u8 free_slot; + + free_slot = aid - 1; + + sta = &ar->sta_list[free_slot]; + memcpy(sta->mac, mac, ETH_ALEN); + if (ielen <= ATH6KL_MAX_IE) + memcpy(sta->wpa_ie, wpaie, ielen); + sta->aid = aid; + sta->keymgmt = keymgmt; + sta->ucipher = ucipher; + sta->auth = auth; + sta->apsd_info = apsd_info; + + ar->sta_list_index = ar->sta_list_index | (1 << free_slot); + ar->ap_stats.sta[free_slot].aid = cpu_to_le32(aid); + aggr_conn_init(vif, vif->aggr_cntxt, sta->aggr_conn); +} + +static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i) +{ + struct ath6kl_sta *sta = &ar->sta_list[i]; + struct ath6kl_mgmt_buff *entry, *tmp; + + /* empty the queued pkts in the PS queue if any */ + spin_lock_bh(&sta->psq_lock); + skb_queue_purge(&sta->psq); + skb_queue_purge(&sta->apsdq); + + if (sta->mgmt_psq_len != 0) { + list_for_each_entry_safe(entry, tmp, &sta->mgmt_psq, list) { + kfree(entry); + } + INIT_LIST_HEAD(&sta->mgmt_psq); + sta->mgmt_psq_len = 0; + } + + spin_unlock_bh(&sta->psq_lock); + + memset(&ar->ap_stats.sta[sta->aid - 1], 0, + sizeof(struct wmi_per_sta_stat)); + eth_zero_addr(sta->mac); + memset(sta->wpa_ie, 0, ATH6KL_MAX_IE); + sta->aid = 0; + sta->sta_flags = 0; + + ar->sta_list_index = ar->sta_list_index & ~(1 << i); + aggr_reset_state(sta->aggr_conn); +} + +static u8 
ath6kl_remove_sta(struct ath6kl *ar, u8 *mac, u16 reason) +{ + u8 i, removed = 0; + + if (is_zero_ether_addr(mac)) + return removed; + + if (is_broadcast_ether_addr(mac)) { + ath6kl_dbg(ATH6KL_DBG_TRC, "deleting all station\n"); + + for (i = 0; i < AP_MAX_NUM_STA; i++) { + if (!is_zero_ether_addr(ar->sta_list[i].mac)) { + ath6kl_sta_cleanup(ar, i); + removed = 1; + } + } + } else { + for (i = 0; i < AP_MAX_NUM_STA; i++) { + if (memcmp(ar->sta_list[i].mac, mac, ETH_ALEN) == 0) { + ath6kl_dbg(ATH6KL_DBG_TRC, + "deleting station %pM aid=%d reason=%d\n", + mac, ar->sta_list[i].aid, reason); + ath6kl_sta_cleanup(ar, i); + removed = 1; + break; + } + } + } + + return removed; +} + +enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac) +{ + struct ath6kl *ar = devt; + return ar->ac2ep_map[ac]; +} + +struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar) +{ + struct ath6kl_cookie *cookie; + + cookie = ar->cookie_list; + if (cookie != NULL) { + ar->cookie_list = cookie->arc_list_next; + ar->cookie_count--; + } + + return cookie; +} + +void ath6kl_cookie_init(struct ath6kl *ar) +{ + u32 i; + + ar->cookie_list = NULL; + ar->cookie_count = 0; + + memset(ar->cookie_mem, 0, sizeof(ar->cookie_mem)); + + for (i = 0; i < MAX_COOKIE_NUM; i++) + ath6kl_free_cookie(ar, &ar->cookie_mem[i]); +} + +void ath6kl_cookie_cleanup(struct ath6kl *ar) +{ + ar->cookie_list = NULL; + ar->cookie_count = 0; +} + +void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie) +{ + /* Insert first */ + + if (!ar || !cookie) + return; + + cookie->arc_list_next = ar->cookie_list; + ar->cookie_list = cookie; + ar->cookie_count++; +} + +/* + * Read from the hardware through its diagnostic window. No cooperation + * from the firmware is required for this. + */ +int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value) +{ + int ret; + + ret = ath6kl_hif_diag_read32(ar, address, value); + if (ret) { + ath6kl_warn("failed to read32 through diagnose window: %d\n", + ret); + return ret; + } + + return 0; +} + +/* + * Write to the ATH6KL through its diagnostic window. No cooperation from + * the Target is required for this. 
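+ * This is what makes the diagnostic window usable from the firmware-log
+ * and cold-reset paths even after the target has crashed.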
+ */ +int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value) +{ + int ret; + + ret = ath6kl_hif_diag_write32(ar, address, value); + + if (ret) { + ath6kl_err("failed to write 0x%x during diagnose window to 0x%x\n", + address, value); + return ret; + } + + return 0; +} + +int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length) +{ + u32 count, *buf = data; + int ret; + + if (WARN_ON(length % 4)) + return -EINVAL; + + for (count = 0; count < length / 4; count++, address += 4) { + ret = ath6kl_diag_read32(ar, address, &buf[count]); + if (ret) + return ret; + } + + return 0; +} + +int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length) +{ + u32 count; + __le32 *buf = data; + int ret; + + if (WARN_ON(length % 4)) + return -EINVAL; + + for (count = 0; count < length / 4; count++, address += 4) { + ret = ath6kl_diag_write32(ar, address, buf[count]); + if (ret) + return ret; + } + + return 0; +} + +int ath6kl_read_fwlogs(struct ath6kl *ar) +{ + struct ath6kl_dbglog_hdr debug_hdr; + struct ath6kl_dbglog_buf debug_buf; + u32 address, length, firstbuf, debug_hdr_addr; + int ret, loop; + u8 *buf; + + buf = kmalloc(ATH6KL_FWLOG_PAYLOAD_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + address = TARG_VTOP(ar->target_type, + ath6kl_get_hi_item_addr(ar, + HI_ITEM(hi_dbglog_hdr))); + + ret = ath6kl_diag_read32(ar, address, &debug_hdr_addr); + if (ret) + goto out; + + /* Get the contents of the ring buffer */ + if (debug_hdr_addr == 0) { + ath6kl_warn("Invalid address for debug_hdr_addr\n"); + ret = -EINVAL; + goto out; + } + + address = TARG_VTOP(ar->target_type, debug_hdr_addr); + ret = ath6kl_diag_read(ar, address, &debug_hdr, sizeof(debug_hdr)); + if (ret) + goto out; + + address = TARG_VTOP(ar->target_type, + le32_to_cpu(debug_hdr.dbuf_addr)); + firstbuf = address; + ret = ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf)); + if (ret) + goto out; + + loop = 100; + + do { + address = TARG_VTOP(ar->target_type, + le32_to_cpu(debug_buf.buffer_addr)); + length = le32_to_cpu(debug_buf.length); + + if (length != 0 && (le32_to_cpu(debug_buf.length) <= + le32_to_cpu(debug_buf.bufsize))) { + length = ALIGN(length, 4); + + ret = ath6kl_diag_read(ar, address, + buf, length); + if (ret) + goto out; + + ath6kl_debug_fwlog_event(ar, buf, length); + } + + address = TARG_VTOP(ar->target_type, + le32_to_cpu(debug_buf.next)); + ret = ath6kl_diag_read(ar, address, &debug_buf, + sizeof(debug_buf)); + if (ret) + goto out; + + loop--; + + if (WARN_ON(loop == 0)) { + ret = -ETIMEDOUT; + goto out; + } + } while (address != firstbuf); + +out: + kfree(buf); + + return ret; +} + +static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif) +{ + u8 index; + u8 keyusage; + + for (index = 0; index <= WMI_MAX_KEY_INDEX; index++) { + if (vif->wep_key_list[index].key_len) { + keyusage = GROUP_USAGE; + if (index == vif->def_txkey_index) + keyusage |= TX_USAGE; + + ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx, + index, + WEP_CRYPT, + keyusage, + vif->wep_key_list[index].key_len, + NULL, 0, + vif->wep_key_list[index].key, + KEY_OP_INIT_VAL, NULL, + NO_SYNC_WMIFLAG); + } + } +} + +void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel) +{ + struct ath6kl *ar = vif->ar; + struct ath6kl_req_key *ik; + int res; + u8 key_rsc[ATH6KL_KEY_SEQ_LEN]; + + ik = &ar->ap_mode_bkey; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel); + + switch (vif->auth_mode) { + case NONE_AUTH: + if (vif->prwise_crypto == WEP_CRYPT) + 
ath6kl_install_static_wep_keys(vif); + if (!ik->valid || ik->key_type != WAPI_CRYPT) + break; + /* for WAPI, we need to set the delayed group key, continue: */ + fallthrough; + case WPA_PSK_AUTH: + case WPA2_PSK_AUTH: + case (WPA_PSK_AUTH | WPA2_PSK_AUTH): + if (!ik->valid) + break; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "Delayed addkey for the initial group key for AP mode\n"); + memset(key_rsc, 0, sizeof(key_rsc)); + res = ath6kl_wmi_addkey_cmd( + ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type, + GROUP_USAGE, ik->key_len, key_rsc, ATH6KL_KEY_SEQ_LEN, + ik->key, + KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG); + if (res) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "Delayed addkey failed: %d\n", res); + } + break; + } + + if (ar->last_ch != channel) + /* we actually don't know the phymode, default to HT20 */ + ath6kl_cfg80211_ch_switch_notify(vif, channel, WMI_11G_HT20); + + ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0); + set_bit(CONNECTED, &vif->flags); + netif_carrier_on(vif->ndev); +} + +void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, + u8 keymgmt, u8 ucipher, u8 auth, + u8 assoc_req_len, u8 *assoc_info, u8 apsd_info) +{ + u8 *ies = NULL, *wpa_ie = NULL, *pos; + size_t ies_len = 0; + struct station_info *sinfo; + + ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid); + + if (aid < 1 || aid > AP_MAX_NUM_STA) + return; + + if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) { + struct ieee80211_mgmt *mgmt = + (struct ieee80211_mgmt *) assoc_info; + if (ieee80211_is_assoc_req(mgmt->frame_control) && + assoc_req_len >= sizeof(struct ieee80211_hdr_3addr) + + sizeof(mgmt->u.assoc_req)) { + ies = mgmt->u.assoc_req.variable; + ies_len = assoc_info + assoc_req_len - ies; + } else if (ieee80211_is_reassoc_req(mgmt->frame_control) && + assoc_req_len >= sizeof(struct ieee80211_hdr_3addr) + + sizeof(mgmt->u.reassoc_req)) { + ies = mgmt->u.reassoc_req.variable; + ies_len = assoc_info + assoc_req_len - ies; + } + } + + pos = ies; + while (pos && pos + 1 < ies + ies_len) { + if (pos + 2 + pos[1] > ies + ies_len) + break; + if (pos[0] == WLAN_EID_RSN) + wpa_ie = pos; /* RSN IE */ + else if (pos[0] == WLAN_EID_VENDOR_SPECIFIC && + pos[1] >= 4 && + pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2) { + if (pos[5] == 0x01) + wpa_ie = pos; /* WPA IE */ + else if (pos[5] == 0x04) { + wpa_ie = pos; /* WPS IE */ + break; /* overrides WPA/RSN IE */ + } + } else if (pos[0] == 0x44 && wpa_ie == NULL) { + /* + * Note: WAPI Parameter Set IE re-uses Element ID that + * was officially allocated for BSS AC Access Delay. As + * such, we need to be a bit more careful on when + * parsing the frame. However, BSS AC Access Delay + * element is not supposed to be included in + * (Re)Association Request frames, so this should not + * cause problems. + */ + wpa_ie = pos; /* WAPI IE */ + break; + } + pos += 2 + pos[1]; + } + + ath6kl_add_new_sta(vif, mac_addr, aid, wpa_ie, + wpa_ie ? 
2 + wpa_ie[1] : 0, + keymgmt, ucipher, auth, apsd_info); + + /* send event to application */ + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) + return; + + /* TODO: sinfo.generation */ + + sinfo->assoc_req_ies = ies; + sinfo->assoc_req_ies_len = ies_len; + + cfg80211_new_sta(vif->ndev, mac_addr, sinfo, GFP_KERNEL); + + netif_wake_queue(vif->ndev); + + kfree(sinfo); +} + +void disconnect_timer_handler(struct timer_list *t) +{ + struct ath6kl_vif *vif = from_timer(vif, t, disconnect_timer); + + ath6kl_init_profile_info(vif); + ath6kl_disconnect(vif); +} + +void ath6kl_disconnect(struct ath6kl_vif *vif) +{ + if (test_bit(CONNECTED, &vif->flags) || + test_bit(CONNECT_PEND, &vif->flags)) { + ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx); + /* + * Disconnect command is issued, clear the connect pending + * flag. The connected flag will be cleared in + * disconnect event notification. + */ + clear_bit(CONNECT_PEND, &vif->flags); + } +} + +/* WMI Event handlers */ + +void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver, + enum wmi_phy_cap cap) +{ + struct ath6kl *ar = devt; + + memcpy(ar->mac_addr, datap, ETH_ALEN); + + ath6kl_dbg(ATH6KL_DBG_BOOT, + "ready event mac addr %pM sw_ver 0x%x abi_ver 0x%x cap 0x%x\n", + ar->mac_addr, sw_ver, abi_ver, cap); + + ar->version.wlan_ver = sw_ver; + ar->version.abi_ver = abi_ver; + ar->hw.cap = cap; + + if (strlen(ar->wiphy->fw_version) == 0) { + snprintf(ar->wiphy->fw_version, + sizeof(ar->wiphy->fw_version), + "%u.%u.%u.%u", + (ar->version.wlan_ver & 0xf0000000) >> 28, + (ar->version.wlan_ver & 0x0f000000) >> 24, + (ar->version.wlan_ver & 0x00ff0000) >> 16, + (ar->version.wlan_ver & 0x0000ffff)); + } + + /* indicate to the waiting thread that the ready event was received */ + set_bit(WMI_READY, &ar->flag); + wake_up(&ar->event_wq); +} + +void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status) +{ + struct ath6kl *ar = vif->ar; + bool aborted = false; + + if (status != WMI_SCAN_STATUS_SUCCESS) + aborted = true; + + ath6kl_cfg80211_scan_complete_event(vif, aborted); + + if (!ar->usr_bss_filter) { + clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); + ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, + NONE_BSS_FILTER, 0); + } + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status); +} + +static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel) +{ + struct ath6kl *ar = vif->ar; + + vif->profile.ch = cpu_to_le16(channel); + + switch (vif->nw_type) { + case AP_NETWORK: + /* + * reconfigure any saved RSN IE capabilites in the beacon / + * probe response to stay in sync with the supplicant. 
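+		 * The override is only sent when the firmware advertises
+		 * ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, as checked below.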
+ */ + if (vif->rsn_capab && + test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, + ar->fw_capabilities)) + ath6kl_wmi_set_ie_cmd(ar->wmi, vif->fw_vif_idx, + WLAN_EID_RSN, WMI_RSN_IE_CAPB, + (const u8 *) &vif->rsn_capab, + sizeof(vif->rsn_capab)); + + return ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, + &vif->profile); + default: + ath6kl_err("won't switch channels nw_type=%d\n", vif->nw_type); + return -ENOTSUPP; + } +} + +static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel) +{ + struct ath6kl_vif *vif; + int res = 0; + + if (!ar->want_ch_switch) + return; + + spin_lock_bh(&ar->list_lock); + list_for_each_entry(vif, &ar->vif_list, list) { + if (ar->want_ch_switch & (1 << vif->fw_vif_idx)) + res = ath6kl_commit_ch_switch(vif, channel); + + /* if channel switch failed, oh well we tried */ + ar->want_ch_switch &= ~(1 << vif->fw_vif_idx); + + if (res) + ath6kl_err("channel switch failed nw_type %d res %d\n", + vif->nw_type, res); + } + spin_unlock_bh(&ar->list_lock); +} + +void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid, + u16 listen_int, u16 beacon_int, + enum network_type net_type, u8 beacon_ie_len, + u8 assoc_req_len, u8 assoc_resp_len, + u8 *assoc_info) +{ + struct ath6kl *ar = vif->ar; + + ath6kl_cfg80211_connect_event(vif, channel, bssid, + listen_int, beacon_int, + net_type, beacon_ie_len, + assoc_req_len, assoc_resp_len, + assoc_info); + + memcpy(vif->bssid, bssid, sizeof(vif->bssid)); + vif->bss_ch = channel; + + if (vif->nw_type == INFRA_NETWORK) { + ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, + vif->listen_intvl_t, 0); + ath6kl_check_ch_switch(ar, channel); + } + + netif_wake_queue(vif->ndev); + + /* Update connect & link status atomically */ + spin_lock_bh(&vif->if_lock); + set_bit(CONNECTED, &vif->flags); + clear_bit(CONNECT_PEND, &vif->flags); + netif_carrier_on(vif->ndev); + spin_unlock_bh(&vif->if_lock); + + aggr_reset_state(vif->aggr_cntxt->aggr_conn); + vif->reconnect_flag = 0; + + if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) { + memset(ar->node_map, 0, sizeof(ar->node_map)); + ar->node_num = 0; + ar->next_ep_id = ENDPOINT_2; + } + + if (!ar->usr_bss_filter) { + set_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); + ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, + CURRENT_BSS_FILTER, 0); + } +} + +void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast) +{ + struct ath6kl_sta *sta; + struct ath6kl *ar = vif->ar; + u8 tsc[6]; + + /* + * For AP case, keyid will have aid of STA which sent pkt with + * MIC error. Use this aid to get MAC & send it to hostapd. 
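+	 * The aid is encoded in the upper bits of keyid, hence the
+	 * (keyid >> 2) below.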
+ */ + if (vif->nw_type == AP_NETWORK) { + sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2)); + if (!sta) + return; + + ath6kl_dbg(ATH6KL_DBG_TRC, + "ap tkip mic error received from aid=%d\n", keyid); + + memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */ + cfg80211_michael_mic_failure(vif->ndev, sta->mac, + NL80211_KEYTYPE_PAIRWISE, keyid, + tsc, GFP_KERNEL); + } else { + ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast); + } +} + +static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len) +{ + struct wmi_target_stats *tgt_stats = + (struct wmi_target_stats *) ptr; + struct ath6kl *ar = vif->ar; + struct target_stats *stats = &vif->target_stats; + struct tkip_ccmp_stats *ccmp_stats; + s32 rate; + u8 ac; + + if (len < sizeof(*tgt_stats)) + return; + + ath6kl_dbg(ATH6KL_DBG_TRC, "updating target stats\n"); + + stats->tx_pkt += le32_to_cpu(tgt_stats->stats.tx.pkt); + stats->tx_byte += le32_to_cpu(tgt_stats->stats.tx.byte); + stats->tx_ucast_pkt += le32_to_cpu(tgt_stats->stats.tx.ucast_pkt); + stats->tx_ucast_byte += le32_to_cpu(tgt_stats->stats.tx.ucast_byte); + stats->tx_mcast_pkt += le32_to_cpu(tgt_stats->stats.tx.mcast_pkt); + stats->tx_mcast_byte += le32_to_cpu(tgt_stats->stats.tx.mcast_byte); + stats->tx_bcast_pkt += le32_to_cpu(tgt_stats->stats.tx.bcast_pkt); + stats->tx_bcast_byte += le32_to_cpu(tgt_stats->stats.tx.bcast_byte); + stats->tx_rts_success_cnt += + le32_to_cpu(tgt_stats->stats.tx.rts_success_cnt); + + for (ac = 0; ac < WMM_NUM_AC; ac++) + stats->tx_pkt_per_ac[ac] += + le32_to_cpu(tgt_stats->stats.tx.pkt_per_ac[ac]); + + stats->tx_err += le32_to_cpu(tgt_stats->stats.tx.err); + stats->tx_fail_cnt += le32_to_cpu(tgt_stats->stats.tx.fail_cnt); + stats->tx_retry_cnt += le32_to_cpu(tgt_stats->stats.tx.retry_cnt); + stats->tx_mult_retry_cnt += + le32_to_cpu(tgt_stats->stats.tx.mult_retry_cnt); + stats->tx_rts_fail_cnt += + le32_to_cpu(tgt_stats->stats.tx.rts_fail_cnt); + + rate = a_sle32_to_cpu(tgt_stats->stats.tx.ucast_rate); + stats->tx_ucast_rate = ath6kl_wmi_get_rate(ar->wmi, rate); + + stats->rx_pkt += le32_to_cpu(tgt_stats->stats.rx.pkt); + stats->rx_byte += le32_to_cpu(tgt_stats->stats.rx.byte); + stats->rx_ucast_pkt += le32_to_cpu(tgt_stats->stats.rx.ucast_pkt); + stats->rx_ucast_byte += le32_to_cpu(tgt_stats->stats.rx.ucast_byte); + stats->rx_mcast_pkt += le32_to_cpu(tgt_stats->stats.rx.mcast_pkt); + stats->rx_mcast_byte += le32_to_cpu(tgt_stats->stats.rx.mcast_byte); + stats->rx_bcast_pkt += le32_to_cpu(tgt_stats->stats.rx.bcast_pkt); + stats->rx_bcast_byte += le32_to_cpu(tgt_stats->stats.rx.bcast_byte); + stats->rx_frgment_pkt += le32_to_cpu(tgt_stats->stats.rx.frgment_pkt); + stats->rx_err += le32_to_cpu(tgt_stats->stats.rx.err); + stats->rx_crc_err += le32_to_cpu(tgt_stats->stats.rx.crc_err); + stats->rx_key_cache_miss += + le32_to_cpu(tgt_stats->stats.rx.key_cache_miss); + stats->rx_decrypt_err += le32_to_cpu(tgt_stats->stats.rx.decrypt_err); + stats->rx_dupl_frame += le32_to_cpu(tgt_stats->stats.rx.dupl_frame); + + rate = a_sle32_to_cpu(tgt_stats->stats.rx.ucast_rate); + stats->rx_ucast_rate = ath6kl_wmi_get_rate(ar->wmi, rate); + + ccmp_stats = &tgt_stats->stats.tkip_ccmp_stats; + + stats->tkip_local_mic_fail += + le32_to_cpu(ccmp_stats->tkip_local_mic_fail); + stats->tkip_cnter_measures_invoked += + le32_to_cpu(ccmp_stats->tkip_cnter_measures_invoked); + stats->tkip_fmt_err += le32_to_cpu(ccmp_stats->tkip_fmt_err); + + stats->ccmp_fmt_err += le32_to_cpu(ccmp_stats->ccmp_fmt_err); + stats->ccmp_replays += 
le32_to_cpu(ccmp_stats->ccmp_replays); + + stats->pwr_save_fail_cnt += + le32_to_cpu(tgt_stats->pm_stats.pwr_save_failure_cnt); + stats->noise_floor_calib = + a_sle32_to_cpu(tgt_stats->noise_floor_calib); + + stats->cs_bmiss_cnt += + le32_to_cpu(tgt_stats->cserv_stats.cs_bmiss_cnt); + stats->cs_low_rssi_cnt += + le32_to_cpu(tgt_stats->cserv_stats.cs_low_rssi_cnt); + stats->cs_connect_cnt += + le16_to_cpu(tgt_stats->cserv_stats.cs_connect_cnt); + stats->cs_discon_cnt += + le16_to_cpu(tgt_stats->cserv_stats.cs_discon_cnt); + + stats->cs_ave_beacon_rssi = + a_sle16_to_cpu(tgt_stats->cserv_stats.cs_ave_beacon_rssi); + + stats->cs_last_roam_msec = + tgt_stats->cserv_stats.cs_last_roam_msec; + stats->cs_snr = tgt_stats->cserv_stats.cs_snr; + stats->cs_rssi = a_sle16_to_cpu(tgt_stats->cserv_stats.cs_rssi); + + stats->lq_val = le32_to_cpu(tgt_stats->lq_val); + + stats->wow_pkt_dropped += + le32_to_cpu(tgt_stats->wow_stats.wow_pkt_dropped); + stats->wow_host_pkt_wakeups += + tgt_stats->wow_stats.wow_host_pkt_wakeups; + stats->wow_host_evt_wakeups += + tgt_stats->wow_stats.wow_host_evt_wakeups; + stats->wow_evt_discarded += + le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded); + + stats->arp_received = le32_to_cpu(tgt_stats->arp_stats.arp_received); + stats->arp_replied = le32_to_cpu(tgt_stats->arp_stats.arp_replied); + stats->arp_matched = le32_to_cpu(tgt_stats->arp_stats.arp_matched); + + if (test_bit(STATS_UPDATE_PEND, &vif->flags)) { + clear_bit(STATS_UPDATE_PEND, &vif->flags); + wake_up(&ar->event_wq); + } +} + +static void ath6kl_add_le32(__le32 *var, __le32 val) +{ + *var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val)); +} + +void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len) +{ + struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr; + struct ath6kl *ar = vif->ar; + struct wmi_ap_mode_stat *ap = &ar->ap_stats; + struct wmi_per_sta_stat *st_ap, *st_p; + u8 ac; + + if (vif->nw_type == AP_NETWORK) { + if (len < sizeof(*p)) + return; + + for (ac = 0; ac < AP_MAX_NUM_STA; ac++) { + st_ap = &ap->sta[ac]; + st_p = &p->sta[ac]; + + ath6kl_add_le32(&st_ap->tx_bytes, st_p->tx_bytes); + ath6kl_add_le32(&st_ap->tx_pkts, st_p->tx_pkts); + ath6kl_add_le32(&st_ap->tx_error, st_p->tx_error); + ath6kl_add_le32(&st_ap->tx_discard, st_p->tx_discard); + ath6kl_add_le32(&st_ap->rx_bytes, st_p->rx_bytes); + ath6kl_add_le32(&st_ap->rx_pkts, st_p->rx_pkts); + ath6kl_add_le32(&st_ap->rx_error, st_p->rx_error); + ath6kl_add_le32(&st_ap->rx_discard, st_p->rx_discard); + } + + } else { + ath6kl_update_target_stats(vif, ptr, len); + } +} + +void ath6kl_wakeup_event(void *dev) +{ + struct ath6kl *ar = (struct ath6kl *) dev; + + wake_up(&ar->event_wq); +} + +void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr) +{ + struct ath6kl *ar = (struct ath6kl *) devt; + + ar->tx_pwr = tx_pwr; + wake_up(&ar->event_wq); +} + +void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid) +{ + struct ath6kl_sta *conn; + struct sk_buff *skb; + bool psq_empty = false; + struct ath6kl *ar = vif->ar; + struct ath6kl_mgmt_buff *mgmt_buf; + + conn = ath6kl_find_sta_by_aid(ar, aid); + + if (!conn) + return; + /* + * Send out a packet queued on ps queue. When the ps queue + * becomes empty update the PVB for this station. 
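+ * (The PVB is the partial virtual bitmap in the beacon's TIM + * element; clearing this station's bit advertises that no more + * frames are buffered for it.)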
+ */ + spin_lock_bh(&conn->psq_lock); + psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0); + spin_unlock_bh(&conn->psq_lock); + + if (psq_empty) + /* TODO: Send out a NULL data frame */ + return; + + spin_lock_bh(&conn->psq_lock); + if (conn->mgmt_psq_len > 0) { + mgmt_buf = list_first_entry(&conn->mgmt_psq, + struct ath6kl_mgmt_buff, list); + list_del(&mgmt_buf->list); + conn->mgmt_psq_len--; + spin_unlock_bh(&conn->psq_lock); + + conn->sta_flags |= STA_PS_POLLED; + ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, + mgmt_buf->id, mgmt_buf->freq, + mgmt_buf->wait, mgmt_buf->buf, + mgmt_buf->len, mgmt_buf->no_cck); + conn->sta_flags &= ~STA_PS_POLLED; + kfree(mgmt_buf); + } else { + skb = skb_dequeue(&conn->psq); + spin_unlock_bh(&conn->psq_lock); + + conn->sta_flags |= STA_PS_POLLED; + ath6kl_data_tx(skb, vif->ndev); + conn->sta_flags &= ~STA_PS_POLLED; + } + + spin_lock_bh(&conn->psq_lock); + psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0); + spin_unlock_bh(&conn->psq_lock); + + if (psq_empty) + ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0); +} + +void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif) +{ + bool mcastq_empty = false; + struct sk_buff *skb; + struct ath6kl *ar = vif->ar; + + /* + * If there are no associated STAs, ignore the DTIM expiry event. + * There can be potential race conditions where the last associated + * STA may disconnect & before the host could clear the 'Indicate + * DTIM' request to the firmware, the firmware would have just + * indicated a DTIM expiry event. The race is between 'clear DTIM + * expiry cmd' going from the host to the firmware & the DTIM + * expiry event happening from the firmware to the host. + */ + if (!ar->sta_list_index) + return; + + spin_lock_bh(&ar->mcastpsq_lock); + mcastq_empty = skb_queue_empty(&ar->mcastpsq); + spin_unlock_bh(&ar->mcastpsq_lock); + + if (mcastq_empty) + return; + + /* set the STA flag to dtim_expired for the frame to go out */ + set_bit(DTIM_EXPIRED, &vif->flags); + + spin_lock_bh(&ar->mcastpsq_lock); + while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) { + spin_unlock_bh(&ar->mcastpsq_lock); + + ath6kl_data_tx(skb, vif->ndev); + + spin_lock_bh(&ar->mcastpsq_lock); + } + spin_unlock_bh(&ar->mcastpsq_lock); + + clear_bit(DTIM_EXPIRED, &vif->flags); + + /* clear the LSB of the BitMapCtl field of the TIM IE */ + ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0); +} + +void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid, + u8 assoc_resp_len, u8 *assoc_info, + u16 prot_reason_status) +{ + struct ath6kl *ar = vif->ar; + + if (vif->nw_type == AP_NETWORK) { + /* disconnect due to other STA vif switching channels */ + if (reason == BSS_DISCONNECTED && + prot_reason_status == WMI_AP_REASON_STA_ROAM) { + ar->want_ch_switch |= 1 << vif->fw_vif_idx; + /* bail back to this channel if STA vif fails connect */ + ar->last_ch = le16_to_cpu(vif->profile.ch); + } + + if (prot_reason_status == WMI_AP_REASON_MAX_STA) { + /* send max client reached notification to user space */ + cfg80211_conn_failed(vif->ndev, bssid, + NL80211_CONN_FAIL_MAX_CLIENTS, + GFP_KERNEL); + } + + if (prot_reason_status == WMI_AP_REASON_ACL) { + /* send blocked client notification to user space */ + cfg80211_conn_failed(vif->ndev, bssid, + NL80211_CONN_FAIL_BLOCKED_CLIENT, + GFP_KERNEL); + } + + if (!ath6kl_remove_sta(ar, bssid, prot_reason_status)) + return; + + /* if no more associated STAs, empty the mcast PS q */ + if (ar->sta_list_index == 0) { + 
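/* nobody is left to deliver the queued multicast frames to; + * drop them and clear the multicast TIM bit below */ +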
spin_lock_bh(&ar->mcastpsq_lock); + skb_queue_purge(&ar->mcastpsq); + spin_unlock_bh(&ar->mcastpsq_lock); + + /* clear the LSB of the TIM IE's BitMapCtl field */ + if (test_bit(WMI_READY, &ar->flag)) + ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, + MCAST_AID, 0); + } + + if (!is_broadcast_ether_addr(bssid)) { + /* send event to application */ + cfg80211_del_sta(vif->ndev, bssid, GFP_KERNEL); + } + + if (memcmp(vif->ndev->dev_addr, bssid, ETH_ALEN) == 0) { + memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list)); + clear_bit(CONNECTED, &vif->flags); + } + return; + } + + ath6kl_cfg80211_disconnect_event(vif, reason, bssid, + assoc_resp_len, assoc_info, + prot_reason_status); + + aggr_reset_state(vif->aggr_cntxt->aggr_conn); + + del_timer(&vif->disconnect_timer); + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason); + + /* + * If the event is due to a disconnect cmd from the host, only then + * would the target stop trying to connect. Under any other + * condition, the target would keep trying to connect. + */ + if (reason == DISCONNECT_CMD) { + if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag)) + ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, + NONE_BSS_FILTER, 0); + } else { + set_bit(CONNECT_PEND, &vif->flags); + if (((reason == ASSOC_FAILED) && + (prot_reason_status == 0x11)) || + ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0) && + (vif->reconnect_flag == 1))) { + set_bit(CONNECTED, &vif->flags); + return; + } + } + + /* restart disconnected concurrent vifs waiting for new channel */ + ath6kl_check_ch_switch(ar, ar->last_ch); + + /* update connect & link status atomically */ + spin_lock_bh(&vif->if_lock); + clear_bit(CONNECTED, &vif->flags); + netif_carrier_off(vif->ndev); + spin_unlock_bh(&vif->if_lock); + + if ((reason != CSERV_DISCONNECT) || (vif->reconnect_flag != 1)) + vif->reconnect_flag = 0; + + if (reason != CSERV_DISCONNECT) + ar->user_key_ctrl = 0; + + netif_stop_queue(vif->ndev); + memset(vif->bssid, 0, sizeof(vif->bssid)); + vif->bss_ch = 0; + + ath6kl_tx_data_cleanup(ar); +} + +struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar) +{ + struct ath6kl_vif *vif; + + spin_lock_bh(&ar->list_lock); + if (list_empty(&ar->vif_list)) { + spin_unlock_bh(&ar->list_lock); + return NULL; + } + + vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list); + + spin_unlock_bh(&ar->list_lock); + + return vif; +} + +static int ath6kl_open(struct net_device *dev) +{ + struct ath6kl_vif *vif = netdev_priv(dev); + + set_bit(WLAN_ENABLED, &vif->flags); + + if (test_bit(CONNECTED, &vif->flags)) { + netif_carrier_on(dev); + netif_wake_queue(dev); + } else { + netif_carrier_off(dev); + } + + return 0; +} + +static int ath6kl_close(struct net_device *dev) +{ + struct ath6kl_vif *vif = netdev_priv(dev); + + netif_stop_queue(dev); + + ath6kl_cfg80211_stop(vif); + + clear_bit(WLAN_ENABLED, &vif->flags); + + return 0; +} + +static int ath6kl_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct ath6kl_vif *vif = netdev_priv(dev); + struct ath6kl *ar = vif->ar; + int err = 0; + + if ((features & NETIF_F_RXCSUM) && + (ar->rx_meta_ver != WMI_META_VERSION_2)) { + ar->rx_meta_ver = WMI_META_VERSION_2; + err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, + vif->fw_vif_idx, + ar->rx_meta_ver, 0, 0); + if (err) { + dev->features = features & ~NETIF_F_RXCSUM; + return err; + } + } else if (!(features & NETIF_F_RXCSUM) && + (ar->rx_meta_ver == WMI_META_VERSION_2)) { + ar->rx_meta_ver = 0; + err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, + 
vif->fw_vif_idx, + ar->rx_meta_ver, 0, 0); + if (err) { + dev->features = features | NETIF_F_RXCSUM; + return err; + } + } + + return err; +} + +static void ath6kl_set_multicast_list(struct net_device *ndev) +{ + struct ath6kl_vif *vif = netdev_priv(ndev); + bool mc_all_on = false; + int mc_count = netdev_mc_count(ndev); + struct netdev_hw_addr *ha; + bool found; + struct ath6kl_mc_filter *mc_filter, *tmp; + struct list_head mc_filter_new; + int ret; + + if (!test_bit(WMI_READY, &vif->ar->flag) || + !test_bit(WLAN_ENABLED, &vif->flags)) + return; + + /* Enable multicast-all filter. */ + mc_all_on = !!(ndev->flags & IFF_PROMISC) || + !!(ndev->flags & IFF_ALLMULTI) || + !!(mc_count > ATH6K_MAX_MC_FILTERS_PER_LIST); + + if (mc_all_on) + set_bit(NETDEV_MCAST_ALL_ON, &vif->flags); + else + clear_bit(NETDEV_MCAST_ALL_ON, &vif->flags); + + if (test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER, + vif->ar->fw_capabilities)) { + mc_all_on = mc_all_on || (vif->ar->state == ATH6KL_STATE_ON); + } + + if (!(ndev->flags & IFF_MULTICAST)) { + mc_all_on = false; + set_bit(NETDEV_MCAST_ALL_OFF, &vif->flags); + } else { + clear_bit(NETDEV_MCAST_ALL_OFF, &vif->flags); + } + + /* Enable/disable "multicast-all" filter*/ + ath6kl_dbg(ATH6KL_DBG_TRC, "%s multicast-all filter\n", + mc_all_on ? "enabling" : "disabling"); + + ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, vif->fw_vif_idx, + mc_all_on); + if (ret) { + ath6kl_warn("Failed to %s multicast-all receive\n", + mc_all_on ? "enable" : "disable"); + return; + } + + if (test_bit(NETDEV_MCAST_ALL_ON, &vif->flags)) + return; + + /* Keep the driver and firmware mcast list in sync. */ + list_for_each_entry_safe(mc_filter, tmp, &vif->mc_filter, list) { + found = false; + netdev_for_each_mc_addr(ha, ndev) { + if (memcmp(ha->addr, mc_filter->hw_addr, + ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) { + found = true; + break; + } + } + + if (!found) { + /* + * Delete the filter which was previously set + * but not in the new request. 
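+ * One WMI delete command is issued per stale address, keeping the + * firmware's copy of the list in step with the kernel's.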
+ */ + ath6kl_dbg(ATH6KL_DBG_TRC, + "Removing %pM from multicast filter\n", + mc_filter->hw_addr); + ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi, + vif->fw_vif_idx, mc_filter->hw_addr, + false); + if (ret) { + ath6kl_warn("Failed to remove multicast filter:%pM\n", + mc_filter->hw_addr); + return; + } + + list_del(&mc_filter->list); + kfree(mc_filter); + } + } + + INIT_LIST_HEAD(&mc_filter_new); + + netdev_for_each_mc_addr(ha, ndev) { + found = false; + list_for_each_entry(mc_filter, &vif->mc_filter, list) { + if (memcmp(ha->addr, mc_filter->hw_addr, + ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) { + found = true; + break; + } + } + + if (!found) { + mc_filter = kzalloc(sizeof(struct ath6kl_mc_filter), + GFP_ATOMIC); + if (!mc_filter) { + WARN_ON(1); + goto out; + } + + memcpy(mc_filter->hw_addr, ha->addr, + ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE); + /* Set the multicast filter */ + ath6kl_dbg(ATH6KL_DBG_TRC, + "Adding %pM to multicast filter list\n", + mc_filter->hw_addr); + ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi, + vif->fw_vif_idx, mc_filter->hw_addr, + true); + if (ret) { + ath6kl_warn("Failed to add multicast filter :%pM\n", + mc_filter->hw_addr); + kfree(mc_filter); + goto out; + } + + list_add_tail(&mc_filter->list, &mc_filter_new); + } + } + +out: + list_splice_tail(&mc_filter_new, &vif->mc_filter); +} + +static const struct net_device_ops ath6kl_netdev_ops = { + .ndo_open = ath6kl_open, + .ndo_stop = ath6kl_close, + .ndo_start_xmit = ath6kl_data_tx, + .ndo_set_features = ath6kl_set_features, + .ndo_set_rx_mode = ath6kl_set_multicast_list, +}; + +void init_netdev(struct net_device *dev) +{ + struct ath6kl *ar = ath6kl_priv(dev); + + dev->netdev_ops = &ath6kl_netdev_ops; + dev->needs_free_netdev = true; + dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; + + dev->needed_headroom = ETH_HLEN; + dev->needed_headroom += roundup(sizeof(struct ath6kl_llc_snap_hdr) + + sizeof(struct wmi_data_hdr) + + HTC_HDR_LENGTH + + WMI_MAX_TX_META_SZ + + ATH6KL_HTC_ALIGN_BYTES, 4); + + if (!test_bit(ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM, + ar->fw_capabilities)) + dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM; + + return; +} diff --git a/drivers/net/wireless/ath/ath6kl/recovery.c b/drivers/net/wireless/ath/ath6kl/recovery.c new file mode 100644 index 000000000..c09e40c90 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/recovery.c @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "core.h" +#include "cfg80211.h" +#include "debug.h" + +static void ath6kl_recovery_work(struct work_struct *work) +{ + struct ath6kl *ar = container_of(work, struct ath6kl, + fw_recovery.recovery_work); + + ar->state = ATH6KL_STATE_RECOVERY; + + del_timer_sync(&ar->fw_recovery.hb_timer); + + ath6kl_init_hw_restart(ar); + + ar->state = ATH6KL_STATE_ON; + clear_bit(WMI_CTRL_EP_FULL, &ar->flag); + + ar->fw_recovery.err_reason = 0; + + if (ar->fw_recovery.hb_poll) + mod_timer(&ar->fw_recovery.hb_timer, jiffies + + msecs_to_jiffies(ar->fw_recovery.hb_poll)); +} + +void ath6kl_recovery_err_notify(struct ath6kl *ar, enum ath6kl_fw_err reason) +{ + if (!ar->fw_recovery.enable) + return; + + ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Fw error detected, reason:%d\n", + reason); + + set_bit(reason, &ar->fw_recovery.err_reason); + + if (!test_bit(RECOVERY_CLEANUP, &ar->flag) && + ar->state != ATH6KL_STATE_RECOVERY) + queue_work(ar->ath6kl_wq, &ar->fw_recovery.recovery_work); +} + +void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie) +{ + if (cookie == ar->fw_recovery.seq_num) + ar->fw_recovery.hb_pending = false; +} + +static void ath6kl_recovery_hb_timer(struct timer_list *t) +{ + struct ath6kl *ar = from_timer(ar, t, fw_recovery.hb_timer); + int err; + + if (test_bit(RECOVERY_CLEANUP, &ar->flag) || + (ar->state == ATH6KL_STATE_RECOVERY)) + return; + + if (ar->fw_recovery.hb_pending) + ar->fw_recovery.hb_misscnt++; + else + ar->fw_recovery.hb_misscnt = 0; + + if (ar->fw_recovery.hb_misscnt > ATH6KL_HB_RESP_MISS_THRES) { + ar->fw_recovery.hb_misscnt = 0; + ar->fw_recovery.seq_num = 0; + ar->fw_recovery.hb_pending = false; + ath6kl_recovery_err_notify(ar, ATH6KL_FW_HB_RESP_FAILURE); + return; + } + + ar->fw_recovery.seq_num++; + ar->fw_recovery.hb_pending = true; + + err = ath6kl_wmi_get_challenge_resp_cmd(ar->wmi, + ar->fw_recovery.seq_num, 0); + if (err) + ath6kl_warn("Failed to send hb challenge request, err:%d\n", + err); + + mod_timer(&ar->fw_recovery.hb_timer, jiffies + + msecs_to_jiffies(ar->fw_recovery.hb_poll)); +} + +void ath6kl_recovery_init(struct ath6kl *ar) +{ + struct ath6kl_fw_recovery *recovery = &ar->fw_recovery; + + clear_bit(RECOVERY_CLEANUP, &ar->flag); + INIT_WORK(&recovery->recovery_work, ath6kl_recovery_work); + recovery->seq_num = 0; + recovery->hb_misscnt = 0; + ar->fw_recovery.hb_pending = false; + timer_setup(&ar->fw_recovery.hb_timer, ath6kl_recovery_hb_timer, + TIMER_DEFERRABLE); + + if (ar->fw_recovery.hb_poll) + mod_timer(&ar->fw_recovery.hb_timer, jiffies + + msecs_to_jiffies(ar->fw_recovery.hb_poll)); +} + +void ath6kl_recovery_cleanup(struct ath6kl *ar) +{ + if (!ar->fw_recovery.enable) + return; + + set_bit(RECOVERY_CLEANUP, &ar->flag); + + del_timer_sync(&ar->fw_recovery.hb_timer); + cancel_work_sync(&ar->fw_recovery.recovery_work); +} + +void ath6kl_recovery_suspend(struct ath6kl *ar) +{ + if (!ar->fw_recovery.enable) + return; + + ath6kl_recovery_cleanup(ar); + + if (!ar->fw_recovery.err_reason) + return; + + /* Process pending fw error detection */ + ar->fw_recovery.err_reason = 0; + WARN_ON(ar->state != ATH6KL_STATE_ON); + ar->state = ATH6KL_STATE_RECOVERY; + ath6kl_init_hw_restart(ar); + ar->state = ATH6KL_STATE_ON; +} + +void ath6kl_recovery_resume(struct ath6kl *ar) +{ + if (!ar->fw_recovery.enable) + return; + + clear_bit(RECOVERY_CLEANUP, &ar->flag); + + if (!ar->fw_recovery.hb_poll) + return; + + ar->fw_recovery.hb_pending = false; + ar->fw_recovery.seq_num = 0; + ar->fw_recovery.hb_misscnt = 0; + mod_timer(&ar->fw_recovery.hb_timer, + 
jiffies + msecs_to_jiffies(ar->fw_recovery.hb_poll)); +} diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c new file mode 100644 index 000000000..8a43c48ec --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/sdio.c @@ -0,0 +1,1475 @@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/module.h> +#include <linux/mmc/card.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/host.h> +#include <linux/mmc/sdio_func.h> +#include <linux/mmc/sdio_ids.h> +#include <linux/mmc/sdio.h> +#include <linux/mmc/sd.h> +#include "hif.h" +#include "hif-ops.h" +#include "target.h" +#include "debug.h" +#include "cfg80211.h" +#include "trace.h" + +struct ath6kl_sdio { + struct sdio_func *func; + + /* protects access to bus_req_freeq */ + spinlock_t lock; + + /* free list */ + struct list_head bus_req_freeq; + + /* available bus requests */ + struct bus_request bus_req[BUS_REQUEST_MAX_NUM]; + + struct ath6kl *ar; + + u8 *dma_buffer; + + /* protects access to dma_buffer */ + struct mutex dma_buffer_mutex; + + /* scatter request list head */ + struct list_head scat_req; + + atomic_t irq_handling; + wait_queue_head_t irq_wq; + + /* protects access to scat_req */ + spinlock_t scat_lock; + + bool scatter_enabled; + + bool is_disabled; + const struct sdio_device_id *id; + struct work_struct wr_async_work; + struct list_head wr_asyncq; + + /* protects access to wr_asyncq */ + spinlock_t wr_async_lock; +}; + +#define CMD53_ARG_READ 0 +#define CMD53_ARG_WRITE 1 +#define CMD53_ARG_BLOCK_BASIS 1 +#define CMD53_ARG_FIXED_ADDRESS 0 +#define CMD53_ARG_INCR_ADDRESS 1 + +static int ath6kl_sdio_config(struct ath6kl *ar); + +static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar) +{ + return ar->hif_priv; +} + +/* + * Macro to check if DMA buffer is WORD-aligned and DMA-able. + * Most host controllers assume the buffer is DMA'able and will + * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid + * check fails on stack memory. 
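+ * Buffers that fail either test are bounced through the + * pre-allocated dma_buffer in ath6kl_sdio_read_write_sync().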
+ */ +static inline bool buf_needs_bounce(u8 *buf) +{ + return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf); +} + +static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar) +{ + struct ath6kl_mbox_info *mbox_info = &ar->mbox_info; + + /* EP1 has an extended range */ + mbox_info->htc_addr = HIF_MBOX_BASE_ADDR; + mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR; + mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH; + mbox_info->block_size = HIF_MBOX_BLOCK_SIZE; + mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR; + mbox_info->gmbox_sz = HIF_GMBOX_WIDTH; +} + +static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func, + u8 mode, u8 opcode, u32 addr, + u16 blksz) +{ + *arg = (((rw & 1) << 31) | + ((func & 0x7) << 28) | + ((mode & 1) << 27) | + ((opcode & 1) << 26) | + ((addr & 0x1FFFF) << 9) | + (blksz & 0x1FF)); +} + +static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw, + unsigned int address, + unsigned char val) +{ + const u8 func = 0; + + *arg = ((write & 1) << 31) | + ((func & 0x7) << 28) | + ((raw & 1) << 27) | + (1 << 26) | + ((address & 0x1FFFF) << 9) | + (1 << 8) | + (val & 0xFF); +} + +static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card, + unsigned int address, + unsigned char byte) +{ + struct mmc_command io_cmd; + + memset(&io_cmd, 0, sizeof(io_cmd)); + ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte); + io_cmd.opcode = SD_IO_RW_DIRECT; + io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC; + + return mmc_wait_for_cmd(card->host, &io_cmd, 0); +} + +static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr, + u8 *buf, u32 len) +{ + int ret = 0; + + sdio_claim_host(func); + + if (request & HIF_WRITE) { + /* FIXME: looks like ugly workaround for something */ + if (addr >= HIF_MBOX_BASE_ADDR && + addr <= HIF_MBOX_END_ADDR) + addr += (HIF_MBOX_WIDTH - len); + + /* FIXME: this also looks like ugly workaround */ + if (addr == HIF_MBOX0_EXT_BASE_ADDR) + addr += HIF_MBOX0_EXT_WIDTH - len; + + if (request & HIF_FIXED_ADDRESS) + ret = sdio_writesb(func, addr, buf, len); + else + ret = sdio_memcpy_toio(func, addr, buf, len); + } else { + if (request & HIF_FIXED_ADDRESS) + ret = sdio_readsb(func, buf, addr, len); + else + ret = sdio_memcpy_fromio(func, buf, addr, len); + } + + sdio_release_host(func); + + ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n", + request & HIF_WRITE ? "wr" : "rd", addr, + request & HIF_FIXED_ADDRESS ? 
" (fixed)" : "", buf, len); + ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len); + + trace_ath6kl_sdio(addr, request, buf, len); + + return ret; +} + +static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio) +{ + struct bus_request *bus_req; + + spin_lock_bh(&ar_sdio->lock); + + if (list_empty(&ar_sdio->bus_req_freeq)) { + spin_unlock_bh(&ar_sdio->lock); + return NULL; + } + + bus_req = list_first_entry(&ar_sdio->bus_req_freeq, + struct bus_request, list); + list_del(&bus_req->list); + + spin_unlock_bh(&ar_sdio->lock); + ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", + __func__, bus_req); + + return bus_req; +} + +static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio, + struct bus_request *bus_req) +{ + ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", + __func__, bus_req); + + spin_lock_bh(&ar_sdio->lock); + list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq); + spin_unlock_bh(&ar_sdio->lock); +} + +static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req, + struct mmc_data *data) +{ + struct scatterlist *sg; + int i; + + data->blksz = HIF_MBOX_BLOCK_SIZE; + data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE; + + ath6kl_dbg(ATH6KL_DBG_SCATTER, + "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n", + (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr, + data->blksz, data->blocks, scat_req->len, + scat_req->scat_entries); + + data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE : + MMC_DATA_READ; + + /* fill SG entries */ + sg = scat_req->sgentries; + sg_init_table(sg, scat_req->scat_entries); + + /* assemble SG list */ + for (i = 0; i < scat_req->scat_entries; i++, sg++) { + ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n", + i, scat_req->scat_list[i].buf, + scat_req->scat_list[i].len); + + sg_set_buf(sg, scat_req->scat_list[i].buf, + scat_req->scat_list[i].len); + } + + /* set scatter-gather table for request */ + data->sg = scat_req->sgentries; + data->sg_len = scat_req->scat_entries; +} + +static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio, + struct bus_request *req) +{ + struct mmc_request mmc_req; + struct mmc_command cmd; + struct mmc_data data; + struct hif_scatter_req *scat_req; + u8 opcode, rw; + int status, len; + + scat_req = req->scat_req; + + if (scat_req->virt_scat) { + len = scat_req->len; + if (scat_req->req & HIF_BLOCK_BASIS) + len = round_down(len, HIF_MBOX_BLOCK_SIZE); + + status = ath6kl_sdio_io(ar_sdio->func, scat_req->req, + scat_req->addr, scat_req->virt_dma_buf, + len); + goto scat_complete; + } + + memset(&mmc_req, 0, sizeof(struct mmc_request)); + memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&data, 0, sizeof(struct mmc_data)); + + ath6kl_sdio_setup_scat_data(scat_req, &data); + + opcode = (scat_req->req & HIF_FIXED_ADDRESS) ? + CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS; + + rw = (scat_req->req & HIF_WRITE) ? 
CMD53_ARG_WRITE : CMD53_ARG_READ; + + /* Fixup the address so that the last byte will fall on MBOX EOM */ + if (scat_req->req & HIF_WRITE) { + if (scat_req->addr == HIF_MBOX_BASE_ADDR) + scat_req->addr += HIF_MBOX_WIDTH - scat_req->len; + else + /* Uses extended address range */ + scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len; + } + + /* set command argument */ + ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num, + CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr, + data.blocks); + + cmd.opcode = SD_IO_RW_EXTENDED; + cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; + + mmc_req.cmd = &cmd; + mmc_req.data = &data; + + sdio_claim_host(ar_sdio->func); + + mmc_set_data_timeout(&data, ar_sdio->func->card); + + trace_ath6kl_sdio_scat(scat_req->addr, + scat_req->req, + scat_req->len, + scat_req->scat_entries, + scat_req->scat_list); + + /* synchronous call to process request */ + mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req); + + sdio_release_host(ar_sdio->func); + + status = cmd.error ? cmd.error : data.error; + +scat_complete: + scat_req->status = status; + + if (scat_req->status) + ath6kl_err("Scatter write request failed:%d\n", + scat_req->status); + + if (scat_req->req & HIF_ASYNCHRONOUS) + scat_req->complete(ar_sdio->ar->htc_target, scat_req); + + return status; +} + +static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio, + int n_scat_entry, int n_scat_req, + bool virt_scat) +{ + struct hif_scatter_req *s_req; + struct bus_request *bus_req; + int i, scat_req_sz, scat_list_sz, size; + u8 *virt_buf; + + scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item); + scat_req_sz = sizeof(*s_req) + scat_list_sz; + + if (!virt_scat) + size = sizeof(struct scatterlist) * n_scat_entry; + else + size = 2 * L1_CACHE_BYTES + + ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; + + for (i = 0; i < n_scat_req; i++) { + /* allocate the scatter request */ + s_req = kzalloc(scat_req_sz, GFP_KERNEL); + if (!s_req) + return -ENOMEM; + + if (virt_scat) { + virt_buf = kzalloc(size, GFP_KERNEL); + if (!virt_buf) { + kfree(s_req); + return -ENOMEM; + } + + s_req->virt_dma_buf = + (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf); + } else { + /* allocate sglist */ + s_req->sgentries = kzalloc(size, GFP_KERNEL); + + if (!s_req->sgentries) { + kfree(s_req); + return -ENOMEM; + } + } + + /* allocate a bus request for this scatter request */ + bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); + if (!bus_req) { + kfree(s_req->sgentries); + kfree(s_req->virt_dma_buf); + kfree(s_req); + return -ENOMEM; + } + + /* assign the scatter request to this bus request */ + bus_req->scat_req = s_req; + s_req->busrequest = bus_req; + + s_req->virt_scat = virt_scat; + + /* add it to the scatter pool */ + hif_scatter_req_add(ar_sdio->ar, s_req); + } + + return 0; +} + +static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf, + u32 len, u32 request) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + u8 *tbuf = NULL; + int ret; + bool bounced = false; + + if (request & HIF_BLOCK_BASIS) + len = round_down(len, HIF_MBOX_BLOCK_SIZE); + + if (buf_needs_bounce(buf)) { + if (!ar_sdio->dma_buffer) + return -ENOMEM; + mutex_lock(&ar_sdio->dma_buffer_mutex); + tbuf = ar_sdio->dma_buffer; + + if (request & HIF_WRITE) + memcpy(tbuf, buf, len); + + bounced = true; + } else { + tbuf = buf; + } + + ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len); + if ((request & HIF_READ) && bounced) + memcpy(buf, tbuf, len); + + if (bounced) + mutex_unlock(&ar_sdio->dma_buffer_mutex); + + return 
ret; +} + +static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio, + struct bus_request *req) +{ + if (req->scat_req) { + ath6kl_sdio_scat_rw(ar_sdio, req); + } else { + void *context; + int status; + + status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address, + req->buffer, req->length, + req->request); + context = req->packet; + ath6kl_sdio_free_bus_req(ar_sdio, req); + ath6kl_hif_rw_comp_handler(context, status); + } +} + +static void ath6kl_sdio_write_async_work(struct work_struct *work) +{ + struct ath6kl_sdio *ar_sdio; + struct bus_request *req, *tmp_req; + + ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work); + + spin_lock_bh(&ar_sdio->wr_async_lock); + list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { + list_del(&req->list); + spin_unlock_bh(&ar_sdio->wr_async_lock); + __ath6kl_sdio_write_async(ar_sdio, req); + spin_lock_bh(&ar_sdio->wr_async_lock); + } + spin_unlock_bh(&ar_sdio->wr_async_lock); +} + +static void ath6kl_sdio_irq_handler(struct sdio_func *func) +{ + int status; + struct ath6kl_sdio *ar_sdio; + + ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n"); + + ar_sdio = sdio_get_drvdata(func); + atomic_set(&ar_sdio->irq_handling, 1); + /* + * Release the host during interrupts so we can pick it back up when + * we process commands. + */ + sdio_release_host(ar_sdio->func); + + status = ath6kl_hif_intr_bh_handler(ar_sdio->ar); + sdio_claim_host(ar_sdio->func); + + atomic_set(&ar_sdio->irq_handling, 0); + wake_up(&ar_sdio->irq_wq); + + WARN_ON(status && status != -ECANCELED); +} + +static int ath6kl_sdio_power_on(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + struct sdio_func *func = ar_sdio->func; + int ret = 0; + + if (!ar_sdio->is_disabled) + return 0; + + ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n"); + + sdio_claim_host(func); + + ret = sdio_enable_func(func); + if (ret) { + ath6kl_err("Unable to enable sdio func: %d\n", ret); + sdio_release_host(func); + return ret; + } + + sdio_release_host(func); + + /* + * Wait for hardware to initialise. It should take a lot less than + * 10 ms but let's be conservative here. 
+ */ + msleep(10); + + ret = ath6kl_sdio_config(ar); + if (ret) { + ath6kl_err("Failed to config sdio: %d\n", ret); + goto out; + } + + ar_sdio->is_disabled = false; + +out: + return ret; +} + +static int ath6kl_sdio_power_off(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + int ret; + + if (ar_sdio->is_disabled) + return 0; + + ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n"); + + /* Disable the card */ + sdio_claim_host(ar_sdio->func); + ret = sdio_disable_func(ar_sdio->func); + sdio_release_host(ar_sdio->func); + + if (ret) + return ret; + + ar_sdio->is_disabled = true; + + return ret; +} + +static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer, + u32 length, u32 request, + struct htc_packet *packet) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + struct bus_request *bus_req; + + bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); + + if (WARN_ON_ONCE(!bus_req)) + return -ENOMEM; + + bus_req->address = address; + bus_req->buffer = buffer; + bus_req->length = length; + bus_req->request = request; + bus_req->packet = packet; + + spin_lock_bh(&ar_sdio->wr_async_lock); + list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq); + spin_unlock_bh(&ar_sdio->wr_async_lock); + queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); + + return 0; +} + +static void ath6kl_sdio_irq_enable(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + int ret; + + sdio_claim_host(ar_sdio->func); + + /* Register the isr */ + ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler); + if (ret) + ath6kl_err("Failed to claim sdio irq: %d\n", ret); + + sdio_release_host(ar_sdio->func); +} + +static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + + return !atomic_read(&ar_sdio->irq_handling); +} + +static void ath6kl_sdio_irq_disable(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + int ret; + + sdio_claim_host(ar_sdio->func); + + if (atomic_read(&ar_sdio->irq_handling)) { + sdio_release_host(ar_sdio->func); + + ret = wait_event_interruptible(ar_sdio->irq_wq, + ath6kl_sdio_is_on_irq(ar)); + if (ret) + return; + + sdio_claim_host(ar_sdio->func); + } + + ret = sdio_release_irq(ar_sdio->func); + if (ret) + ath6kl_err("Failed to release sdio irq: %d\n", ret); + + sdio_release_host(ar_sdio->func); +} + +static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + struct hif_scatter_req *node = NULL; + + spin_lock_bh(&ar_sdio->scat_lock); + + if (!list_empty(&ar_sdio->scat_req)) { + node = list_first_entry(&ar_sdio->scat_req, + struct hif_scatter_req, list); + list_del(&node->list); + + node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req); + } + + spin_unlock_bh(&ar_sdio->scat_lock); + + return node; +} + +static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar, + struct hif_scatter_req *s_req) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + + spin_lock_bh(&ar_sdio->scat_lock); + + list_add_tail(&s_req->list, &ar_sdio->scat_req); + + spin_unlock_bh(&ar_sdio->scat_lock); +} + +/* scatter gather read write request */ +static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar, + struct hif_scatter_req *scat_req) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + u32 request = scat_req->req; + int status = 0; + + if (!scat_req->len) + return -EINVAL; + + ath6kl_dbg(ATH6KL_DBG_SCATTER, + "hif-scatter: total len: %d scatter entries: %d\n", + scat_req->len, 
scat_req->scat_entries); + + if (request & HIF_SYNCHRONOUS) { + status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest); + } else { + spin_lock_bh(&ar_sdio->wr_async_lock); + list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq); + spin_unlock_bh(&ar_sdio->wr_async_lock); + queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); + } + + return status; +} + +/* clean up scatter support */ +static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + struct hif_scatter_req *s_req, *tmp_req; + + /* empty the free list */ + spin_lock_bh(&ar_sdio->scat_lock); + list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) { + list_del(&s_req->list); + spin_unlock_bh(&ar_sdio->scat_lock); + + /* + * FIXME: should we also call completion handler with + * ath6kl_hif_rw_comp_handler() with status -ECANCELED so + * that the packet is properly freed? + */ + if (s_req->busrequest) { + s_req->busrequest->scat_req = NULL; + ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest); + } + kfree(s_req->virt_dma_buf); + kfree(s_req->sgentries); + kfree(s_req); + + spin_lock_bh(&ar_sdio->scat_lock); + } + spin_unlock_bh(&ar_sdio->scat_lock); + + ar_sdio->scatter_enabled = false; +} + +/* setup of HIF scatter resources */ +static int ath6kl_sdio_enable_scatter(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + struct htc_target *target = ar->htc_target; + int ret = 0; + bool virt_scat = false; + + if (ar_sdio->scatter_enabled) + return 0; + + ar_sdio->scatter_enabled = true; + + /* check if host supports scatter and it meets our requirements */ + if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) { + ath6kl_err("host only supports scatter of :%d entries, need: %d\n", + ar_sdio->func->card->host->max_segs, + MAX_SCATTER_ENTRIES_PER_REQ); + virt_scat = true; + } + + if (!virt_scat) { + ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio, + MAX_SCATTER_ENTRIES_PER_REQ, + MAX_SCATTER_REQUESTS, virt_scat); + + if (!ret) { + ath6kl_dbg(ATH6KL_DBG_BOOT, + "hif-scatter enabled requests %d entries %d\n", + MAX_SCATTER_REQUESTS, + MAX_SCATTER_ENTRIES_PER_REQ); + + target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ; + target->max_xfer_szper_scatreq = + MAX_SCATTER_REQ_TRANSFER_SIZE; + } else { + ath6kl_sdio_cleanup_scatter(ar); + ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n"); + } + } + + if (virt_scat || ret) { + ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio, + ATH6KL_SCATTER_ENTRIES_PER_REQ, + ATH6KL_SCATTER_REQS, virt_scat); + + if (ret) { + ath6kl_err("failed to alloc virtual scatter resources !\n"); + ath6kl_sdio_cleanup_scatter(ar); + return ret; + } + + ath6kl_dbg(ATH6KL_DBG_BOOT, + "virtual scatter enabled requests %d entries %d\n", + ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ); + + target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ; + target->max_xfer_szper_scatreq = + ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; + } + + return 0; +} + +static int ath6kl_sdio_config(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + struct sdio_func *func = ar_sdio->func; + int ret; + + sdio_claim_host(func); + + if (ar_sdio->id->device >= SDIO_DEVICE_ID_ATHEROS_AR6003_00) { + /* enable 4-bit ASYNC interrupt on AR6003 or later */ + ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card, + CCCR_SDIO_IRQ_MODE_REG, + SDIO_IRQ_MODE_ASYNC_4BIT_IRQ); + if (ret) { + ath6kl_err("Failed to enable 4-bit async irq mode %d\n", + ret); + goto out; + } + + 
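/* with async irq the card may interrupt even while the 4-bit + * bus is idle, not just during the inter-block interrupt period */ +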
ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n"); + } + + /* give us some time to enable, in ms */ + func->enable_timeout = 100; + + ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE); + if (ret) { + ath6kl_err("Set sdio block size %d failed: %d\n", + HIF_MBOX_BLOCK_SIZE, ret); + goto out; + } + +out: + sdio_release_host(func); + + return ret; +} + +static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + struct sdio_func *func = ar_sdio->func; + mmc_pm_flag_t flags; + int ret; + + flags = sdio_get_host_pm_caps(func); + + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags); + + if (!(flags & MMC_PM_WAKE_SDIO_IRQ) || + !(flags & MMC_PM_KEEP_POWER)) + return -EINVAL; + + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (ret) { + ath6kl_err("set sdio keep pwr flag failed: %d\n", ret); + return ret; + } + + /* sdio irq wakes up host */ + ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ); + if (ret) + ath6kl_err("set sdio wake irq flag failed: %d\n", ret); + + return ret; +} + +static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + struct sdio_func *func = ar_sdio->func; + mmc_pm_flag_t flags; + bool try_deepsleep = false; + int ret; + + if (ar->suspend_mode == WLAN_POWER_STATE_WOW || + (!ar->suspend_mode && wow)) { + ret = ath6kl_set_sdio_pm_caps(ar); + if (ret) + goto cut_pwr; + + ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow); + if (ret && ret != -ENOTCONN) + ath6kl_err("wow suspend failed: %d\n", ret); + + if (ret && + (!ar->wow_suspend_mode || + ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP)) + try_deepsleep = true; + else if (ret && + ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR) + goto cut_pwr; + if (!ret) + return 0; + } + + if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP || + !ar->suspend_mode || try_deepsleep) { + flags = sdio_get_host_pm_caps(func); + if (!(flags & MMC_PM_KEEP_POWER)) + goto cut_pwr; + + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (ret) + goto cut_pwr; + + /* + * Workaround to support Deep Sleep with MSM, set the host pm + * flag as MMC_PM_WAKE_SDIO_IRQ to allow the SDCC driver to disable + * the sdc2_clock and internally allow MSM to enter + * TCXO shutdown properly. + */ + if ((flags & MMC_PM_WAKE_SDIO_IRQ)) { + ret = sdio_set_host_pm_flags(func, + MMC_PM_WAKE_SDIO_IRQ); + if (ret) + goto cut_pwr; + } + + ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, + NULL); + if (ret) + goto cut_pwr; + + return 0; + } + +cut_pwr: + if (func->card && func->card->host) + func->card->host->pm_flags &= ~MMC_PM_KEEP_POWER; + + return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL); +} + +static int ath6kl_sdio_resume(struct ath6kl *ar) +{ + switch (ar->state) { + case ATH6KL_STATE_OFF: + case ATH6KL_STATE_CUTPOWER: + ath6kl_dbg(ATH6KL_DBG_SUSPEND, + "sdio resume configuring sdio\n"); + + /* need to set sdio settings after power is cut from sdio */ + ath6kl_sdio_config(ar); + break; + + case ATH6KL_STATE_ON: + break; + + case ATH6KL_STATE_DEEPSLEEP: + break; + + case ATH6KL_STATE_WOW: + break; + + case ATH6KL_STATE_SUSPENDING: + break; + + case ATH6KL_STATE_RESUMING: + break; + + case ATH6KL_STATE_RECOVERY: + break; + } + + ath6kl_cfg80211_resume(ar); + + return 0; +} + +/* set the window address register (using 4-byte register access). 
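+ * Diag accesses move their data through WINDOW_DATA_ADDRESS once the + * target address has been latched here; writing the LSB last is what + * starts the access cycle.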
*/ +static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr) +{ + int status; + u8 addr_val[4]; + s32 i; + + /* + * Write bytes 1,2,3 of the register to set the upper address bytes, + * the LSB is written last to initiate the access cycle + */ + + for (i = 1; i <= 3; i++) { + /* + * Fill the buffer with the address byte value we want to + * hit 4 times. + */ + memset(addr_val, ((u8 *)&addr)[i], 4); + + /* + * Hit each byte of the register address with a 4-byte + * write operation to the same address, this is a harmless + * operation. + */ + status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val, + 4, HIF_WR_SYNC_BYTE_FIX); + if (status) + break; + } + + if (status) { + ath6kl_err("%s: failed to write initial bytes of 0x%x to window reg: 0x%X\n", + __func__, addr, reg_addr); + return status; + } + + /* + * Write the address register again, this time write the whole + * 4-byte value. The effect here is that the LSB write causes the + * cycle to start, the extra 3 byte write to bytes 1,2,3 has no + * effect since we are writing the same values again + */ + status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr), + 4, HIF_WR_SYNC_BYTE_INC); + + if (status) { + ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n", + __func__, addr, reg_addr); + return status; + } + + return 0; +} + +static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data) +{ + int status; + + /* set window register to start read cycle */ + status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS, + address); + + if (status) + return status; + + /* read the data */ + status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS, + (u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC); + if (status) { + ath6kl_err("%s: failed to read from window data addr\n", + __func__); + return status; + } + + return status; +} + +static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address, + __le32 data) +{ + int status; + u32 val = (__force u32) data; + + /* set write data */ + status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS, + (u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC); + if (status) { + ath6kl_err("%s: failed to write 0x%x to window data addr\n", + __func__, data); + return status; + } + + /* set window register, which starts the write cycle */ + return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS, + address); +} + +static int ath6kl_sdio_bmi_credits(struct ath6kl *ar) +{ + u32 addr; + unsigned long timeout; + int ret; + + ar->bmi.cmd_credits = 0; + + /* Read the counter register to get the command credits */ + addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4; + + timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); + while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) { + /* + * Hit the credit counter with a 4-byte access, the first byte + * read will hit the counter and cause a decrement, while the + * remaining 3 bytes has no effect. The rationale behind this + * is to make all HIF accesses 4-byte aligned. + */ + ret = ath6kl_sdio_read_write_sync(ar, addr, + (u8 *)&ar->bmi.cmd_credits, 4, + HIF_RD_SYNC_BYTE_INC); + if (ret) { + ath6kl_err("Unable to decrement the command credit count register: %d\n", + ret); + return ret; + } + + /* The counter is only 8 bits. 
+ * Ignore anything in the upper 3 bytes + */ + ar->bmi.cmd_credits &= 0xFF; + } + + if (!ar->bmi.cmd_credits) { + ath6kl_err("bmi communication timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar) +{ + unsigned long timeout; + u32 rx_word = 0; + int ret = 0; + + timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); + while ((time_before(jiffies, timeout)) && !rx_word) { + ret = ath6kl_sdio_read_write_sync(ar, + RX_LOOKAHEAD_VALID_ADDRESS, + (u8 *)&rx_word, sizeof(rx_word), + HIF_RD_SYNC_BYTE_INC); + if (ret) { + ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n"); + return ret; + } + + /* all we really want is one bit */ + rx_word &= (1 << ENDPOINT1); + } + + if (!rx_word) { + ath6kl_err("bmi_recv_buf FIFO empty\n"); + return -EINVAL; + } + + return ret; +} + +static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len) +{ + int ret; + u32 addr; + + ret = ath6kl_sdio_bmi_credits(ar); + if (ret) + return ret; + + addr = ar->mbox_info.htc_addr; + + ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, + HIF_WR_SYNC_BYTE_INC); + if (ret) { + ath6kl_err("unable to send the bmi data to the device\n"); + return ret; + } + + return 0; +} + +static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) +{ + int ret; + u32 addr; + + /* + * During normal bootup, small reads may be required. + * Rather than issue an HIF Read and then wait as the Target + * adds successive bytes to the FIFO, we wait here until + * we know that response data is available. + * + * This allows us to cleanly timeout on an unexpected + * Target failure rather than risk problems at the HIF level. + * In particular, this avoids SDIO timeouts and possibly garbage + * data on some host controllers. And on an interconnect + * such as Compact Flash (as well as some SDIO masters) which + * does not provide any indication on data timeout, it avoids + * a potential hang or garbage response. + * + * Synchronization is more difficult for reads larger than the + * size of the MBOX FIFO (128B), because the Target is unable + * to push the 129th byte of data until AFTER the Host posts an + * HIF Read and removes some FIFO data. So for large reads the + * Host proceeds to post an HIF Read BEFORE all the data is + * actually available to read. Fortunately, large BMI reads do + * not occur in practice -- they're supported for debug/development. + * + * So Host/Target BMI synchronization is divided into these cases: + * CASE 1: length < 4 + * Should not happen + * + * CASE 2: 4 <= length <= 128 + * Wait for first 4 bytes to be in FIFO + * If CONSERVATIVE_BMI_READ is enabled, also wait for + * a BMI command credit, which indicates that the ENTIRE + * response is available in the FIFO + * + * CASE 3: length > 128 + * Wait for the first 4 bytes to be in FIFO + * + * For most uses, a small timeout should be sufficient and we will + * usually see a response quickly; but there may be some unusual + * (debug) cases of BMI_EXECUTE where we want a larger timeout. + * For now, we use an unbounded busy loop while waiting for + * BMI_EXECUTE. + * + * If BMI_EXECUTE ever needs to support longer-latency execution, + * especially in production, this code needs to be enhanced to sleep + * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently + * a function of Host processor speed. 
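+ * + * Only the 'wait for the first 4 bytes' part is implemented below: + * ath6kl_bmi_get_rx_lkahd() polls RX_LOOKAHEAD_VALID before the read + * is posted.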
+ */ + if (len >= 4) { /* NB: Currently, always true */ + ret = ath6kl_bmi_get_rx_lkahd(ar); + if (ret) + return ret; + } + + addr = ar->mbox_info.htc_addr; + ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, + HIF_RD_SYNC_BYTE_INC); + if (ret) { + ath6kl_err("Unable to read the bmi data from the device: %d\n", + ret); + return ret; + } + + return 0; +} + +static void ath6kl_sdio_stop(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + struct bus_request *req, *tmp_req; + void *context; + + /* FIXME: make sure that wq is not queued again */ + + cancel_work_sync(&ar_sdio->wr_async_work); + + spin_lock_bh(&ar_sdio->wr_async_lock); + + list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { + list_del(&req->list); + + if (req->scat_req) { + /* this is a scatter gather request */ + req->scat_req->status = -ECANCELED; + req->scat_req->complete(ar_sdio->ar->htc_target, + req->scat_req); + } else { + context = req->packet; + ath6kl_sdio_free_bus_req(ar_sdio, req); + ath6kl_hif_rw_comp_handler(context, -ECANCELED); + } + } + + spin_unlock_bh(&ar_sdio->wr_async_lock); + + WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4); +} + +static const struct ath6kl_hif_ops ath6kl_sdio_ops = { + .read_write_sync = ath6kl_sdio_read_write_sync, + .write_async = ath6kl_sdio_write_async, + .irq_enable = ath6kl_sdio_irq_enable, + .irq_disable = ath6kl_sdio_irq_disable, + .scatter_req_get = ath6kl_sdio_scatter_req_get, + .scatter_req_add = ath6kl_sdio_scatter_req_add, + .enable_scatter = ath6kl_sdio_enable_scatter, + .scat_req_rw = ath6kl_sdio_async_rw_scatter, + .cleanup_scatter = ath6kl_sdio_cleanup_scatter, + .suspend = ath6kl_sdio_suspend, + .resume = ath6kl_sdio_resume, + .diag_read32 = ath6kl_sdio_diag_read32, + .diag_write32 = ath6kl_sdio_diag_write32, + .bmi_read = ath6kl_sdio_bmi_read, + .bmi_write = ath6kl_sdio_bmi_write, + .power_on = ath6kl_sdio_power_on, + .power_off = ath6kl_sdio_power_off, + .stop = ath6kl_sdio_stop, +}; + +#ifdef CONFIG_PM_SLEEP + +/* + * Empty handlers so that mmc subsystem doesn't remove us entirely during + * suspend. We instead follow cfg80211 suspend/resume handlers. 
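+ * The real work is done in ath6kl_sdio_suspend() and + * ath6kl_sdio_resume(), reached through the hif_ops table above.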
+ */ +static int ath6kl_sdio_pm_suspend(struct device *device) +{ + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n"); + + return 0; +} + +static int ath6kl_sdio_pm_resume(struct device *device) +{ + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n"); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend, + ath6kl_sdio_pm_resume); + +#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops) + +#else + +#define ATH6KL_SDIO_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ + +static int ath6kl_sdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + int ret; + struct ath6kl_sdio *ar_sdio; + struct ath6kl *ar; + int count; + + ath6kl_dbg(ATH6KL_DBG_BOOT, + "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", + func->num, func->vendor, func->device, + func->max_blksize, func->cur_blksize); + + ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL); + if (!ar_sdio) + return -ENOMEM; + + ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL); + if (!ar_sdio->dma_buffer) { + ret = -ENOMEM; + goto err_hif; + } + + ar_sdio->func = func; + sdio_set_drvdata(func, ar_sdio); + + ar_sdio->id = id; + ar_sdio->is_disabled = true; + + spin_lock_init(&ar_sdio->lock); + spin_lock_init(&ar_sdio->scat_lock); + spin_lock_init(&ar_sdio->wr_async_lock); + mutex_init(&ar_sdio->dma_buffer_mutex); + + INIT_LIST_HEAD(&ar_sdio->scat_req); + INIT_LIST_HEAD(&ar_sdio->bus_req_freeq); + INIT_LIST_HEAD(&ar_sdio->wr_asyncq); + + INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work); + + init_waitqueue_head(&ar_sdio->irq_wq); + + for (count = 0; count < BUS_REQUEST_MAX_NUM; count++) + ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]); + + ar = ath6kl_core_create(&ar_sdio->func->dev); + if (!ar) { + ath6kl_err("Failed to alloc ath6kl core\n"); + ret = -ENOMEM; + goto err_dma; + } + + ar_sdio->ar = ar; + ar->hif_type = ATH6KL_HIF_TYPE_SDIO; + ar->hif_priv = ar_sdio; + ar->hif_ops = &ath6kl_sdio_ops; + ar->bmi.max_data_size = 256; + + ath6kl_sdio_set_mbox_info(ar); + + ret = ath6kl_sdio_config(ar); + if (ret) { + ath6kl_err("Failed to config sdio: %d\n", ret); + goto err_core_alloc; + } + + ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX); + if (ret) { + ath6kl_err("Failed to init ath6kl core\n"); + goto err_core_alloc; + } + + return ret; + +err_core_alloc: + ath6kl_core_destroy(ar_sdio->ar); +err_dma: + kfree(ar_sdio->dma_buffer); +err_hif: + kfree(ar_sdio); + + return ret; +} + +static void ath6kl_sdio_remove(struct sdio_func *func) +{ + struct ath6kl_sdio *ar_sdio; + + ath6kl_dbg(ATH6KL_DBG_BOOT, + "sdio removed func %d vendor 0x%x device 0x%x\n", + func->num, func->vendor, func->device); + + ar_sdio = sdio_get_drvdata(func); + + ath6kl_stop_txrx(ar_sdio->ar); + cancel_work_sync(&ar_sdio->wr_async_work); + + ath6kl_core_cleanup(ar_sdio->ar); + ath6kl_core_destroy(ar_sdio->ar); + + kfree(ar_sdio->dma_buffer); + kfree(ar_sdio); +} + +static const struct sdio_device_id ath6kl_sdio_devices[] = { + {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6003_00)}, + {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6003_01)}, + {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_00)}, + {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_01)}, + {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_02)}, + {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_18)}, + {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_19)}, + {}, +}; + +MODULE_DEVICE_TABLE(sdio, 
ath6kl_sdio_devices); + +static struct sdio_driver ath6kl_sdio_driver = { + .name = "ath6kl_sdio", + .id_table = ath6kl_sdio_devices, + .probe = ath6kl_sdio_probe, + .remove = ath6kl_sdio_remove, + .drv.pm = ATH6KL_SDIO_PM_OPS, +}; + +static int __init ath6kl_sdio_init(void) +{ + int ret; + + ret = sdio_register_driver(&ath6kl_sdio_driver); + if (ret) + ath6kl_err("sdio driver registration failed: %d\n", ret); + + return ret; +} + +static void __exit ath6kl_sdio_exit(void) +{ + sdio_unregister_driver(&ath6kl_sdio_driver); +} + +module_init(ath6kl_sdio_init); +module_exit(ath6kl_sdio_exit); + +MODULE_AUTHOR("Atheros Communications, Inc."); +MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices"); +MODULE_LICENSE("Dual BSD/GPL"); + +MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE); +MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE); +MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE); +MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE); +MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_2_FW_DIR "/" AR6004_HW_1_2_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_3_FW_DIR "/" AR6004_HW_1_3_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6004_HW_1_3_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE); diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h new file mode 100644 index 000000000..d5eeeae77 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/target.h @@ -0,0 +1,356 @@ +/* + * Copyright (c) 2004-2010 Atheros Communications Inc. + * Copyright (c) 2011 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef TARGET_H +#define TARGET_H + +#define AR6003_BOARD_DATA_SZ 1024 +#define AR6003_BOARD_EXT_DATA_SZ 768 +#define AR6003_BOARD_EXT_DATA_SZ_V2 1024 + +#define AR6004_BOARD_DATA_SZ 6144 +#define AR6004_BOARD_EXT_DATA_SZ 0 + +#define RESET_CONTROL_ADDRESS 0x00004000 +#define RESET_CONTROL_COLD_RST 0x00000100 +#define RESET_CONTROL_MBOX_RST 0x00000004 + +#define CPU_CLOCK_STANDARD_S 0 +#define CPU_CLOCK_STANDARD 0x00000003 +#define CPU_CLOCK_ADDRESS 0x00000020 + +#define CLOCK_CONTROL_ADDRESS 0x00000028 +#define CLOCK_CONTROL_LF_CLK32_S 2 +#define CLOCK_CONTROL_LF_CLK32 0x00000004 + +#define SYSTEM_SLEEP_ADDRESS 0x000000c4 +#define SYSTEM_SLEEP_DISABLE_S 0 +#define SYSTEM_SLEEP_DISABLE 0x00000001 + +#define LPO_CAL_ADDRESS 0x000000e0 +#define LPO_CAL_ENABLE_S 20 +#define LPO_CAL_ENABLE 0x00100000 + +#define GPIO_PIN9_ADDRESS 0x0000004c +#define GPIO_PIN10_ADDRESS 0x00000050 +#define GPIO_PIN11_ADDRESS 0x00000054 +#define GPIO_PIN12_ADDRESS 0x00000058 +#define GPIO_PIN13_ADDRESS 0x0000005c + +#define HOST_INT_STATUS_ADDRESS 0x00000400 +#define HOST_INT_STATUS_ERROR_S 7 +#define HOST_INT_STATUS_ERROR 0x00000080 + +#define HOST_INT_STATUS_CPU_S 6 +#define HOST_INT_STATUS_CPU 0x00000040 + +#define HOST_INT_STATUS_COUNTER_S 4 +#define HOST_INT_STATUS_COUNTER 0x00000010 + +#define CPU_INT_STATUS_ADDRESS 0x00000401 + +#define ERROR_INT_STATUS_ADDRESS 0x00000402 +#define ERROR_INT_STATUS_WAKEUP_S 2 +#define ERROR_INT_STATUS_WAKEUP 0x00000004 + +#define ERROR_INT_STATUS_RX_UNDERFLOW_S 1 +#define ERROR_INT_STATUS_RX_UNDERFLOW 0x00000002 + +#define ERROR_INT_STATUS_TX_OVERFLOW_S 0 +#define ERROR_INT_STATUS_TX_OVERFLOW 0x00000001 + +#define COUNTER_INT_STATUS_ADDRESS 0x00000403 +#define COUNTER_INT_STATUS_COUNTER_S 0 +#define COUNTER_INT_STATUS_COUNTER 0x000000ff + +#define RX_LOOKAHEAD_VALID_ADDRESS 0x00000405 + +#define INT_STATUS_ENABLE_ADDRESS 0x00000418 +#define INT_STATUS_ENABLE_ERROR_S 7 +#define INT_STATUS_ENABLE_ERROR 0x00000080 + +#define INT_STATUS_ENABLE_CPU_S 6 +#define INT_STATUS_ENABLE_CPU 0x00000040 + +#define INT_STATUS_ENABLE_INT_S 5 +#define INT_STATUS_ENABLE_INT 0x00000020 +#define INT_STATUS_ENABLE_COUNTER_S 4 +#define INT_STATUS_ENABLE_COUNTER 0x00000010 + +#define INT_STATUS_ENABLE_MBOX_DATA_S 0 +#define INT_STATUS_ENABLE_MBOX_DATA 0x0000000f + +#define CPU_INT_STATUS_ENABLE_ADDRESS 0x00000419 +#define CPU_INT_STATUS_ENABLE_BIT_S 0 +#define CPU_INT_STATUS_ENABLE_BIT 0x000000ff + +#define ERROR_STATUS_ENABLE_ADDRESS 0x0000041a +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_S 1 +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW 0x00000002 + +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_S 0 +#define ERROR_STATUS_ENABLE_TX_OVERFLOW 0x00000001 + +#define COUNTER_INT_STATUS_ENABLE_ADDRESS 0x0000041b +#define COUNTER_INT_STATUS_ENABLE_BIT_S 0 +#define COUNTER_INT_STATUS_ENABLE_BIT 0x000000ff + +#define COUNT_ADDRESS 0x00000420 + +#define COUNT_DEC_ADDRESS 0x00000440 + +#define WINDOW_DATA_ADDRESS 0x00000474 +#define WINDOW_WRITE_ADDR_ADDRESS 0x00000478 +#define WINDOW_READ_ADDR_ADDRESS 0x0000047c +#define CPU_DBG_SEL_ADDRESS 0x00000483 +#define CPU_DBG_ADDRESS 0x00000484 + +#define LOCAL_SCRATCH_ADDRESS 0x000000c0 +#define ATH6KL_OPTION_SLEEP_DISABLE 0x08 + +#define RTC_BASE_ADDRESS 0x00004000 +#define GPIO_BASE_ADDRESS 0x00014000 +#define MBOX_BASE_ADDRESS 0x00018000 +#define ANALOG_INTF_BASE_ADDRESS 0x0001c000 + +/* real name of the register is unknown */ +#define ATH6KL_ANALOG_PLL_REGISTER (ANALOG_INTF_BASE_ADDRESS + 0x284) + +#define SM(f, v) (((v) << f##_S) & f) +#define MS(f, v) (((v) & f) 
>> f##_S)
+
+/*
+ * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
+ * host_interest structure.
+ *
+ * Host Interest is shared between Host and Target in order to coordinate
+ * between the two, and is intended to remain constant (with additions only
+ * at the end).
+ */
+#define ATH6KL_AR6003_HI_START_ADDR 0x00540600
+#define ATH6KL_AR6004_HI_START_ADDR 0x00400800
+
+/*
+ * These are items that the Host may need to access
+ * via BMI or via the Diagnostic Window. The position
+ * of items in this structure must remain constant
+ * across firmware revisions!
+ *
+ * Types for each item must be fixed size across target and host platforms.
+ * The structure is used only to calculate the offset of each register with
+ * the HI_ITEM() macro; no values are stored in it.
+ *
+ * More items may be added at the end.
+ */
+struct host_interest {
+ /*
+ * Pointer to application-defined area, if any.
+ * Set by Target application during startup.
+ */
+ u32 hi_app_host_interest; /* 0x00 */
+
+ /* Pointer to register dump area, valid after Target crash. */
+ u32 hi_failure_state; /* 0x04 */
+
+ /* Pointer to debug logging header */
+ u32 hi_dbglog_hdr; /* 0x08 */
+
+ u32 hi_unused1; /* 0x0c */
+
+ /*
+ * General-purpose flag bits, similar to ATH6KL_OPTION_* flags.
+ * Can be used by application rather than by OS.
+ */
+ u32 hi_option_flag; /* 0x10 */
+
+ /*
+ * Boolean that determines whether or not to
+ * display messages on the serial port.
+ */
+ u32 hi_serial_enable; /* 0x14 */
+
+ /* Start address of DataSet index, if any */
+ u32 hi_dset_list_head; /* 0x18 */
+
+ /* Override Target application start address */
+ u32 hi_app_start; /* 0x1c */
+
+ /* Clock and voltage tuning */
+ u32 hi_skip_clock_init; /* 0x20 */
+ u32 hi_core_clock_setting; /* 0x24 */
+ u32 hi_cpu_clock_setting; /* 0x28 */
+ u32 hi_system_sleep_setting; /* 0x2c */
+ u32 hi_xtal_control_setting; /* 0x30 */
+ u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
+ u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
+ u32 hi_ref_voltage_trim_setting; /* 0x3c */
+ u32 hi_clock_info; /* 0x40 */
+
+ /*
+ * Flash configuration overrides, used only
+ * when firmware is not executing from flash.
+ * (When using flash, modify the global variables
+ * with equivalent names.)
+ */
+ u32 hi_bank0_addr_value; /* 0x44 */
+ u32 hi_bank0_read_value; /* 0x48 */
+ u32 hi_bank0_write_value; /* 0x4c */
+ u32 hi_bank0_config_value; /* 0x50 */
+
+ /* Pointer to Board Data */
+ u32 hi_board_data; /* 0x54 */
+ u32 hi_board_data_initialized; /* 0x58 */
+
+ u32 hi_dset_ram_index_tbl; /* 0x5c */
+
+ u32 hi_desired_baud_rate; /* 0x60 */
+ u32 hi_dbglog_config; /* 0x64 */
+ u32 hi_end_ram_reserve_sz; /* 0x68 */
+ u32 hi_mbox_io_block_sz; /* 0x6c */
+
+ u32 hi_num_bpatch_streams; /* 0x70 -- unused */
+ u32 hi_mbox_isr_yield_limit; /* 0x74 */
+
+ u32 hi_refclk_hz; /* 0x78 */
+ u32 hi_ext_clk_detected; /* 0x7c */
+ u32 hi_dbg_uart_txpin; /* 0x80 */
+ u32 hi_dbg_uart_rxpin; /* 0x84 */
+ u32 hi_hci_uart_baud; /* 0x88 */
+ u32 hi_hci_uart_pin_assignments; /* 0x8C */
+ /*
+ * NOTE: byte [0] = tx pin, [1] = rx pin, [2] = rts pin, [3] = cts
+ * pin
+ */
+ u32 hi_hci_uart_baud_scale_val; /* 0x90 */
+ u32 hi_hci_uart_baud_step_val; /* 0x94 */
+
+ u32 hi_allocram_start; /* 0x98 */
+ u32 hi_allocram_sz; /* 0x9c */
+ u32 hi_hci_bridge_flags; /* 0xa0 */
+ u32 hi_hci_uart_support_pins; /* 0xa4 */
+ /*
+ * NOTE: byte [0] = RESET pin (bit 7 is polarity),
+ * bytes[1]..bytes[3] are for future use
+ */
+ u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
+ /*
+ * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
+ * [31:16]: wakeup timeout in ms
+ */
+
+ /* Pointer to extended board data */
+ u32 hi_board_ext_data; /* 0xac */
+ u32 hi_board_ext_data_config; /* 0xb0 */
+ /*
+ * Bit [0]: valid
+ * Bit [31:16]: size
+ */
+
+ /*
+ * hi_reset_flag selects what is preserved across a target reset,
+ * such as restoring app_start after a warm reset, or preserving
+ * the host interest area, ROM data, literals, etc.
+ */
+ u32 hi_reset_flag; /* 0xb4 */
+ /* indicate hi_reset_flag is valid */
+ u32 hi_reset_flag_valid; /* 0xb8 */
+ u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */
+ /*
+ * 0xbc - [31:0]: idle timeout in ms
+ */
+ /* ACS flags */
+ u32 hi_acs_flags; /* 0xc0 */
+ u32 hi_console_flags; /* 0xc4 */
+ u32 hi_nvram_state; /* 0xc8 */
+ u32 hi_option_flag2; /* 0xcc */
+
+ /* If non-zero, override values sent to Host in WMI_READY event.
*/
+ u32 hi_sw_version_override; /* 0xd0 */
+ u32 hi_abi_version_override; /* 0xd4 */
+
+ /*
+ * Percentage of high priority RX traffic to total expected RX traffic -
+ * applicable only to ar6004
+ */
+ u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
+
+ /* test applications flags */
+ u32 hi_test_apps_related; /* 0xdc */
+ /* location of test script */
+ u32 hi_ota_testscript; /* 0xe0 */
+ /* location of CAL data */
+ u32 hi_cal_data; /* 0xe4 */
+ /* Number of packet log buffers */
+ u32 hi_pktlog_num_buffers; /* 0xe8 */
+
+} __packed;
+
+#define HI_ITEM(item) offsetof(struct host_interest, item)
+
+#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
+
+#define HI_OPTION_FW_MODE_IBSS 0x0
+#define HI_OPTION_FW_MODE_BSS_STA 0x1
+#define HI_OPTION_FW_MODE_AP 0x2
+
+#define HI_OPTION_FW_SUBMODE_NONE 0x0
+#define HI_OPTION_FW_SUBMODE_P2PDEV 0x1
+#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2
+#define HI_OPTION_FW_SUBMODE_P2PGO 0x3
+
+#define HI_OPTION_NUM_DEV_SHIFT 0x9
+
+#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
+
+/* Fw Mode/SubMode Mask
+|-----------------------------------------------------------------------------|
+|   SUB   |   SUB   |   SUB   |   SUB   |         |         |         |       |
+| MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] |MODE[0]|
+|   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |  (2)  |
+|-----------------------------------------------------------------------------|
+*/
+#define HI_OPTION_FW_MODE_BITS 0x2
+#define HI_OPTION_FW_MODE_SHIFT 0xC
+
+#define HI_OPTION_FW_SUBMODE_BITS 0x2
+#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
+
+/* Convert a Target virtual address into a Target physical address */
+#define AR6003_VTOP(vaddr) ((vaddr) & 0x001fffff)
+#define AR6004_VTOP(vaddr) (vaddr)
+
+#define TARG_VTOP(target_type, vaddr) \
+ (((target_type) == TARGET_TYPE_AR6003) ? AR6003_VTOP(vaddr) : \
+ (((target_type) == TARGET_TYPE_AR6004) ? AR6004_VTOP(vaddr) : 0))
+
+#define ATH6KL_FWLOG_PAYLOAD_SIZE 1500
+
+struct ath6kl_dbglog_buf {
+ __le32 next;
+ __le32 buffer_addr;
+ __le32 bufsize;
+ __le32 length;
+ __le32 count;
+ __le32 free;
+} __packed;
+
+struct ath6kl_dbglog_hdr {
+ __le32 dbuf_addr;
+ __le32 dropped;
+} __packed;
+
+#endif /* TARGET_H */
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c
new file mode 100644
index 000000000..89c7c4e25
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/testmode.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */ + +#include "testmode.h" +#include "debug.h" + +#include <net/netlink.h> + +enum ath6kl_tm_attr { + __ATH6KL_TM_ATTR_INVALID = 0, + ATH6KL_TM_ATTR_CMD = 1, + ATH6KL_TM_ATTR_DATA = 2, + + /* keep last */ + __ATH6KL_TM_ATTR_AFTER_LAST, + ATH6KL_TM_ATTR_MAX = __ATH6KL_TM_ATTR_AFTER_LAST - 1, +}; + +enum ath6kl_tm_cmd { + ATH6KL_TM_CMD_TCMD = 0, + ATH6KL_TM_CMD_RX_REPORT = 1, /* not used anymore */ +}; + +#define ATH6KL_TM_DATA_MAX_LEN 5000 + +static const struct nla_policy ath6kl_tm_policy[ATH6KL_TM_ATTR_MAX + 1] = { + [ATH6KL_TM_ATTR_CMD] = { .type = NLA_U32 }, + [ATH6KL_TM_ATTR_DATA] = { .type = NLA_BINARY, + .len = ATH6KL_TM_DATA_MAX_LEN }, +}; + +void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len) +{ + struct sk_buff *skb; + + if (!buf || buf_len == 0) + return; + + skb = cfg80211_testmode_alloc_event_skb(ar->wiphy, buf_len, GFP_KERNEL); + if (!skb) { + ath6kl_warn("failed to allocate testmode rx skb!\n"); + return; + } + if (nla_put_u32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD) || + nla_put(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf)) + goto nla_put_failure; + cfg80211_testmode_event(skb, GFP_KERNEL); + return; + +nla_put_failure: + kfree_skb(skb); + ath6kl_warn("nla_put failed on testmode rx skb!\n"); +} + +int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, + void *data, int len) +{ + struct ath6kl *ar = wiphy_priv(wiphy); + struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1]; + int err, buf_len; + void *buf; + + err = nla_parse_deprecated(tb, ATH6KL_TM_ATTR_MAX, data, len, + ath6kl_tm_policy, NULL); + if (err) + return err; + + if (!tb[ATH6KL_TM_ATTR_CMD]) + return -EINVAL; + + switch (nla_get_u32(tb[ATH6KL_TM_ATTR_CMD])) { + case ATH6KL_TM_CMD_TCMD: + if (!tb[ATH6KL_TM_ATTR_DATA]) + return -EINVAL; + + buf = nla_data(tb[ATH6KL_TM_ATTR_DATA]); + buf_len = nla_len(tb[ATH6KL_TM_ATTR_DATA]); + + ath6kl_wmi_test_cmd(ar->wmi, buf, buf_len); + + return 0; + + case ATH6KL_TM_CMD_RX_REPORT: + default: + return -EOPNOTSUPP; + } +} diff --git a/drivers/net/wireless/ath/ath6kl/testmode.h b/drivers/net/wireless/ath/ath6kl/testmode.h new file mode 100644 index 000000000..9fbcdec3e --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/testmode.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2010-2011 Atheros Communications Inc. + * Copyright (c) 2011 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "core.h" + +#ifdef CONFIG_NL80211_TESTMODE + +void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len); +int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, + void *data, int len); + +#else + +static inline void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, + size_t buf_len) +{ +} + +static inline int ath6kl_tm_cmd(struct wiphy *wiphy, + struct wireless_dev *wdev, + void *data, int len) +{ + return 0; +} + +#endif diff --git a/drivers/net/wireless/ath/ath6kl/trace.c b/drivers/net/wireless/ath/ath6kl/trace.c new file mode 100644 index 000000000..e7d64b128 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/trace.c @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/module.h> + +#define CREATE_TRACE_POINTS +#include "trace.h" + +EXPORT_TRACEPOINT_SYMBOL(ath6kl_sdio); +EXPORT_TRACEPOINT_SYMBOL(ath6kl_sdio_scat); diff --git a/drivers/net/wireless/ath/ath6kl/trace.h b/drivers/net/wireless/ath/ath6kl/trace.h new file mode 100644 index 000000000..231a94769 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/trace.h @@ -0,0 +1,327 @@ +/* SPDX-License-Identifier: ISC */ +#if !defined(_ATH6KL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) + +#include <net/cfg80211.h> +#include <linux/skbuff.h> +#include <linux/tracepoint.h> +#include "wmi.h" +#include "hif.h" + +#if !defined(_ATH6KL_TRACE_H) +static inline unsigned int ath6kl_get_wmi_id(void *buf, size_t buf_len) +{ + struct wmi_cmd_hdr *hdr = buf; + + if (buf_len < sizeof(*hdr)) + return 0; + + return le16_to_cpu(hdr->cmd_id); +} +#endif /* __ATH6KL_TRACE_H */ + +#define _ATH6KL_TRACE_H + +/* create empty functions when tracing is disabled */ +#if !defined(CONFIG_ATH6KL_TRACING) +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) \ +static inline void trace_ ## name(proto) {} +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(...) +#undef DEFINE_EVENT +#define DEFINE_EVENT(evt_class, name, proto, ...) 
\ +static inline void trace_ ## name(proto) {} +#endif /* !CONFIG_ATH6KL_TRACING || __CHECKER__ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ath6kl + +TRACE_EVENT(ath6kl_wmi_cmd, + TP_PROTO(void *buf, size_t buf_len), + + TP_ARGS(buf, buf_len), + + TP_STRUCT__entry( + __field(unsigned int, id) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __entry->id = ath6kl_get_wmi_id(buf, buf_len); + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "id %d len %zd", + __entry->id, __entry->buf_len + ) +); + +TRACE_EVENT(ath6kl_wmi_event, + TP_PROTO(void *buf, size_t buf_len), + + TP_ARGS(buf, buf_len), + + TP_STRUCT__entry( + __field(unsigned int, id) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __entry->id = ath6kl_get_wmi_id(buf, buf_len); + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "id %d len %zd", + __entry->id, __entry->buf_len + ) +); + +TRACE_EVENT(ath6kl_sdio, + TP_PROTO(unsigned int addr, int flags, + void *buf, size_t buf_len), + + TP_ARGS(addr, flags, buf, buf_len), + + TP_STRUCT__entry( + __field(unsigned int, tx) + __field(unsigned int, addr) + __field(int, flags) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __entry->addr = addr; + __entry->flags = flags; + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + + if (flags & HIF_WRITE) + __entry->tx = 1; + else + __entry->tx = 0; + ), + + TP_printk( + "%s addr 0x%x flags 0x%x len %zd\n", + __entry->tx ? "tx" : "rx", + __entry->addr, + __entry->flags, + __entry->buf_len + ) +); + +TRACE_EVENT(ath6kl_sdio_scat, + TP_PROTO(unsigned int addr, int flags, unsigned int total_len, + unsigned int entries, struct hif_scatter_item *list), + + TP_ARGS(addr, flags, total_len, entries, list), + + TP_STRUCT__entry( + __field(unsigned int, tx) + __field(unsigned int, addr) + __field(int, flags) + __field(unsigned int, entries) + __field(size_t, total_len) + __dynamic_array(unsigned int, len_array, entries) + __dynamic_array(u8, data, total_len) + ), + + TP_fast_assign( + unsigned int *len_array; + int i, offset = 0; + size_t len; + + __entry->addr = addr; + __entry->flags = flags; + __entry->entries = entries; + __entry->total_len = total_len; + + if (flags & HIF_WRITE) + __entry->tx = 1; + else + __entry->tx = 0; + + len_array = __get_dynamic_array(len_array); + + for (i = 0; i < entries; i++) { + len = list[i].len; + + memcpy((u8 *) __get_dynamic_array(data) + offset, + list[i].buf, len); + + len_array[i] = len; + offset += len; + } + ), + + TP_printk( + "%s addr 0x%x flags 0x%x entries %d total_len %zd\n", + __entry->tx ? 
"tx" : "rx", + __entry->addr, + __entry->flags, + __entry->entries, + __entry->total_len + ) +); + +TRACE_EVENT(ath6kl_sdio_irq, + TP_PROTO(void *buf, size_t buf_len), + + TP_ARGS(buf, buf_len), + + TP_STRUCT__entry( + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "irq len %zd\n", __entry->buf_len + ) +); + +TRACE_EVENT(ath6kl_htc_rx, + TP_PROTO(int status, int endpoint, void *buf, + size_t buf_len), + + TP_ARGS(status, endpoint, buf, buf_len), + + TP_STRUCT__entry( + __field(int, status) + __field(int, endpoint) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __entry->status = status; + __entry->endpoint = endpoint; + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "status %d endpoint %d len %zd\n", + __entry->status, + __entry->endpoint, + __entry->buf_len + ) +); + +TRACE_EVENT(ath6kl_htc_tx, + TP_PROTO(int status, int endpoint, void *buf, + size_t buf_len), + + TP_ARGS(status, endpoint, buf, buf_len), + + TP_STRUCT__entry( + __field(int, status) + __field(int, endpoint) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __entry->status = status; + __entry->endpoint = endpoint; + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "status %d endpoint %d len %zd\n", + __entry->status, + __entry->endpoint, + __entry->buf_len + ) +); + +#define ATH6KL_MSG_MAX 200 + +DECLARE_EVENT_CLASS(ath6kl_log_event, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf), + TP_STRUCT__entry( + __vstring(msg, vaf->fmt, vaf->va) + ), + TP_fast_assign( + __assign_vstr(msg, vaf->fmt, vaf->va); + ), + TP_printk("%s", __get_str(msg)) +); + +DEFINE_EVENT(ath6kl_log_event, ath6kl_log_err, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(ath6kl_log_event, ath6kl_log_warn, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(ath6kl_log_event, ath6kl_log_info, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +TRACE_EVENT(ath6kl_log_dbg, + TP_PROTO(unsigned int level, struct va_format *vaf), + TP_ARGS(level, vaf), + TP_STRUCT__entry( + __field(unsigned int, level) + __vstring(msg, vaf->fmt, vaf->va) + ), + TP_fast_assign( + __entry->level = level; + __assign_vstr(msg, vaf->fmt, vaf->va); + ), + TP_printk("%s", __get_str(msg)) +); + +TRACE_EVENT(ath6kl_log_dbg_dump, + TP_PROTO(const char *msg, const char *prefix, + const void *buf, size_t buf_len), + + TP_ARGS(msg, prefix, buf, buf_len), + + TP_STRUCT__entry( + __string(msg, msg) + __string(prefix, prefix) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __assign_str(msg, msg); + __assign_str(prefix, prefix); + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "%s/%s\n", __get_str(prefix), __get_str(msg) + ) +); + +#endif /* _ ATH6KL_TRACE_H || TRACE_HEADER_MULTI_READ*/ + +/* we don't want to use include/trace/events */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c new file mode 100644 index 000000000..a56fab623 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/txrx.c @@ -0,0 +1,1869 @@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "core.h" +#include "debug.h" +#include "htc-ops.h" +#include "trace.h" + +/* + * tid - tid_mux0..tid_mux3 + * aid - tid_mux4..tid_mux7 + */ +#define ATH6KL_TID_MASK 0xf +#define ATH6KL_AID_SHIFT 4 + +static inline u8 ath6kl_get_tid(u8 tid_mux) +{ + return tid_mux & ATH6KL_TID_MASK; +} + +static inline u8 ath6kl_get_aid(u8 tid_mux) +{ + return tid_mux >> ATH6KL_AID_SHIFT; +} + +static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev, + u32 *map_no) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct ethhdr *eth_hdr; + u32 i, ep_map = -1; + u8 *datap; + + *map_no = 0; + datap = skb->data; + eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr)); + + if (is_multicast_ether_addr(eth_hdr->h_dest)) + return ENDPOINT_2; + + for (i = 0; i < ar->node_num; i++) { + if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr, + ETH_ALEN) == 0) { + *map_no = i + 1; + ar->node_map[i].tx_pend++; + return ar->node_map[i].ep_id; + } + + if ((ep_map == -1) && !ar->node_map[i].tx_pend) + ep_map = i; + } + + if (ep_map == -1) { + ep_map = ar->node_num; + ar->node_num++; + if (ar->node_num > MAX_NODE_NUM) + return ENDPOINT_UNUSED; + } + + memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN); + + for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) { + if (!ar->tx_pending[i]) { + ar->node_map[ep_map].ep_id = i; + break; + } + + /* + * No free endpoint is available, start redistribution on + * the inuse endpoints. + */ + if (i == ENDPOINT_5) { + ar->node_map[ep_map].ep_id = ar->next_ep_id; + ar->next_ep_id++; + if (ar->next_ep_id > ENDPOINT_5) + ar->next_ep_id = ENDPOINT_2; + } + } + + *map_no = ep_map + 1; + ar->node_map[ep_map].tx_pend++; + + return ar->node_map[ep_map].ep_id; +} + +static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn, + struct ath6kl_vif *vif, + struct sk_buff *skb, + u32 *flags) +{ + struct ath6kl *ar = vif->ar; + bool is_apsdq_empty = false; + struct ethhdr *datap = (struct ethhdr *) skb->data; + u8 up = 0, traffic_class, *ip_hdr; + u16 ether_type; + struct ath6kl_llc_snap_hdr *llc_hdr; + + if (conn->sta_flags & STA_PS_APSD_TRIGGER) { + /* + * This tx is because of a uAPSD trigger, determine + * more and EOSP bit. Set EOSP if queue is empty + * or sufficient frames are delivered for this trigger. 
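+ * (MORE tells the station that additional frames remain buffered,
+ * while EOSP marks the end of the uAPSD service period so the
+ * station may go back to sleep.)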
+ */ + spin_lock_bh(&conn->psq_lock); + if (!skb_queue_empty(&conn->apsdq)) + *flags |= WMI_DATA_HDR_FLAGS_MORE; + else if (conn->sta_flags & STA_PS_APSD_EOSP) + *flags |= WMI_DATA_HDR_FLAGS_EOSP; + *flags |= WMI_DATA_HDR_FLAGS_UAPSD; + spin_unlock_bh(&conn->psq_lock); + return false; + } else if (!conn->apsd_info) { + return false; + } + + if (test_bit(WMM_ENABLED, &vif->flags)) { + ether_type = be16_to_cpu(datap->h_proto); + if (is_ethertype(ether_type)) { + /* packet is in DIX format */ + ip_hdr = (u8 *)(datap + 1); + } else { + /* packet is in 802.3 format */ + llc_hdr = (struct ath6kl_llc_snap_hdr *) + (datap + 1); + ether_type = be16_to_cpu(llc_hdr->eth_type); + ip_hdr = (u8 *)(llc_hdr + 1); + } + + if (ether_type == IP_ETHERTYPE) + up = ath6kl_wmi_determine_user_priority( + ip_hdr, 0); + } + + traffic_class = ath6kl_wmi_get_traffic_class(up); + + if ((conn->apsd_info & (1 << traffic_class)) == 0) + return false; + + /* Queue the frames if the STA is sleeping */ + spin_lock_bh(&conn->psq_lock); + is_apsdq_empty = skb_queue_empty(&conn->apsdq); + skb_queue_tail(&conn->apsdq, skb); + spin_unlock_bh(&conn->psq_lock); + + /* + * If this is the first pkt getting queued + * for this STA, update the PVB for this STA + */ + if (is_apsdq_empty) { + ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi, + vif->fw_vif_idx, + conn->aid, 1, 0); + } + *flags |= WMI_DATA_HDR_FLAGS_UAPSD; + + return true; +} + +static bool ath6kl_process_psq(struct ath6kl_sta *conn, + struct ath6kl_vif *vif, + struct sk_buff *skb, + u32 *flags) +{ + bool is_psq_empty = false; + struct ath6kl *ar = vif->ar; + + if (conn->sta_flags & STA_PS_POLLED) { + spin_lock_bh(&conn->psq_lock); + if (!skb_queue_empty(&conn->psq)) + *flags |= WMI_DATA_HDR_FLAGS_MORE; + spin_unlock_bh(&conn->psq_lock); + return false; + } + + /* Queue the frames if the STA is sleeping */ + spin_lock_bh(&conn->psq_lock); + is_psq_empty = skb_queue_empty(&conn->psq); + skb_queue_tail(&conn->psq, skb); + spin_unlock_bh(&conn->psq_lock); + + /* + * If this is the first pkt getting queued + * for this STA, update the PVB for this + * STA. + */ + if (is_psq_empty) + ath6kl_wmi_set_pvb_cmd(ar->wmi, + vif->fw_vif_idx, + conn->aid, 1); + return true; +} + +static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb, + u32 *flags) +{ + struct ethhdr *datap = (struct ethhdr *) skb->data; + struct ath6kl_sta *conn = NULL; + bool ps_queued = false; + struct ath6kl *ar = vif->ar; + + if (is_multicast_ether_addr(datap->h_dest)) { + u8 ctr = 0; + bool q_mcast = false; + + for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) { + if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) { + q_mcast = true; + break; + } + } + + if (q_mcast) { + /* + * If this transmit is not because of a Dtim Expiry + * q it. + */ + if (!test_bit(DTIM_EXPIRED, &vif->flags)) { + bool is_mcastq_empty = false; + + spin_lock_bh(&ar->mcastpsq_lock); + is_mcastq_empty = + skb_queue_empty(&ar->mcastpsq); + skb_queue_tail(&ar->mcastpsq, skb); + spin_unlock_bh(&ar->mcastpsq_lock); + + /* + * If this is the first Mcast pkt getting + * queued indicate to the target to set the + * BitmapControl LSB of the TIM IE. + */ + if (is_mcastq_empty) + ath6kl_wmi_set_pvb_cmd(ar->wmi, + vif->fw_vif_idx, + MCAST_AID, 1); + + ps_queued = true; + } else { + /* + * This transmit is because of Dtim expiry. + * Determine if MoreData bit has to be set. 
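+ * (A set MoreData bit in a DTIM-delivered multicast frame tells
+ * sleeping stations that further buffered multicast frames follow.)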
+ */ + spin_lock_bh(&ar->mcastpsq_lock); + if (!skb_queue_empty(&ar->mcastpsq)) + *flags |= WMI_DATA_HDR_FLAGS_MORE; + spin_unlock_bh(&ar->mcastpsq_lock); + } + } + } else { + conn = ath6kl_find_sta(vif, datap->h_dest); + if (!conn) { + dev_kfree_skb(skb); + + /* Inform the caller that the skb is consumed */ + return true; + } + + if (conn->sta_flags & STA_PS_SLEEP) { + ps_queued = ath6kl_process_uapsdq(conn, + vif, skb, flags); + if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD)) + ps_queued = ath6kl_process_psq(conn, + vif, skb, flags); + } + } + return ps_queued; +} + +/* Tx functions */ + +int ath6kl_control_tx(void *devt, struct sk_buff *skb, + enum htc_endpoint_id eid) +{ + struct ath6kl *ar = devt; + int status = 0; + struct ath6kl_cookie *cookie = NULL; + + trace_ath6kl_wmi_cmd(skb->data, skb->len); + + if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) { + dev_kfree_skb(skb); + return -EACCES; + } + + if (WARN_ON_ONCE(eid == ENDPOINT_UNUSED || + eid >= ENDPOINT_MAX)) { + status = -EINVAL; + goto fail_ctrl_tx; + } + + spin_lock_bh(&ar->lock); + + ath6kl_dbg(ATH6KL_DBG_WLAN_TX, + "%s: skb=0x%p, len=0x%x eid =%d\n", __func__, + skb, skb->len, eid); + + if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) { + /* + * Control endpoint is full, don't allocate resources, we + * are just going to drop this packet. + */ + cookie = NULL; + ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n", + skb, skb->len); + } else { + cookie = ath6kl_alloc_cookie(ar); + } + + if (cookie == NULL) { + spin_unlock_bh(&ar->lock); + status = -ENOMEM; + goto fail_ctrl_tx; + } + + ar->tx_pending[eid]++; + + if (eid != ar->ctrl_ep) + ar->total_tx_data_pend++; + + spin_unlock_bh(&ar->lock); + + cookie->skb = skb; + cookie->map_no = 0; + set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len, + eid, ATH6KL_CONTROL_PKT_TAG); + cookie->htc_pkt.skb = skb; + + /* + * This interface is asynchronous, if there is an error, cleanup + * will happen in the TX completion callback. 
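+ * (In practice ath6kl_tx_complete() frees both the cookie and the
+ * skb for every packet handed to HTC, whether it completed, failed
+ * or was flushed.)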
+ */ + ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt); + + return 0; + +fail_ctrl_tx: + dev_kfree_skb(skb); + return status; +} + +netdev_tx_t ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_cookie *cookie = NULL; + enum htc_endpoint_id eid = ENDPOINT_UNUSED; + struct ath6kl_vif *vif = netdev_priv(dev); + u32 map_no = 0; + u16 htc_tag = ATH6KL_DATA_PKT_TAG; + u8 ac = 99; /* initialize to unmapped ac */ + bool chk_adhoc_ps_mapping = false; + int ret; + struct wmi_tx_meta_v2 meta_v2; + void *meta; + u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed; + u8 meta_ver = 0; + u32 flags = 0; + + ath6kl_dbg(ATH6KL_DBG_WLAN_TX, + "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__, + skb, skb->data, skb->len); + + /* If target is not associated */ + if (!test_bit(CONNECTED, &vif->flags)) + goto fail_tx; + + if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON)) + goto fail_tx; + + if (!test_bit(WMI_READY, &ar->flag)) + goto fail_tx; + + /* AP mode Power saving processing */ + if (vif->nw_type == AP_NETWORK) { + if (ath6kl_powersave_ap(vif, skb, &flags)) + return 0; + } + + if (test_bit(WMI_ENABLED, &ar->flag)) { + if ((dev->features & NETIF_F_IP_CSUM) && + (csum == CHECKSUM_PARTIAL)) { + csum_start = skb->csum_start - + (skb_network_header(skb) - skb->head) + + sizeof(struct ath6kl_llc_snap_hdr); + csum_dest = skb->csum_offset + csum_start; + } + + if (skb_cow_head(skb, dev->needed_headroom)) { + dev->stats.tx_dropped++; + kfree_skb(skb); + return 0; + } + + if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) { + ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n"); + goto fail_tx; + } + + if ((dev->features & NETIF_F_IP_CSUM) && + (csum == CHECKSUM_PARTIAL)) { + meta_v2.csum_start = csum_start; + meta_v2.csum_dest = csum_dest; + + /* instruct target to calculate checksum */ + meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD; + meta_ver = WMI_META_VERSION_2; + meta = &meta_v2; + } else { + meta_ver = 0; + meta = NULL; + } + + ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb, + DATA_MSGTYPE, flags, 0, + meta_ver, + meta, vif->fw_vif_idx); + + if (ret) { + ath6kl_warn("failed to add wmi data header:%d\n" + , ret); + goto fail_tx; + } + + if ((vif->nw_type == ADHOC_NETWORK) && + ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags)) + chk_adhoc_ps_mapping = true; + else { + /* get the stream mapping */ + ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, + vif->fw_vif_idx, skb, + 0, test_bit(WMM_ENABLED, &vif->flags), &ac); + if (ret) + goto fail_tx; + } + } else { + goto fail_tx; + } + + spin_lock_bh(&ar->lock); + + if (chk_adhoc_ps_mapping) + eid = ath6kl_ibss_map_epid(skb, dev, &map_no); + else + eid = ar->ac2ep_map[ac]; + + if (eid == 0 || eid == ENDPOINT_UNUSED) { + ath6kl_err("eid %d is not mapped!\n", eid); + spin_unlock_bh(&ar->lock); + goto fail_tx; + } + + /* allocate resource for this packet */ + cookie = ath6kl_alloc_cookie(ar); + + if (!cookie) { + spin_unlock_bh(&ar->lock); + goto fail_tx; + } + + /* update counts while the lock is held */ + ar->tx_pending[eid]++; + ar->total_tx_data_pend++; + + spin_unlock_bh(&ar->lock); + + if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) && + skb_cloned(skb)) { + /* + * We will touch (move the buffer data to align it. Since the + * skb buffer is cloned and not only the header is changed, we + * have to copy it to allow the changes. Since we are copying + * the data here, we may as well align it by reserving suitable + * headroom to avoid the memmove in ath6kl_htc_tx_buf_align(). 
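+ * (skb_copy_expand() below reserves HTC_HDR_LENGTH bytes of fresh
+ * headroom for the copy, which is what leaves room to push the HTC
+ * header without another move.)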
+ */ + struct sk_buff *nskb; + + nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC); + if (nskb == NULL) + goto fail_tx; + kfree_skb(skb); + skb = nskb; + } + + cookie->skb = skb; + cookie->map_no = map_no; + set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len, + eid, htc_tag); + cookie->htc_pkt.skb = skb; + + ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ", + skb->data, skb->len); + + /* + * HTC interface is asynchronous, if this fails, cleanup will + * happen in the ath6kl_tx_complete callback. + */ + ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt); + + return 0; + +fail_tx: + dev_kfree_skb(skb); + + dev->stats.tx_dropped++; + dev->stats.tx_aborted_errors++; + + return 0; +} + +/* indicate tx activity or inactivity on a WMI stream */ +void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active) +{ + struct ath6kl *ar = devt; + enum htc_endpoint_id eid; + int i; + + eid = ar->ac2ep_map[traffic_class]; + + if (!test_bit(WMI_ENABLED, &ar->flag)) + goto notify_htc; + + spin_lock_bh(&ar->lock); + + ar->ac_stream_active[traffic_class] = active; + + if (active) { + /* + * Keep track of the active stream with the highest + * priority. + */ + if (ar->ac_stream_pri_map[traffic_class] > + ar->hiac_stream_active_pri) + /* set the new highest active priority */ + ar->hiac_stream_active_pri = + ar->ac_stream_pri_map[traffic_class]; + + } else { + /* + * We may have to search for the next active stream + * that is the highest priority. + */ + if (ar->hiac_stream_active_pri == + ar->ac_stream_pri_map[traffic_class]) { + /* + * The highest priority stream just went inactive + * reset and search for the "next" highest "active" + * priority stream. + */ + ar->hiac_stream_active_pri = 0; + + for (i = 0; i < WMM_NUM_AC; i++) { + if (ar->ac_stream_active[i] && + (ar->ac_stream_pri_map[i] > + ar->hiac_stream_active_pri)) + /* + * Set the new highest active + * priority. + */ + ar->hiac_stream_active_pri = + ar->ac_stream_pri_map[i]; + } + } + } + + spin_unlock_bh(&ar->lock); + +notify_htc: + /* notify HTC, this may cause credit distribution changes */ + ath6kl_htc_activity_changed(ar->htc_target, eid, active); +} + +enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target, + struct htc_packet *packet) +{ + struct ath6kl *ar = target->dev->ar; + struct ath6kl_vif *vif; + enum htc_endpoint_id endpoint = packet->endpoint; + enum htc_send_full_action action = HTC_SEND_FULL_KEEP; + + if (endpoint == ar->ctrl_ep) { + /* + * Under normal WMI if this is getting full, then something + * is running rampant the host should not be exhausting the + * WMI queue with too many commands the only exception to + * this is during testing using endpointping. + */ + set_bit(WMI_CTRL_EP_FULL, &ar->flag); + ath6kl_err("wmi ctrl ep is full\n"); + ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL); + return action; + } + + if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG) + return action; + + /* + * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for + * the highest active stream. + */ + if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] < + ar->hiac_stream_active_pri && + ar->cookie_count <= + target->endpoint[endpoint].tx_drop_packet_threshold) + /* + * Give preference to the highest priority stream by + * dropping the packets which overflowed. 
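+ * (That is: when cookies run low, endpoints whose AC priority is
+ * below the highest active stream get HTC_SEND_FULL_DROP here,
+ * while that stream keeps the default HTC_SEND_FULL_KEEP.)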
+ */ + action = HTC_SEND_FULL_DROP; + + /* FIXME: Locking */ + spin_lock_bh(&ar->list_lock); + list_for_each_entry(vif, &ar->vif_list, list) { + if (vif->nw_type == ADHOC_NETWORK || + action != HTC_SEND_FULL_DROP) { + spin_unlock_bh(&ar->list_lock); + + set_bit(NETQ_STOPPED, &vif->flags); + netif_stop_queue(vif->ndev); + + return action; + } + } + spin_unlock_bh(&ar->list_lock); + + return action; +} + +/* TODO this needs to be looked at */ +static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif, + enum htc_endpoint_id eid, u32 map_no) +{ + struct ath6kl *ar = vif->ar; + u32 i; + + if (vif->nw_type != ADHOC_NETWORK) + return; + + if (!ar->ibss_ps_enable) + return; + + if (eid == ar->ctrl_ep) + return; + + if (map_no == 0) + return; + + map_no--; + ar->node_map[map_no].tx_pend--; + + if (ar->node_map[map_no].tx_pend) + return; + + if (map_no != (ar->node_num - 1)) + return; + + for (i = ar->node_num; i > 0; i--) { + if (ar->node_map[i - 1].tx_pend) + break; + + memset(&ar->node_map[i - 1], 0, + sizeof(struct ath6kl_node_mapping)); + ar->node_num--; + } +} + +void ath6kl_tx_complete(struct htc_target *target, + struct list_head *packet_queue) +{ + struct ath6kl *ar = target->dev->ar; + struct sk_buff_head skb_queue; + struct htc_packet *packet; + struct sk_buff *skb; + struct ath6kl_cookie *ath6kl_cookie; + u32 map_no = 0; + int status; + enum htc_endpoint_id eid; + bool wake_event = false; + bool flushing[ATH6KL_VIF_MAX] = {false}; + u8 if_idx; + struct ath6kl_vif *vif; + + skb_queue_head_init(&skb_queue); + + /* lock the driver as we update internal state */ + spin_lock_bh(&ar->lock); + + /* reap completed packets */ + while (!list_empty(packet_queue)) { + packet = list_first_entry(packet_queue, struct htc_packet, + list); + list_del(&packet->list); + + if (WARN_ON_ONCE(packet->endpoint == ENDPOINT_UNUSED || + packet->endpoint >= ENDPOINT_MAX)) + continue; + + ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt; + if (WARN_ON_ONCE(!ath6kl_cookie)) + continue; + + status = packet->status; + skb = ath6kl_cookie->skb; + eid = packet->endpoint; + map_no = ath6kl_cookie->map_no; + + if (WARN_ON_ONCE(!skb || !skb->data)) { + dev_kfree_skb(skb); + ath6kl_free_cookie(ar, ath6kl_cookie); + continue; + } + + __skb_queue_tail(&skb_queue, skb); + + if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) { + ath6kl_free_cookie(ar, ath6kl_cookie); + continue; + } + + ar->tx_pending[eid]--; + + if (eid != ar->ctrl_ep) + ar->total_tx_data_pend--; + + if (eid == ar->ctrl_ep) { + if (test_bit(WMI_CTRL_EP_FULL, &ar->flag)) + clear_bit(WMI_CTRL_EP_FULL, &ar->flag); + + if (ar->tx_pending[eid] == 0) + wake_event = true; + } + + if (eid == ar->ctrl_ep) { + if_idx = wmi_cmd_hdr_get_if_idx( + (struct wmi_cmd_hdr *) packet->buf); + } else { + if_idx = wmi_data_hdr_get_if_idx( + (struct wmi_data_hdr *) packet->buf); + } + + vif = ath6kl_get_vif_by_index(ar, if_idx); + if (!vif) { + ath6kl_free_cookie(ar, ath6kl_cookie); + continue; + } + + if (status) { + if (status == -ECANCELED) + /* a packet was flushed */ + flushing[if_idx] = true; + + vif->ndev->stats.tx_errors++; + + if (status != -ENOSPC && status != -ECANCELED) + ath6kl_warn("tx complete error: %d\n", status); + + ath6kl_dbg(ATH6KL_DBG_WLAN_TX, + "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n", + __func__, skb, packet->buf, packet->act_len, + eid, "error!"); + } else { + ath6kl_dbg(ATH6KL_DBG_WLAN_TX, + "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n", + __func__, skb, packet->buf, packet->act_len, + eid, "OK"); + + flushing[if_idx] = false; + 
vif->ndev->stats.tx_packets++; + vif->ndev->stats.tx_bytes += skb->len; + } + + ath6kl_tx_clear_node_map(vif, eid, map_no); + + ath6kl_free_cookie(ar, ath6kl_cookie); + + if (test_bit(NETQ_STOPPED, &vif->flags)) + clear_bit(NETQ_STOPPED, &vif->flags); + } + + spin_unlock_bh(&ar->lock); + + __skb_queue_purge(&skb_queue); + + /* FIXME: Locking */ + spin_lock_bh(&ar->list_lock); + list_for_each_entry(vif, &ar->vif_list, list) { + if (test_bit(CONNECTED, &vif->flags) && + !flushing[vif->fw_vif_idx]) { + spin_unlock_bh(&ar->list_lock); + netif_wake_queue(vif->ndev); + spin_lock_bh(&ar->list_lock); + } + } + spin_unlock_bh(&ar->list_lock); + + if (wake_event) + wake_up(&ar->event_wq); + + return; +} + +void ath6kl_tx_data_cleanup(struct ath6kl *ar) +{ + int i; + + /* flush all the data (non-control) streams */ + for (i = 0; i < WMM_NUM_AC; i++) + ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i], + ATH6KL_DATA_PKT_TAG); +} + +/* Rx functions */ + +static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev, + struct sk_buff *skb) +{ + if (!skb) + return; + + skb->dev = dev; + + if (!(skb->dev->flags & IFF_UP)) { + dev_kfree_skb(skb); + return; + } + + skb->protocol = eth_type_trans(skb, skb->dev); + + netif_rx(skb); +} + +static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num) +{ + struct sk_buff *skb; + + while (num) { + skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE); + if (!skb) { + ath6kl_err("netbuf allocation failed\n"); + return; + } + skb_queue_tail(q, skb); + num--; + } +} + +static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr) +{ + struct sk_buff *skb = NULL; + + if (skb_queue_len(&p_aggr->rx_amsdu_freeq) < + (AGGR_NUM_OF_FREE_NETBUFS >> 2)) + ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, + AGGR_NUM_OF_FREE_NETBUFS); + + skb = skb_dequeue(&p_aggr->rx_amsdu_freeq); + + return skb; +} + +void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint) +{ + struct ath6kl *ar = target->dev->ar; + struct sk_buff *skb; + int rx_buf; + int n_buf_refill; + struct htc_packet *packet; + struct list_head queue; + + n_buf_refill = ATH6KL_MAX_RX_BUFFERS - + ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint); + + if (n_buf_refill <= 0) + return; + + INIT_LIST_HEAD(&queue); + + ath6kl_dbg(ATH6KL_DBG_WLAN_RX, + "%s: providing htc with %d buffers at eid=%d\n", + __func__, n_buf_refill, endpoint); + + for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) { + skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE); + if (!skb) + break; + + packet = (struct htc_packet *) skb->head; + if (!IS_ALIGNED((unsigned long) skb->data, 4)) { + size_t len = skb_headlen(skb); + skb->data = PTR_ALIGN(skb->data - 4, 4); + skb_set_tail_pointer(skb, len); + } + set_htc_rxpkt_info(packet, skb, skb->data, + ATH6KL_BUFFER_SIZE, endpoint); + packet->skb = skb; + list_add_tail(&packet->list, &queue); + } + + if (!list_empty(&queue)) + ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue); +} + +void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count) +{ + struct htc_packet *packet; + struct sk_buff *skb; + + while (count) { + skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE); + if (!skb) + return; + + packet = (struct htc_packet *) skb->head; + if (!IS_ALIGNED((unsigned long) skb->data, 4)) { + size_t len = skb_headlen(skb); + skb->data = PTR_ALIGN(skb->data - 4, 4); + skb_set_tail_pointer(skb, len); + } + set_htc_rxpkt_info(packet, skb, skb->data, + ATH6KL_AMSDU_BUFFER_SIZE, 0); + packet->skb = skb; + + spin_lock_bh(&ar->lock); + list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue); + 
spin_unlock_bh(&ar->lock); + count--; + } +} + +/* + * Callback to allocate a receive buffer for a pending packet. We use a + * pre-allocated list of buffers of maximum AMSDU size (4K). + */ +struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target, + enum htc_endpoint_id endpoint, + int len) +{ + struct ath6kl *ar = target->dev->ar; + struct htc_packet *packet = NULL; + struct list_head *pkt_pos; + int refill_cnt = 0, depth = 0; + + ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n", + __func__, endpoint, len); + + if ((len <= ATH6KL_BUFFER_SIZE) || + (len > ATH6KL_AMSDU_BUFFER_SIZE)) + return NULL; + + spin_lock_bh(&ar->lock); + + if (list_empty(&ar->amsdu_rx_buffer_queue)) { + spin_unlock_bh(&ar->lock); + refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS; + goto refill_buf; + } + + packet = list_first_entry(&ar->amsdu_rx_buffer_queue, + struct htc_packet, list); + list_del(&packet->list); + list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue) + depth++; + + refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth; + spin_unlock_bh(&ar->lock); + + /* set actual endpoint ID */ + packet->endpoint = endpoint; + +refill_buf: + if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD) + ath6kl_refill_amsdu_rxbufs(ar, refill_cnt); + + return packet; +} + +static void aggr_slice_amsdu(struct aggr_info *p_aggr, + struct rxtid *rxtid, struct sk_buff *skb) +{ + struct sk_buff *new_skb; + struct ethhdr *hdr; + u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len; + u8 *framep; + + mac_hdr_len = sizeof(struct ethhdr); + framep = skb->data + mac_hdr_len; + amsdu_len = skb->len - mac_hdr_len; + + while (amsdu_len > mac_hdr_len) { + hdr = (struct ethhdr *) framep; + payload_8023_len = be16_to_cpu(hdr->h_proto); + + if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN || + payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) { + ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n", + payload_8023_len); + break; + } + + frame_8023_len = payload_8023_len + mac_hdr_len; + new_skb = aggr_get_free_skb(p_aggr); + if (!new_skb) { + ath6kl_err("no buffer available\n"); + break; + } + + memcpy(new_skb->data, framep, frame_8023_len); + skb_put(new_skb, frame_8023_len); + if (ath6kl_wmi_dot3_2_dix(new_skb)) { + ath6kl_err("dot3_2_dix error\n"); + dev_kfree_skb(new_skb); + break; + } + + skb_queue_tail(&rxtid->q, new_skb); + + /* Is this the last subframe within this aggregate ? */ + if ((amsdu_len - frame_8023_len) == 0) + break; + + /* Add the length of A-MSDU subframe padding bytes - + * Round to nearest word. + */ + frame_8023_len = ALIGN(frame_8023_len, 4); + + framep += frame_8023_len; + amsdu_len -= frame_8023_len; + } + + dev_kfree_skb(skb); +} + +static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid, + u16 seq_no, u8 order) +{ + struct sk_buff *skb; + struct rxtid *rxtid; + struct skb_hold_q *node; + u16 idx, idx_end, seq_end; + struct rxtid_stats *stats; + + rxtid = &agg_conn->rx_tid[tid]; + stats = &agg_conn->stat[tid]; + + spin_lock_bh(&rxtid->lock); + idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz); + + /* + * idx_end is typically the last possible frame in the window, + * but changes to 'the' seq_no, when BAR comes. If seq_no + * is non-zero, we will go up to that and stop. + * Note: last seq no in current window will occupy the same + * index position as index that is just previous to start. + * An imp point : if win_sz is 7, for seq_no space of 4095, + * then, there would be holes when sequence wrap around occurs. 
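+ * (Worked example, assuming AGGR_WIN_IDX() reduces the sequence
+ * number modulo the hold queue size: win_sz 7 gives hold_q_sz 14,
+ * and 4096 is not a multiple of 14, so the index computed for
+ * seq_no 0 after a wrap does not follow the index of seq_no 4095 -
+ * these are the holes. Power-of-two window sizes divide 4096
+ * evenly and avoid this.)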
+ * The target should judiciously choose win_sz based on this
+ * condition. For a 4095 sequence space, power-of-two win_sz
+ * values (2, 4, 8 or 16, with TID_WINDOW_SZ = 2 x win_sz) work
+ * fine.
+ * We must dequeue from "idx" to "idx_end", including both.
+ */
+ seq_end = seq_no ? seq_no : rxtid->seq_next;
+ idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
+
+ do {
+ node = &rxtid->hold_q[idx];
+ if ((order == 1) && (!node->skb))
+ break;
+
+ if (node->skb) {
+ if (node->is_amsdu)
+ aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
+ node->skb);
+ else
+ skb_queue_tail(&rxtid->q, node->skb);
+ node->skb = NULL;
+ } else {
+ stats->num_hole++;
+ }
+
+ rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
+ idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
+ } while (idx != idx_end);
+
+ spin_unlock_bh(&rxtid->lock);
+
+ stats->num_delivered += skb_queue_len(&rxtid->q);
+
+ while ((skb = skb_dequeue(&rxtid->q)))
+ ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
+}
+
+static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
+ u16 seq_no,
+ bool is_amsdu, struct sk_buff *frame)
+{
+ struct rxtid *rxtid;
+ struct rxtid_stats *stats;
+ struct sk_buff *skb;
+ struct skb_hold_q *node;
+ u16 idx, st, cur, end;
+ bool is_queued = false;
+ u16 extended_end;
+
+ rxtid = &agg_conn->rx_tid[tid];
+ stats = &agg_conn->stat[tid];
+
+ stats->num_into_aggr++;
+
+ if (!rxtid->aggr) {
+ if (is_amsdu) {
+ aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
+ is_queued = true;
+ stats->num_amsdu++;
+ while ((skb = skb_dequeue(&rxtid->q)))
+ ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
+ skb);
+ }
+ return is_queued;
+ }
+
+ /* Check whether the incoming sequence number is within the window */
+ st = rxtid->seq_next;
+ cur = seq_no;
+ end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;
+
+ if (((st < end) && (cur < st || cur > end)) ||
+ ((st > end) && (cur > end) && (cur < st))) {
+ extended_end = (end + rxtid->hold_q_sz - 1) &
+ ATH6KL_MAX_SEQ_NO;
+
+ if (((end < extended_end) &&
+ (cur < end || cur > extended_end)) ||
+ ((end > extended_end) && (cur > extended_end) &&
+ (cur < end))) {
+ aggr_deque_frms(agg_conn, tid, 0, 0);
+ spin_lock_bh(&rxtid->lock);
+ if (cur >= rxtid->hold_q_sz - 1)
+ rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
+ else
+ rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
+ (rxtid->hold_q_sz - 2 - cur);
+ spin_unlock_bh(&rxtid->lock);
+ } else {
+ /*
+ * Dequeue only those frames that are outside the
+ * new shifted window.
+ */
+ if (cur >= rxtid->hold_q_sz - 1)
+ st = cur - (rxtid->hold_q_sz - 1);
+ else
+ st = ATH6KL_MAX_SEQ_NO -
+ (rxtid->hold_q_sz - 2 - cur);
+
+ aggr_deque_frms(agg_conn, tid, st, 0);
+ }
+
+ stats->num_oow++;
+ }
+
+ idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);
+
+ node = &rxtid->hold_q[idx];
+
+ spin_lock_bh(&rxtid->lock);
+
+ /*
+ * Is the current frame a duplicate, or something beyond our window
+ * (hold_q, which is already 2x the window size)?
+ *
+ * 1. Duplicate is easy - drop incoming frame.
+ * 2. Not falling in current sliding window.
+ * 2a. Is the frame_seq_no preceding the current tid_seq_no?
+ * -> Drop the frame; perhaps the sender did not get our ACK.
+ * This is taken care of above.
+ * 2b. Is the frame_seq_no beyond the window (st, TID_WINDOW_SZ)?
+ * -> Also taken care of above, by moving the window forward.
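+ *
+ * At this point the frame falls inside the current (possibly
+ * shifted) window, so hold_q[idx] is either empty or holds a
+ * stale duplicate that can simply be dropped.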
+ */ + dev_kfree_skb(node->skb); + stats->num_dups++; + + node->skb = frame; + is_queued = true; + node->is_amsdu = is_amsdu; + node->seq_no = seq_no; + + if (node->is_amsdu) + stats->num_amsdu++; + else + stats->num_mpdu++; + + spin_unlock_bh(&rxtid->lock); + + aggr_deque_frms(agg_conn, tid, 0, 1); + + if (agg_conn->timer_scheduled) + return is_queued; + + spin_lock_bh(&rxtid->lock); + for (idx = 0; idx < rxtid->hold_q_sz; idx++) { + if (rxtid->hold_q[idx].skb) { + /* + * There is a frame in the queue and no + * timer so start a timer to ensure that + * the frame doesn't remain stuck + * forever. + */ + agg_conn->timer_scheduled = true; + mod_timer(&agg_conn->timer, + (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000)); + rxtid->timer_mon = true; + break; + } + } + spin_unlock_bh(&rxtid->lock); + + return is_queued; +} + +static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif, + struct ath6kl_sta *conn) +{ + struct ath6kl *ar = vif->ar; + bool is_apsdq_empty, is_apsdq_empty_at_start; + u32 num_frames_to_deliver, flags; + struct sk_buff *skb = NULL; + + /* + * If the APSD q for this STA is not empty, dequeue and + * send a pkt from the head of the q. Also update the + * More data bit in the WMI_DATA_HDR if there are + * more pkts for this STA in the APSD q. + * If there are no more pkts for this STA, + * update the APSD bitmap for this STA. + */ + + num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) & + ATH6KL_APSD_FRAME_MASK; + /* + * Number of frames to send in a service period is + * indicated by the station + * in the QOS_INFO of the association request + * If it is zero, send all frames + */ + if (!num_frames_to_deliver) + num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME; + + spin_lock_bh(&conn->psq_lock); + is_apsdq_empty = skb_queue_empty(&conn->apsdq); + spin_unlock_bh(&conn->psq_lock); + is_apsdq_empty_at_start = is_apsdq_empty; + + while ((!is_apsdq_empty) && (num_frames_to_deliver)) { + spin_lock_bh(&conn->psq_lock); + skb = skb_dequeue(&conn->apsdq); + is_apsdq_empty = skb_queue_empty(&conn->apsdq); + spin_unlock_bh(&conn->psq_lock); + + /* + * Set the STA flag to Trigger delivery, + * so that the frame will go out + */ + conn->sta_flags |= STA_PS_APSD_TRIGGER; + num_frames_to_deliver--; + + /* Last frame in the service period, set EOSP or queue empty */ + if ((is_apsdq_empty) || (!num_frames_to_deliver)) + conn->sta_flags |= STA_PS_APSD_EOSP; + + ath6kl_data_tx(skb, vif->ndev); + conn->sta_flags &= ~(STA_PS_APSD_TRIGGER); + conn->sta_flags &= ~(STA_PS_APSD_EOSP); + } + + if (is_apsdq_empty) { + if (is_apsdq_empty_at_start) + flags = WMI_AP_APSD_NO_DELIVERY_FRAMES; + else + flags = 0; + + ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi, + vif->fw_vif_idx, + conn->aid, 0, flags); + } + + return; +} + +void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) +{ + struct ath6kl *ar = target->dev->ar; + struct sk_buff *skb = packet->pkt_cntxt; + struct wmi_rx_meta_v2 *meta; + struct wmi_data_hdr *dhdr; + int min_hdr_len; + u8 meta_type, dot11_hdr = 0; + u8 pad_before_data_start; + int status = packet->status; + enum htc_endpoint_id ept = packet->endpoint; + bool is_amsdu, prev_ps, ps_state = false; + bool trig_state = false; + struct ath6kl_sta *conn = NULL; + struct sk_buff *skb1 = NULL; + struct ethhdr *datap = NULL; + struct ath6kl_vif *vif; + struct aggr_info_conn *aggr_conn; + u16 seq_no, offset; + u8 tid, if_idx; + + ath6kl_dbg(ATH6KL_DBG_WLAN_RX, + "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d", + __func__, ar, ept, skb, packet->buf, + 
packet->act_len, status);
+
+ if (status || packet->act_len < HTC_HDR_LENGTH) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
+ skb_pull(skb, HTC_HDR_LENGTH);
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
+ skb->data, skb->len);
+
+ if (ept == ar->ctrl_ep) {
+ if (test_bit(WMI_ENABLED, &ar->flag)) {
+ ath6kl_check_wow_status(ar);
+ ath6kl_wmi_control_rx(ar->wmi, skb);
+ return;
+ }
+ if_idx =
+ wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
+ } else {
+ if_idx =
+ wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
+ }
+
+ vif = ath6kl_get_vif_by_index(ar, if_idx);
+ if (!vif) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ /*
+ * Take lock to protect buffer counts and adaptive power throughput
+ * state.
+ */
+ spin_lock_bh(&vif->if_lock);
+
+ vif->ndev->stats.rx_packets++;
+ vif->ndev->stats.rx_bytes += packet->act_len;
+
+ spin_unlock_bh(&vif->if_lock);
+
+ skb->dev = vif->ndev;
+
+ if (!test_bit(WMI_ENABLED, &ar->flag)) {
+ if (EPPING_ALIGNMENT_PAD > 0)
+ skb_pull(skb, EPPING_ALIGNMENT_PAD);
+ ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
+ return;
+ }
+
+ ath6kl_check_wow_status(ar);
+
+ min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
+ sizeof(struct ath6kl_llc_snap_hdr);
+
+ dhdr = (struct wmi_data_hdr *) skb->data;
+
+ /*
+ * In the case of AP mode we may receive NULL data frames
+ * that do not have LLC hdr. They are 16 bytes in size.
+ * Allow these frames in the AP mode.
+ */
+ if (vif->nw_type != AP_NETWORK &&
+ ((packet->act_len < min_hdr_len) ||
+ (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
+ ath6kl_info("frame len is too short or too long\n");
+ vif->ndev->stats.rx_errors++;
+ vif->ndev->stats.rx_length_errors++;
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ pad_before_data_start =
+ (le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
+ & WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;
+
+ /* Get the Power save state of the STA */
+ if (vif->nw_type == AP_NETWORK) {
+ meta_type = wmi_data_hdr_get_meta(dhdr);
+
+ ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
+ WMI_DATA_HDR_PS_MASK);
+
+ offset = sizeof(struct wmi_data_hdr) + pad_before_data_start;
+ trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);
+
+ switch (meta_type) {
+ case 0:
+ break;
+ case WMI_META_VERSION_1:
+ offset += sizeof(struct wmi_rx_meta_v1);
+ break;
+ case WMI_META_VERSION_2:
+ offset += sizeof(struct wmi_rx_meta_v2);
+ break;
+ default:
+ break;
+ }
+
+ datap = (struct ethhdr *) (skb->data + offset);
+ conn = ath6kl_find_sta(vif, datap->h_source);
+
+ if (!conn) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ /*
+ * If there is a change in the PS state of the STA,
+ * take appropriate steps:
+ *
+ * 1. If Sleep-->Awake, flush the psq for the STA and
+ * clear the PVB for the STA.
+ * 2. If Awake-->Sleep, start queueing frames for
+ * the STA.
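+ *
+ * (The PVB is the partial virtual bitmap carried in the beacon's
+ * TIM element; clearing the STA's AID bit there advertises that
+ * no more frames are buffered for it.)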
+ */ + prev_ps = !!(conn->sta_flags & STA_PS_SLEEP); + + if (ps_state) + conn->sta_flags |= STA_PS_SLEEP; + else + conn->sta_flags &= ~STA_PS_SLEEP; + + /* Accept trigger only when the station is in sleep */ + if ((conn->sta_flags & STA_PS_SLEEP) && trig_state) + ath6kl_uapsd_trigger_frame_rx(vif, conn); + + if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) { + if (!(conn->sta_flags & STA_PS_SLEEP)) { + struct sk_buff *skbuff = NULL; + bool is_apsdq_empty; + struct ath6kl_mgmt_buff *mgmt; + u8 idx; + + spin_lock_bh(&conn->psq_lock); + while (conn->mgmt_psq_len > 0) { + mgmt = list_first_entry( + &conn->mgmt_psq, + struct ath6kl_mgmt_buff, + list); + list_del(&mgmt->list); + conn->mgmt_psq_len--; + spin_unlock_bh(&conn->psq_lock); + idx = vif->fw_vif_idx; + + ath6kl_wmi_send_mgmt_cmd(ar->wmi, + idx, + mgmt->id, + mgmt->freq, + mgmt->wait, + mgmt->buf, + mgmt->len, + mgmt->no_cck); + + kfree(mgmt); + spin_lock_bh(&conn->psq_lock); + } + conn->mgmt_psq_len = 0; + while ((skbuff = skb_dequeue(&conn->psq))) { + spin_unlock_bh(&conn->psq_lock); + ath6kl_data_tx(skbuff, vif->ndev); + spin_lock_bh(&conn->psq_lock); + } + + is_apsdq_empty = skb_queue_empty(&conn->apsdq); + while ((skbuff = skb_dequeue(&conn->apsdq))) { + spin_unlock_bh(&conn->psq_lock); + ath6kl_data_tx(skbuff, vif->ndev); + spin_lock_bh(&conn->psq_lock); + } + spin_unlock_bh(&conn->psq_lock); + + if (!is_apsdq_empty) + ath6kl_wmi_set_apsd_bfrd_traf( + ar->wmi, + vif->fw_vif_idx, + conn->aid, 0, 0); + + /* Clear the PVB for this STA */ + ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, + conn->aid, 0); + } + } + + /* drop NULL data frames here */ + if ((packet->act_len < min_hdr_len) || + (packet->act_len > + WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) { + dev_kfree_skb(skb); + return; + } + } + + is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false; + tid = wmi_data_hdr_get_up(dhdr); + seq_no = wmi_data_hdr_get_seqno(dhdr); + meta_type = wmi_data_hdr_get_meta(dhdr); + dot11_hdr = wmi_data_hdr_get_dot11(dhdr); + + skb_pull(skb, sizeof(struct wmi_data_hdr)); + + switch (meta_type) { + case WMI_META_VERSION_1: + skb_pull(skb, sizeof(struct wmi_rx_meta_v1)); + break; + case WMI_META_VERSION_2: + meta = (struct wmi_rx_meta_v2 *) skb->data; + if (meta->csum_flags & 0x1) { + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = (__force __wsum) meta->csum; + } + skb_pull(skb, sizeof(struct wmi_rx_meta_v2)); + break; + default: + break; + } + + skb_pull(skb, pad_before_data_start); + + if (dot11_hdr) + status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb); + else if (!is_amsdu) + status = ath6kl_wmi_dot3_2_dix(skb); + + if (status) { + /* + * Drop frames that could not be processed (lack of + * memory, etc.) + */ + dev_kfree_skb(skb); + return; + } + + if (!(vif->ndev->flags & IFF_UP)) { + dev_kfree_skb(skb); + return; + } + + if (vif->nw_type == AP_NETWORK) { + datap = (struct ethhdr *) skb->data; + if (is_multicast_ether_addr(datap->h_dest)) + /* + * Bcast/Mcast frames should be sent to the + * OS stack as well as on the air. + */ + skb1 = skb_copy(skb, GFP_ATOMIC); + else { + /* + * Search for a connected STA with dstMac + * as the Mac address. If found send the + * frame to it on the air else send the + * frame up the stack. 
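+			 * Note: when intra-BSS bridging is disabled
+			 * (ar->intra_bss == 0), a frame destined to a
+			 * connected STA is dropped instead of being
+			 * bridged or delivered up.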
+ */ + conn = ath6kl_find_sta(vif, datap->h_dest); + + if (conn && ar->intra_bss) { + skb1 = skb; + skb = NULL; + } else if (conn && !ar->intra_bss) { + dev_kfree_skb(skb); + skb = NULL; + } + } + if (skb1) + ath6kl_data_tx(skb1, vif->ndev); + + if (skb == NULL) { + /* nothing to deliver up the stack */ + return; + } + } + + datap = (struct ethhdr *) skb->data; + + if (is_unicast_ether_addr(datap->h_dest)) { + if (vif->nw_type == AP_NETWORK) { + conn = ath6kl_find_sta(vif, datap->h_source); + if (!conn) + return; + aggr_conn = conn->aggr_conn; + } else { + aggr_conn = vif->aggr_cntxt->aggr_conn; + } + + if (aggr_process_recv_frm(aggr_conn, tid, seq_no, + is_amsdu, skb)) { + /* aggregation code will handle the skb */ + return; + } + } else if (!is_broadcast_ether_addr(datap->h_dest)) { + vif->ndev->stats.multicast++; + } + + ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb); +} + +static void aggr_timeout(struct timer_list *t) +{ + u8 i, j; + struct aggr_info_conn *aggr_conn = from_timer(aggr_conn, t, timer); + struct rxtid *rxtid; + struct rxtid_stats *stats; + + for (i = 0; i < NUM_OF_TIDS; i++) { + rxtid = &aggr_conn->rx_tid[i]; + stats = &aggr_conn->stat[i]; + + if (!rxtid->aggr || !rxtid->timer_mon) + continue; + + stats->num_timeouts++; + ath6kl_dbg(ATH6KL_DBG_AGGR, + "aggr timeout (st %d end %d)\n", + rxtid->seq_next, + ((rxtid->seq_next + rxtid->hold_q_sz-1) & + ATH6KL_MAX_SEQ_NO)); + aggr_deque_frms(aggr_conn, i, 0, 0); + } + + aggr_conn->timer_scheduled = false; + + for (i = 0; i < NUM_OF_TIDS; i++) { + rxtid = &aggr_conn->rx_tid[i]; + + if (rxtid->aggr && rxtid->hold_q) { + spin_lock_bh(&rxtid->lock); + for (j = 0; j < rxtid->hold_q_sz; j++) { + if (rxtid->hold_q[j].skb) { + aggr_conn->timer_scheduled = true; + rxtid->timer_mon = true; + break; + } + } + spin_unlock_bh(&rxtid->lock); + + if (j >= rxtid->hold_q_sz) + rxtid->timer_mon = false; + } + } + + if (aggr_conn->timer_scheduled) + mod_timer(&aggr_conn->timer, + jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT)); +} + +static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid) +{ + struct rxtid *rxtid; + struct rxtid_stats *stats; + + if (!aggr_conn || tid >= NUM_OF_TIDS) + return; + + rxtid = &aggr_conn->rx_tid[tid]; + stats = &aggr_conn->stat[tid]; + + if (rxtid->aggr) + aggr_deque_frms(aggr_conn, tid, 0, 0); + + rxtid->aggr = false; + rxtid->timer_mon = false; + rxtid->win_sz = 0; + rxtid->seq_next = 0; + rxtid->hold_q_sz = 0; + + kfree(rxtid->hold_q); + rxtid->hold_q = NULL; + + memset(stats, 0, sizeof(struct rxtid_stats)); +} + +void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no, + u8 win_sz) +{ + struct ath6kl_sta *sta; + struct aggr_info_conn *aggr_conn = NULL; + struct rxtid *rxtid; + u16 hold_q_size; + u8 tid, aid; + + if (vif->nw_type == AP_NETWORK) { + aid = ath6kl_get_aid(tid_mux); + sta = ath6kl_find_sta_by_aid(vif->ar, aid); + if (sta) + aggr_conn = sta->aggr_conn; + } else { + aggr_conn = vif->aggr_cntxt->aggr_conn; + } + + if (!aggr_conn) + return; + + tid = ath6kl_get_tid(tid_mux); + if (tid >= NUM_OF_TIDS) + return; + + rxtid = &aggr_conn->rx_tid[tid]; + + if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX) + ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n", + __func__, win_sz, tid); + + if (rxtid->aggr) + aggr_delete_tid_state(aggr_conn, tid); + + rxtid->seq_next = seq_no; + hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q); + rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL); + if (!rxtid->hold_q) + return; + + rxtid->win_sz = win_sz; + 
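+	/*
+	 * Reorder-queue sizing, for illustration (assuming
+	 * TID_WINDOW_SZ() doubles the advertised window, as the
+	 * hold_q_size computation above suggests): win_sz = 8 gives
+	 * 16 hold_q slots, so a full extra window of out-of-order
+	 * frames beyond seq_next can be parked before
+	 * aggr_deque_frms() has to flush.
+	 */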
rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz); + if (!skb_queue_empty(&rxtid->q)) + return; + + rxtid->aggr = true; +} + +void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info, + struct aggr_info_conn *aggr_conn) +{ + struct rxtid *rxtid; + u8 i; + + aggr_conn->aggr_sz = AGGR_SZ_DEFAULT; + aggr_conn->dev = vif->ndev; + timer_setup(&aggr_conn->timer, aggr_timeout, 0); + aggr_conn->aggr_info = aggr_info; + + aggr_conn->timer_scheduled = false; + + for (i = 0; i < NUM_OF_TIDS; i++) { + rxtid = &aggr_conn->rx_tid[i]; + rxtid->aggr = false; + rxtid->timer_mon = false; + skb_queue_head_init(&rxtid->q); + spin_lock_init(&rxtid->lock); + } +} + +struct aggr_info *aggr_init(struct ath6kl_vif *vif) +{ + struct aggr_info *p_aggr = NULL; + + p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL); + if (!p_aggr) { + ath6kl_err("failed to alloc memory for aggr_node\n"); + return NULL; + } + + p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL); + if (!p_aggr->aggr_conn) { + ath6kl_err("failed to alloc memory for connection specific aggr info\n"); + kfree(p_aggr); + return NULL; + } + + aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn); + + skb_queue_head_init(&p_aggr->rx_amsdu_freeq); + ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS); + + return p_aggr; +} + +void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux) +{ + struct ath6kl_sta *sta; + struct rxtid *rxtid; + struct aggr_info_conn *aggr_conn = NULL; + u8 tid, aid; + + if (vif->nw_type == AP_NETWORK) { + aid = ath6kl_get_aid(tid_mux); + sta = ath6kl_find_sta_by_aid(vif->ar, aid); + if (sta) + aggr_conn = sta->aggr_conn; + } else { + aggr_conn = vif->aggr_cntxt->aggr_conn; + } + + if (!aggr_conn) + return; + + tid = ath6kl_get_tid(tid_mux); + if (tid >= NUM_OF_TIDS) + return; + + rxtid = &aggr_conn->rx_tid[tid]; + + if (rxtid->aggr) + aggr_delete_tid_state(aggr_conn, tid); +} + +void aggr_reset_state(struct aggr_info_conn *aggr_conn) +{ + u8 tid; + + if (!aggr_conn) + return; + + if (aggr_conn->timer_scheduled) { + del_timer(&aggr_conn->timer); + aggr_conn->timer_scheduled = false; + } + + for (tid = 0; tid < NUM_OF_TIDS; tid++) + aggr_delete_tid_state(aggr_conn, tid); +} + +/* clean up our amsdu buffer list */ +void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar) +{ + struct htc_packet *packet, *tmp_pkt; + + spin_lock_bh(&ar->lock); + if (list_empty(&ar->amsdu_rx_buffer_queue)) { + spin_unlock_bh(&ar->lock); + return; + } + + list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue, + list) { + list_del(&packet->list); + spin_unlock_bh(&ar->lock); + dev_kfree_skb(packet->pkt_cntxt); + spin_lock_bh(&ar->lock); + } + + spin_unlock_bh(&ar->lock); +} + +void aggr_module_destroy(struct aggr_info *aggr_info) +{ + if (!aggr_info) + return; + + aggr_reset_state(aggr_info->aggr_conn); + skb_queue_purge(&aggr_info->rx_amsdu_freeq); + kfree(aggr_info->aggr_conn); + kfree(aggr_info); +} diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c new file mode 100644 index 000000000..522080984 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/usb.c @@ -0,0 +1,1261 @@ +/* + * Copyright (c) 2007-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/module.h> +#include <linux/usb.h> + +#include "debug.h" +#include "core.h" + +/* constants */ +#define TX_URB_COUNT 32 +#define RX_URB_COUNT 32 +#define ATH6KL_USB_RX_BUFFER_SIZE 4096 + +/* tx/rx pipes for usb */ +enum ATH6KL_USB_PIPE_ID { + ATH6KL_USB_PIPE_TX_CTRL = 0, + ATH6KL_USB_PIPE_TX_DATA_LP, + ATH6KL_USB_PIPE_TX_DATA_MP, + ATH6KL_USB_PIPE_TX_DATA_HP, + ATH6KL_USB_PIPE_RX_CTRL, + ATH6KL_USB_PIPE_RX_DATA, + ATH6KL_USB_PIPE_RX_DATA2, + ATH6KL_USB_PIPE_RX_INT, + ATH6KL_USB_PIPE_MAX +}; + +#define ATH6KL_USB_PIPE_INVALID ATH6KL_USB_PIPE_MAX + +struct ath6kl_usb_pipe { + struct list_head urb_list_head; + struct usb_anchor urb_submitted; + u32 urb_alloc; + u32 urb_cnt; + u32 urb_cnt_thresh; + unsigned int usb_pipe_handle; + u32 flags; + u8 ep_address; + u8 logical_pipe_num; + struct ath6kl_usb *ar_usb; + u16 max_packet_size; + struct work_struct io_complete_work; + struct sk_buff_head io_comp_queue; + struct usb_endpoint_descriptor *ep_desc; +}; + +#define ATH6KL_USB_PIPE_FLAG_TX (1 << 0) + +/* usb device object */ +struct ath6kl_usb { + /* protects pipe->urb_list_head and pipe->urb_cnt */ + spinlock_t cs_lock; + + struct usb_device *udev; + struct usb_interface *interface; + struct ath6kl_usb_pipe pipes[ATH6KL_USB_PIPE_MAX]; + u8 *diag_cmd_buffer; + u8 *diag_resp_buffer; + struct ath6kl *ar; + struct workqueue_struct *wq; +}; + +/* usb urb object */ +struct ath6kl_urb_context { + struct list_head link; + struct ath6kl_usb_pipe *pipe; + struct sk_buff *skb; + struct ath6kl *ar; +}; + +/* USB endpoint definitions */ +#define ATH6KL_USB_EP_ADDR_APP_CTRL_IN 0x81 +#define ATH6KL_USB_EP_ADDR_APP_DATA_IN 0x82 +#define ATH6KL_USB_EP_ADDR_APP_DATA2_IN 0x83 +#define ATH6KL_USB_EP_ADDR_APP_INT_IN 0x84 + +#define ATH6KL_USB_EP_ADDR_APP_CTRL_OUT 0x01 +#define ATH6KL_USB_EP_ADDR_APP_DATA_LP_OUT 0x02 +#define ATH6KL_USB_EP_ADDR_APP_DATA_MP_OUT 0x03 +#define ATH6KL_USB_EP_ADDR_APP_DATA_HP_OUT 0x04 + +/* diagnostic command defnitions */ +#define ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD 1 +#define ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP 2 +#define ATH6KL_USB_CONTROL_REQ_DIAG_CMD 3 +#define ATH6KL_USB_CONTROL_REQ_DIAG_RESP 4 + +#define ATH6KL_USB_CTRL_DIAG_CC_READ 0 +#define ATH6KL_USB_CTRL_DIAG_CC_WRITE 1 + +struct ath6kl_usb_ctrl_diag_cmd_write { + __le32 cmd; + __le32 address; + __le32 value; + __le32 _pad[1]; +} __packed; + +struct ath6kl_usb_ctrl_diag_cmd_read { + __le32 cmd; + __le32 address; +} __packed; + +struct ath6kl_usb_ctrl_diag_resp_read { + __le32 value; +} __packed; + +/* function declarations */ +static void ath6kl_usb_recv_complete(struct urb *urb); + +#define ATH6KL_USB_IS_BULK_EP(attr) (((attr) & 3) == 0x02) +#define ATH6KL_USB_IS_INT_EP(attr) (((attr) & 3) == 0x03) +#define ATH6KL_USB_IS_ISOC_EP(attr) (((attr) & 3) == 0x01) +#define ATH6KL_USB_IS_DIR_IN(addr) ((addr) & 0x80) + +/* pipe/urb operations */ +static struct ath6kl_urb_context * +ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe) +{ + struct ath6kl_urb_context *urb_context = NULL; + unsigned 
long flags;
+
+	/* bail if this pipe is not initialized */
+	if (!pipe->ar_usb)
+		return NULL;
+
+	spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
+	if (!list_empty(&pipe->urb_list_head)) {
+		urb_context =
+		    list_first_entry(&pipe->urb_list_head,
+				     struct ath6kl_urb_context, link);
+		list_del(&urb_context->link);
+		pipe->urb_cnt--;
+	}
+	spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
+
+	return urb_context;
+}
+
+static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe,
+					struct ath6kl_urb_context *urb_context)
+{
+	unsigned long flags;
+
+	/* bail if this pipe is not initialized */
+	if (!pipe->ar_usb)
+		return;
+
+	spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
+	pipe->urb_cnt++;
+
+	list_add(&urb_context->link, &pipe->urb_list_head);
+	spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
+}
+
+static void ath6kl_usb_cleanup_recv_urb(struct ath6kl_urb_context *urb_context)
+{
+	dev_kfree_skb(urb_context->skb);
+	urb_context->skb = NULL;
+
+	ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
+}
+
+static inline struct ath6kl_usb *ath6kl_usb_priv(struct ath6kl *ar)
+{
+	return ar->hif_priv;
+}
+
+/* pipe resource allocation/cleanup */
+static int ath6kl_usb_alloc_pipe_resources(struct ath6kl_usb_pipe *pipe,
+					   int urb_cnt)
+{
+	struct ath6kl_urb_context *urb_context;
+	int status = 0, i;
+
+	INIT_LIST_HEAD(&pipe->urb_list_head);
+	init_usb_anchor(&pipe->urb_submitted);
+
+	for (i = 0; i < urb_cnt; i++) {
+		urb_context = kzalloc(sizeof(struct ath6kl_urb_context),
+				      GFP_KERNEL);
+		if (urb_context == NULL) {
+			status = -ENOMEM;
+			goto fail_alloc_pipe_resources;
+		}
+
+		urb_context->pipe = pipe;
+
+		/*
+		 * We only allocate the urb contexts here; the actual URB
+		 * is allocated from the kernel as needed to do a
+		 * transaction.
+		 */
+		pipe->urb_alloc++;
+		ath6kl_usb_free_urb_to_pipe(pipe, urb_context);
+	}
+
+	ath6kl_dbg(ATH6KL_DBG_USB,
+		   "ath6kl usb: alloc resources lpipe:%d hpipe:0x%X urbs:%d\n",
+		   pipe->logical_pipe_num, pipe->usb_pipe_handle,
+		   pipe->urb_alloc);
+
+fail_alloc_pipe_resources:
+	return status;
+}
+
+static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe)
+{
+	struct ath6kl_urb_context *urb_context;
+
+	if (pipe->ar_usb == NULL) {
+		/* nothing allocated for this pipe */
+		return;
+	}
+
+	ath6kl_dbg(ATH6KL_DBG_USB,
+		   "ath6kl usb: free resources lpipe:%d hpipe:0x%X urbs:%d avail:%d\n",
+		   pipe->logical_pipe_num, pipe->usb_pipe_handle,
+		   pipe->urb_alloc, pipe->urb_cnt);
+
+	if (pipe->urb_alloc != pipe->urb_cnt) {
+		ath6kl_dbg(ATH6KL_DBG_USB,
+			   "ath6kl usb: urb leak! 
lpipe:%d" + "hpipe:0x%X urbs:%d avail:%d\n", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->urb_alloc, pipe->urb_cnt); + } + + while (true) { + urb_context = ath6kl_usb_alloc_urb_from_pipe(pipe); + if (urb_context == NULL) + break; + kfree(urb_context); + } +} + +static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb) +{ + int i; + + for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) + ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]); +} + +static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb, + u8 ep_address, int *urb_count) +{ + u8 pipe_num = ATH6KL_USB_PIPE_INVALID; + + switch (ep_address) { + case ATH6KL_USB_EP_ADDR_APP_CTRL_IN: + pipe_num = ATH6KL_USB_PIPE_RX_CTRL; + *urb_count = RX_URB_COUNT; + break; + case ATH6KL_USB_EP_ADDR_APP_DATA_IN: + pipe_num = ATH6KL_USB_PIPE_RX_DATA; + *urb_count = RX_URB_COUNT; + break; + case ATH6KL_USB_EP_ADDR_APP_INT_IN: + pipe_num = ATH6KL_USB_PIPE_RX_INT; + *urb_count = RX_URB_COUNT; + break; + case ATH6KL_USB_EP_ADDR_APP_DATA2_IN: + pipe_num = ATH6KL_USB_PIPE_RX_DATA2; + *urb_count = RX_URB_COUNT; + break; + case ATH6KL_USB_EP_ADDR_APP_CTRL_OUT: + pipe_num = ATH6KL_USB_PIPE_TX_CTRL; + *urb_count = TX_URB_COUNT; + break; + case ATH6KL_USB_EP_ADDR_APP_DATA_LP_OUT: + pipe_num = ATH6KL_USB_PIPE_TX_DATA_LP; + *urb_count = TX_URB_COUNT; + break; + case ATH6KL_USB_EP_ADDR_APP_DATA_MP_OUT: + pipe_num = ATH6KL_USB_PIPE_TX_DATA_MP; + *urb_count = TX_URB_COUNT; + break; + case ATH6KL_USB_EP_ADDR_APP_DATA_HP_OUT: + pipe_num = ATH6KL_USB_PIPE_TX_DATA_HP; + *urb_count = TX_URB_COUNT; + break; + default: + /* note: there may be endpoints not currently used */ + break; + } + + return pipe_num; +} + +static int ath6kl_usb_setup_pipe_resources(struct ath6kl_usb *ar_usb) +{ + struct usb_interface *interface = ar_usb->interface; + struct usb_host_interface *iface_desc = interface->cur_altsetting; + struct usb_endpoint_descriptor *endpoint; + struct ath6kl_usb_pipe *pipe; + int i, urbcount, status = 0; + u8 pipe_num; + + ath6kl_dbg(ATH6KL_DBG_USB, "setting up USB Pipes using interface\n"); + + /* walk descriptors and setup pipes */ + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; + + if (ATH6KL_USB_IS_BULK_EP(endpoint->bmAttributes)) { + ath6kl_dbg(ATH6KL_DBG_USB, + "%s Bulk Ep:0x%2.2X maxpktsz:%d\n", + ATH6KL_USB_IS_DIR_IN + (endpoint->bEndpointAddress) ? + "RX" : "TX", endpoint->bEndpointAddress, + le16_to_cpu(endpoint->wMaxPacketSize)); + } else if (ATH6KL_USB_IS_INT_EP(endpoint->bmAttributes)) { + ath6kl_dbg(ATH6KL_DBG_USB, + "%s Int Ep:0x%2.2X maxpktsz:%d interval:%d\n", + ATH6KL_USB_IS_DIR_IN + (endpoint->bEndpointAddress) ? + "RX" : "TX", endpoint->bEndpointAddress, + le16_to_cpu(endpoint->wMaxPacketSize), + endpoint->bInterval); + } else if (ATH6KL_USB_IS_ISOC_EP(endpoint->bmAttributes)) { + /* TODO for ISO */ + ath6kl_dbg(ATH6KL_DBG_USB, + "%s ISOC Ep:0x%2.2X maxpktsz:%d interval:%d\n", + ATH6KL_USB_IS_DIR_IN + (endpoint->bEndpointAddress) ? + "RX" : "TX", endpoint->bEndpointAddress, + le16_to_cpu(endpoint->wMaxPacketSize), + endpoint->bInterval); + } + + /* Ignore broken descriptors. 
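A zero wMaxPacketSize
+		 * would later cause a division by zero in
+		 * ath6kl_usb_send() (len % pipe->max_packet_size).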
*/ + if (usb_endpoint_maxp(endpoint) == 0) + continue; + + urbcount = 0; + + pipe_num = + ath6kl_usb_get_logical_pipe_num(ar_usb, + endpoint->bEndpointAddress, + &urbcount); + if (pipe_num == ATH6KL_USB_PIPE_INVALID) + continue; + + pipe = &ar_usb->pipes[pipe_num]; + if (pipe->ar_usb != NULL) { + /* hmmm..pipe was already setup */ + continue; + } + + pipe->ar_usb = ar_usb; + pipe->logical_pipe_num = pipe_num; + pipe->ep_address = endpoint->bEndpointAddress; + pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize); + + if (ATH6KL_USB_IS_BULK_EP(endpoint->bmAttributes)) { + if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvbulkpipe(ar_usb->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndbulkpipe(ar_usb->udev, + pipe->ep_address); + } + } else if (ATH6KL_USB_IS_INT_EP(endpoint->bmAttributes)) { + if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvintpipe(ar_usb->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndintpipe(ar_usb->udev, + pipe->ep_address); + } + } else if (ATH6KL_USB_IS_ISOC_EP(endpoint->bmAttributes)) { + /* TODO for ISO */ + if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvisocpipe(ar_usb->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndisocpipe(ar_usb->udev, + pipe->ep_address); + } + } + + pipe->ep_desc = endpoint; + + if (!ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) + pipe->flags |= ATH6KL_USB_PIPE_FLAG_TX; + + status = ath6kl_usb_alloc_pipe_resources(pipe, urbcount); + if (status != 0) + break; + } + + return status; +} + +/* pipe operations */ +static void ath6kl_usb_post_recv_transfers(struct ath6kl_usb_pipe *recv_pipe, + int buffer_length) +{ + struct ath6kl_urb_context *urb_context; + struct urb *urb; + int usb_status; + + while (true) { + urb_context = ath6kl_usb_alloc_urb_from_pipe(recv_pipe); + if (urb_context == NULL) + break; + + urb_context->skb = dev_alloc_skb(buffer_length); + if (urb_context->skb == NULL) + goto err_cleanup_urb; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (urb == NULL) + goto err_cleanup_urb; + + usb_fill_bulk_urb(urb, + recv_pipe->ar_usb->udev, + recv_pipe->usb_pipe_handle, + urb_context->skb->data, + buffer_length, + ath6kl_usb_recv_complete, urb_context); + + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "ath6kl usb: bulk recv submit:%d, 0x%X (ep:0x%2.2X), %d bytes buf:0x%p\n", + recv_pipe->logical_pipe_num, + recv_pipe->usb_pipe_handle, recv_pipe->ep_address, + buffer_length, urb_context->skb); + + usb_anchor_urb(urb, &recv_pipe->urb_submitted); + usb_status = usb_submit_urb(urb, GFP_ATOMIC); + + if (usb_status) { + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "ath6kl usb : usb bulk recv failed %d\n", + usb_status); + usb_unanchor_urb(urb); + usb_free_urb(urb); + goto err_cleanup_urb; + } + usb_free_urb(urb); + } + return; + +err_cleanup_urb: + ath6kl_usb_cleanup_recv_urb(urb_context); + return; +} + +static void ath6kl_usb_flush_all(struct ath6kl_usb *ar_usb) +{ + int i; + + for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) { + if (ar_usb->pipes[i].ar_usb != NULL) + usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted); + } + + /* + * Flushing any pending I/O may schedule work this call will block + * until all scheduled work runs to completion. 
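+	 * (Killing the anchored URBs runs their completion handlers,
+	 * which can still queue io_complete_work, e.g. for in-flight
+	 * tx skbs, hence the flush below.)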
+ */ + flush_workqueue(ar_usb->wq); +} + +static void ath6kl_usb_start_recv_pipes(struct ath6kl_usb *ar_usb) +{ + /* + * note: control pipe is no longer used + * ar_usb->pipes[ATH6KL_USB_PIPE_RX_CTRL].urb_cnt_thresh = + * ar_usb->pipes[ATH6KL_USB_PIPE_RX_CTRL].urb_alloc/2; + * ath6kl_usb_post_recv_transfers(&ar_usb-> + * pipes[ATH6KL_USB_PIPE_RX_CTRL], + * ATH6KL_USB_RX_BUFFER_SIZE); + */ + + ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_cnt_thresh = 1; + + ath6kl_usb_post_recv_transfers(&ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA], + ATH6KL_USB_RX_BUFFER_SIZE); +} + +/* hif usb rx/tx completion functions */ +static void ath6kl_usb_recv_complete(struct urb *urb) +{ + struct ath6kl_urb_context *urb_context = urb->context; + struct ath6kl_usb_pipe *pipe = urb_context->pipe; + struct sk_buff *skb = NULL; + int status = 0; + + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "%s: recv pipe: %d, stat:%d, len:%d urb:0x%p\n", __func__, + pipe->logical_pipe_num, urb->status, urb->actual_length, + urb); + + if (urb->status != 0) { + status = -EIO; + switch (urb->status) { + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* + * no need to spew these errors when device + * removed or urb killed due to driver shutdown + */ + status = -ECANCELED; + break; + default: + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "%s recv pipe: %d (ep:0x%2.2X), failed:%d\n", + __func__, pipe->logical_pipe_num, + pipe->ep_address, urb->status); + break; + } + goto cleanup_recv_urb; + } + + if (urb->actual_length == 0) + goto cleanup_recv_urb; + + skb = urb_context->skb; + + /* we are going to pass it up */ + urb_context->skb = NULL; + skb_put(skb, urb->actual_length); + + /* note: queue implements a lock */ + skb_queue_tail(&pipe->io_comp_queue, skb); + queue_work(pipe->ar_usb->wq, &pipe->io_complete_work); + +cleanup_recv_urb: + ath6kl_usb_cleanup_recv_urb(urb_context); + + if (status == 0 && + pipe->urb_cnt >= pipe->urb_cnt_thresh) { + /* our free urbs are piling up, post more transfers */ + ath6kl_usb_post_recv_transfers(pipe, ATH6KL_USB_RX_BUFFER_SIZE); + } +} + +static void ath6kl_usb_usb_transmit_complete(struct urb *urb) +{ + struct ath6kl_urb_context *urb_context = urb->context; + struct ath6kl_usb_pipe *pipe = urb_context->pipe; + struct sk_buff *skb; + + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "%s: pipe: %d, stat:%d, len:%d\n", + __func__, pipe->logical_pipe_num, urb->status, + urb->actual_length); + + if (urb->status != 0) { + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "%s: pipe: %d, failed:%d\n", + __func__, pipe->logical_pipe_num, urb->status); + } + + skb = urb_context->skb; + urb_context->skb = NULL; + ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context); + + /* note: queue implements a lock */ + skb_queue_tail(&pipe->io_comp_queue, skb); + queue_work(pipe->ar_usb->wq, &pipe->io_complete_work); +} + +static void ath6kl_usb_io_comp_work(struct work_struct *work) +{ + struct ath6kl_usb_pipe *pipe = container_of(work, + struct ath6kl_usb_pipe, + io_complete_work); + struct ath6kl_usb *ar_usb; + struct sk_buff *skb; + + ar_usb = pipe->ar_usb; + + while ((skb = skb_dequeue(&pipe->io_comp_queue))) { + if (pipe->flags & ATH6KL_USB_PIPE_FLAG_TX) { + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "ath6kl usb xmit callback buf:0x%p\n", skb); + ath6kl_core_tx_complete(ar_usb->ar, skb); + } else { + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "ath6kl usb recv callback buf:0x%p\n", skb); + ath6kl_core_rx_complete(ar_usb->ar, skb, + pipe->logical_pipe_num); + } + } +} + +#define ATH6KL_USB_MAX_DIAG_CMD (sizeof(struct ath6kl_usb_ctrl_diag_cmd_write)) +#define 
ATH6KL_USB_MAX_DIAG_RESP (sizeof(struct ath6kl_usb_ctrl_diag_resp_read)) + +static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb) +{ + ath6kl_usb_flush_all(ar_usb); + + ath6kl_usb_cleanup_pipe_resources(ar_usb); + + usb_set_intfdata(ar_usb->interface, NULL); + + kfree(ar_usb->diag_cmd_buffer); + kfree(ar_usb->diag_resp_buffer); + destroy_workqueue(ar_usb->wq); + + kfree(ar_usb); +} + +static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface) +{ + struct usb_device *dev = interface_to_usbdev(interface); + struct ath6kl_usb *ar_usb; + struct ath6kl_usb_pipe *pipe; + int status = 0; + int i; + + /* ath6kl_usb_destroy() needs ar_usb != NULL && ar_usb->wq != NULL. */ + ar_usb = kzalloc(sizeof(struct ath6kl_usb), GFP_KERNEL); + if (ar_usb == NULL) + return NULL; + ar_usb->wq = alloc_workqueue("ath6kl_wq", 0, 0); + if (!ar_usb->wq) { + kfree(ar_usb); + return NULL; + } + + usb_set_intfdata(interface, ar_usb); + spin_lock_init(&(ar_usb->cs_lock)); + ar_usb->udev = dev; + ar_usb->interface = interface; + + for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) { + pipe = &ar_usb->pipes[i]; + INIT_WORK(&pipe->io_complete_work, + ath6kl_usb_io_comp_work); + skb_queue_head_init(&pipe->io_comp_queue); + } + + ar_usb->diag_cmd_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_CMD, GFP_KERNEL); + if (ar_usb->diag_cmd_buffer == NULL) { + status = -ENOMEM; + goto fail_ath6kl_usb_create; + } + + ar_usb->diag_resp_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_RESP, + GFP_KERNEL); + if (ar_usb->diag_resp_buffer == NULL) { + status = -ENOMEM; + goto fail_ath6kl_usb_create; + } + + status = ath6kl_usb_setup_pipe_resources(ar_usb); + +fail_ath6kl_usb_create: + if (status != 0) { + ath6kl_usb_destroy(ar_usb); + ar_usb = NULL; + } + return ar_usb; +} + +static void ath6kl_usb_device_detached(struct usb_interface *interface) +{ + struct ath6kl_usb *ar_usb; + + ar_usb = usb_get_intfdata(interface); + if (ar_usb == NULL) + return; + + ath6kl_stop_txrx(ar_usb->ar); + + /* Delay to wait for the target to reboot */ + mdelay(20); + ath6kl_core_cleanup(ar_usb->ar); + ath6kl_usb_destroy(ar_usb); +} + +/* exported hif usb APIs for htc pipe */ +static void hif_start(struct ath6kl *ar) +{ + struct ath6kl_usb *device = ath6kl_usb_priv(ar); + int i; + + ath6kl_usb_start_recv_pipes(device); + + /* set the TX resource avail threshold for each TX pipe */ + for (i = ATH6KL_USB_PIPE_TX_CTRL; + i <= ATH6KL_USB_PIPE_TX_DATA_HP; i++) { + device->pipes[i].urb_cnt_thresh = + device->pipes[i].urb_alloc / 2; + } +} + +static int ath6kl_usb_send(struct ath6kl *ar, u8 PipeID, + struct sk_buff *hdr_skb, struct sk_buff *skb) +{ + struct ath6kl_usb *device = ath6kl_usb_priv(ar); + struct ath6kl_usb_pipe *pipe = &device->pipes[PipeID]; + struct ath6kl_urb_context *urb_context; + int usb_status, status = 0; + struct urb *urb; + u8 *data; + u32 len; + + ath6kl_dbg(ATH6KL_DBG_USB_BULK, "+%s pipe : %d, buf:0x%p\n", + __func__, PipeID, skb); + + urb_context = ath6kl_usb_alloc_urb_from_pipe(pipe); + + if (urb_context == NULL) { + /* + * TODO: it is possible to run out of urbs if + * 2 endpoints map to the same pipe ID + */ + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "%s pipe:%d no urbs left. 
URB Cnt : %d\n",
+			   __func__, PipeID, pipe->urb_cnt);
+		status = -ENOMEM;
+		goto fail_hif_send;
+	}
+
+	urb_context->skb = skb;
+
+	data = skb->data;
+	len = skb->len;
+
+	urb = usb_alloc_urb(0, GFP_ATOMIC);
+	if (urb == NULL) {
+		status = -ENOMEM;
+		ath6kl_usb_free_urb_to_pipe(urb_context->pipe,
+					    urb_context);
+		goto fail_hif_send;
+	}
+
+	usb_fill_bulk_urb(urb,
+			  device->udev,
+			  pipe->usb_pipe_handle,
+			  data,
+			  len,
+			  ath6kl_usb_usb_transmit_complete, urb_context);
+
+	if ((len % pipe->max_packet_size) == 0) {
+		/* hit a max packet boundary on this pipe */
+		urb->transfer_flags |= URB_ZERO_PACKET;
+	}
+
+	ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+		   "athusb bulk send submit:%d, 0x%X (ep:0x%2.2X), %d bytes\n",
+		   pipe->logical_pipe_num, pipe->usb_pipe_handle,
+		   pipe->ep_address, len);
+
+	usb_anchor_urb(urb, &pipe->urb_submitted);
+	usb_status = usb_submit_urb(urb, GFP_ATOMIC);
+
+	if (usb_status) {
+		ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+			   "ath6kl usb : usb bulk transmit failed %d\n",
+			   usb_status);
+		usb_unanchor_urb(urb);
+		ath6kl_usb_free_urb_to_pipe(urb_context->pipe,
+					    urb_context);
+		status = -EINVAL;
+	}
+	usb_free_urb(urb);
+
+fail_hif_send:
+	return status;
+}
+
+static void hif_stop(struct ath6kl *ar)
+{
+	struct ath6kl_usb *device = ath6kl_usb_priv(ar);
+
+	ath6kl_usb_flush_all(device);
+}
+
+static void ath6kl_usb_get_default_pipe(struct ath6kl *ar,
+					u8 *ul_pipe, u8 *dl_pipe)
+{
+	*ul_pipe = ATH6KL_USB_PIPE_TX_CTRL;
+	*dl_pipe = ATH6KL_USB_PIPE_RX_CTRL;
+}
+
+static int ath6kl_usb_map_service_pipe(struct ath6kl *ar, u16 svc_id,
+				       u8 *ul_pipe, u8 *dl_pipe)
+{
+	int status = 0;
+
+	switch (svc_id) {
+	case HTC_CTRL_RSVD_SVC:
+	case WMI_CONTROL_SVC:
+		*ul_pipe = ATH6KL_USB_PIPE_TX_CTRL;
+		/* due to large control packets, shift to data pipe */
+		*dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
+		break;
+	case WMI_DATA_BE_SVC:
+	case WMI_DATA_BK_SVC:
+		*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
+		/*
+		 * Disable rxdata2 directly; it will be enabled
+		 * if the FW enables rxdata2.
+		 */
+		*dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
+		break;
+	case WMI_DATA_VI_SVC:
+		if (test_bit(ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT,
+			     ar->fw_capabilities))
+			*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
+		else
+			*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
+		/*
+		 * Disable rxdata2 directly; it will be enabled
+		 * if the FW enables rxdata2.
+		 */
+		*dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
+		break;
+	case WMI_DATA_VO_SVC:
+		if (test_bit(ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT,
+			     ar->fw_capabilities))
+			*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
+		else
+			*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
+		/*
+		 * Disable rxdata2 directly; it will be enabled
+		 * if the FW enables rxdata2.
+		 */
+		*dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
+		break;
+	default:
+		status = -EPERM;
+		break;
+	}
+
+	return status;
+}
+
+static u16 ath6kl_usb_get_free_queue_number(struct ath6kl *ar, u8 pipe_id)
+{
+	struct ath6kl_usb *device = ath6kl_usb_priv(ar);
+
+	return device->pipes[pipe_id].urb_cnt;
+}
+
+static void hif_detach_htc(struct ath6kl *ar)
+{
+	struct ath6kl_usb *device = ath6kl_usb_priv(ar);
+
+	ath6kl_usb_flush_all(device);
+}
+
+static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
+				      u8 req, u16 value, u16 index, void *data,
+				      u32 size)
+{
+	u8 *buf = NULL;
+	int ret;
+
+	if (size > 0) {
+		buf = kmemdup(data, size, GFP_KERNEL);
+		if (buf == NULL)
+			return -ENOMEM;
+	}
+
+	/* note: if successful, returns number of bytes transferred */
+	ret = usb_control_msg(ar_usb->udev,
+			      usb_sndctrlpipe(ar_usb->udev, 0),
+			      req,
+			      USB_DIR_OUT | USB_TYPE_VENDOR |
+			      USB_RECIP_DEVICE, value, index, buf,
+			      size, 1000);
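+
+	/*
+	 * A short transfer is not expected here: the full 'size' is
+	 * handed to the host controller in a single vendor request,
+	 * so only ret < 0 is treated as failure below.
+	 */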
+
+	if (ret < 0) {
+		ath6kl_warn("Failed to submit usb control message: %d\n", ret);
+		kfree(buf);
+		return ret;
+	}
+
+	kfree(buf);
+
+	return 0;
+}
+
+static int ath6kl_usb_submit_ctrl_in(struct ath6kl_usb *ar_usb,
+				     u8 req, u16 value, u16 index, void *data,
+				     u32 size)
+{
+	u8 *buf = NULL;
+	int ret;
+
+	if (size > 0) {
+		buf = kmalloc(size, GFP_KERNEL);
+		if (buf == NULL)
+			return -ENOMEM;
+	}
+
+	/* note: if successful, returns number of bytes transferred */
+	ret = usb_control_msg(ar_usb->udev,
+			      usb_rcvctrlpipe(ar_usb->udev, 0),
+			      req,
+			      USB_DIR_IN | USB_TYPE_VENDOR |
+			      USB_RECIP_DEVICE, value, index, buf,
+			      size, 2000);
+
+	if (ret < 0) {
+		ath6kl_warn("Failed to read usb control message: %d\n", ret);
+		kfree(buf);
+		return ret;
+	}
+
+	memcpy((u8 *) data, buf, size);
+
+	kfree(buf);
+
+	return 0;
+}
+
+static int ath6kl_usb_ctrl_msg_exchange(struct ath6kl_usb *ar_usb,
+					u8 req_val, u8 *req_buf, u32 req_len,
+					u8 resp_val, u8 *resp_buf, u32 *resp_len)
+{
+	int ret;
+
+	/* send command */
+	ret = ath6kl_usb_submit_ctrl_out(ar_usb, req_val, 0, 0,
+					 req_buf, req_len);
+
+	if (ret != 0)
+		return ret;
+
+	if (resp_buf == NULL) {
+		/* no expected response */
+		return ret;
+	}
+
+	/* get response */
+	ret = ath6kl_usb_submit_ctrl_in(ar_usb, resp_val, 0, 0,
+					resp_buf, *resp_len);
+
+	return ret;
+}
+
+static int ath6kl_usb_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
+{
+	struct ath6kl_usb *ar_usb = ar->hif_priv;
+	struct ath6kl_usb_ctrl_diag_resp_read *resp;
+	struct ath6kl_usb_ctrl_diag_cmd_read *cmd;
+	u32 resp_len;
+	int ret;
+
+	cmd = (struct ath6kl_usb_ctrl_diag_cmd_read *) ar_usb->diag_cmd_buffer;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->cmd = ATH6KL_USB_CTRL_DIAG_CC_READ;
+	cmd->address = cpu_to_le32(address);
+	resp_len = sizeof(*resp);
+
+	ret = ath6kl_usb_ctrl_msg_exchange(ar_usb,
+				ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
+				(u8 *) cmd,
+				sizeof(struct ath6kl_usb_ctrl_diag_cmd_write),
+				ATH6KL_USB_CONTROL_REQ_DIAG_RESP,
+				ar_usb->diag_resp_buffer, &resp_len);
+
+	if (ret) {
+		ath6kl_warn("diag read32 failed: %d\n", ret);
+		return ret;
+	}
+
+	resp = (struct ath6kl_usb_ctrl_diag_resp_read *)
+		ar_usb->diag_resp_buffer;
+
+	*data = le32_to_cpu(resp->value);
+
+	return ret;
+}
+
+static int ath6kl_usb_diag_write32(struct ath6kl *ar, u32 address, __le32 data)
+{
+	struct ath6kl_usb *ar_usb = ar->hif_priv;
+	struct ath6kl_usb_ctrl_diag_cmd_write *cmd;
+	int ret;
+
+	cmd = (struct ath6kl_usb_ctrl_diag_cmd_write *) ar_usb->diag_cmd_buffer;
+
+	memset(cmd, 0, sizeof(struct ath6kl_usb_ctrl_diag_cmd_write));
+	cmd->cmd = cpu_to_le32(ATH6KL_USB_CTRL_DIAG_CC_WRITE);
+	cmd->address = cpu_to_le32(address);
+	cmd->value = data;
+
+	ret = ath6kl_usb_ctrl_msg_exchange(ar_usb,
+					   ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
+					   (u8 *) cmd,
+					   sizeof(*cmd),
+					   0, NULL, NULL);
+	if (ret) {
+		ath6kl_warn("diag_write32 failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
+{
+	struct ath6kl_usb *ar_usb = ar->hif_priv;
+	int ret;
+
+	/* get response */
+	ret = ath6kl_usb_submit_ctrl_in(ar_usb,
+					ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP,
+					0, 0, buf, len);
+	if (ret) {
+		ath6kl_err("Unable to read the bmi data from the device: %d\n",
+			   ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath6kl_usb_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
+{
+	struct ath6kl_usb *ar_usb = ar->hif_priv;
+	int ret;
+
+	/* send command */
+	ret = ath6kl_usb_submit_ctrl_out(ar_usb,
+					 ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD,
+					 0, 0, buf, len);
+	if (ret) {
ath6kl_err("unable to send the bmi data to the device: %d\n", + ret); + return ret; + } + + return 0; +} + +static int ath6kl_usb_power_on(struct ath6kl *ar) +{ + hif_start(ar); + return 0; +} + +static int ath6kl_usb_power_off(struct ath6kl *ar) +{ + hif_detach_htc(ar); + return 0; +} + +static void ath6kl_usb_stop(struct ath6kl *ar) +{ + hif_stop(ar); +} + +static void ath6kl_usb_cleanup_scatter(struct ath6kl *ar) +{ + /* + * USB doesn't support it. Just return. + */ + return; +} + +static int ath6kl_usb_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) +{ + /* + * cfg80211 suspend/WOW currently not supported for USB. + */ + return 0; +} + +static int ath6kl_usb_resume(struct ath6kl *ar) +{ + /* + * cfg80211 resume currently not supported for USB. + */ + return 0; +} + +static const struct ath6kl_hif_ops ath6kl_usb_ops = { + .diag_read32 = ath6kl_usb_diag_read32, + .diag_write32 = ath6kl_usb_diag_write32, + .bmi_read = ath6kl_usb_bmi_read, + .bmi_write = ath6kl_usb_bmi_write, + .power_on = ath6kl_usb_power_on, + .power_off = ath6kl_usb_power_off, + .stop = ath6kl_usb_stop, + .pipe_send = ath6kl_usb_send, + .pipe_get_default = ath6kl_usb_get_default_pipe, + .pipe_map_service = ath6kl_usb_map_service_pipe, + .pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number, + .cleanup_scatter = ath6kl_usb_cleanup_scatter, + .suspend = ath6kl_usb_suspend, + .resume = ath6kl_usb_resume, +}; + +/* ath6kl usb driver registered functions */ +static int ath6kl_usb_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ + struct usb_device *dev = interface_to_usbdev(interface); + struct ath6kl *ar; + struct ath6kl_usb *ar_usb = NULL; + int vendor_id, product_id; + int ret = 0; + + usb_get_dev(dev); + + vendor_id = le16_to_cpu(dev->descriptor.idVendor); + product_id = le16_to_cpu(dev->descriptor.idProduct); + + ath6kl_dbg(ATH6KL_DBG_USB, "vendor_id = %04x\n", vendor_id); + ath6kl_dbg(ATH6KL_DBG_USB, "product_id = %04x\n", product_id); + + if (interface->cur_altsetting) + ath6kl_dbg(ATH6KL_DBG_USB, "USB Interface %d\n", + interface->cur_altsetting->desc.bInterfaceNumber); + + + if (dev->speed == USB_SPEED_HIGH) + ath6kl_dbg(ATH6KL_DBG_USB, "USB 2.0 Host\n"); + else + ath6kl_dbg(ATH6KL_DBG_USB, "USB 1.1 Host\n"); + + ar_usb = ath6kl_usb_create(interface); + + if (ar_usb == NULL) { + ret = -ENOMEM; + goto err_usb_put; + } + + ar = ath6kl_core_create(&ar_usb->udev->dev); + if (ar == NULL) { + ath6kl_err("Failed to alloc ath6kl core\n"); + ret = -ENOMEM; + goto err_usb_destroy; + } + + ar->hif_priv = ar_usb; + ar->hif_type = ATH6KL_HIF_TYPE_USB; + ar->hif_ops = &ath6kl_usb_ops; + ar->mbox_info.block_size = 16; + ar->bmi.max_data_size = 252; + + ar_usb->ar = ar; + + ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_PIPE); + if (ret) { + ath6kl_err("Failed to init ath6kl core: %d\n", ret); + goto err_core_free; + } + + return ret; + +err_core_free: + ath6kl_core_destroy(ar); +err_usb_destroy: + ath6kl_usb_destroy(ar_usb); +err_usb_put: + usb_put_dev(dev); + + return ret; +} + +static void ath6kl_usb_remove(struct usb_interface *interface) +{ + usb_put_dev(interface_to_usbdev(interface)); + ath6kl_usb_device_detached(interface); +} + +#ifdef CONFIG_PM + +static int ath6kl_usb_pm_suspend(struct usb_interface *interface, + pm_message_t message) +{ + struct ath6kl_usb *device; + device = usb_get_intfdata(interface); + + ath6kl_usb_flush_all(device); + return 0; +} + +static int ath6kl_usb_pm_resume(struct usb_interface *interface) +{ + struct ath6kl_usb *device; + device = 
usb_get_intfdata(interface); + + ath6kl_usb_post_recv_transfers(&device->pipes[ATH6KL_USB_PIPE_RX_DATA], + ATH6KL_USB_RX_BUFFER_SIZE); + ath6kl_usb_post_recv_transfers(&device->pipes[ATH6KL_USB_PIPE_RX_DATA2], + ATH6KL_USB_RX_BUFFER_SIZE); + + return 0; +} + +#else + +#define ath6kl_usb_pm_suspend NULL +#define ath6kl_usb_pm_resume NULL + +#endif + +/* table of devices that work with this driver */ +static const struct usb_device_id ath6kl_usb_ids[] = { + {USB_DEVICE(0x0cf3, 0x9375)}, + {USB_DEVICE(0x0cf3, 0x9374)}, + {USB_DEVICE(0x04da, 0x390d)}, + { /* Terminating entry */ }, +}; + +MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids); + +static struct usb_driver ath6kl_usb_driver = { + .name = "ath6kl_usb", + .probe = ath6kl_usb_probe, + .suspend = ath6kl_usb_pm_suspend, + .resume = ath6kl_usb_pm_resume, + .disconnect = ath6kl_usb_remove, + .id_table = ath6kl_usb_ids, + .supports_autosuspend = true, + .disable_hub_initiated_lpm = 1, +}; + +module_usb_driver(ath6kl_usb_driver); + +MODULE_AUTHOR("Atheros Communications, Inc."); +MODULE_DESCRIPTION("Driver support for Atheros AR600x USB devices"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_FIRMWARE(AR6004_HW_1_0_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_2_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_3_FW_DIR "/" AR6004_HW_1_3_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6004_HW_1_3_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE); diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c new file mode 100644 index 000000000..3787b9fb0 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -0,0 +1,4160 @@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/ip.h> +#include <linux/in.h> +#include "core.h" +#include "debug.h" +#include "testmode.h" +#include "trace.h" +#include "../regd.h" +#include "../regd_common.h" + +static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx); + +static const s32 wmi_rate_tbl[][2] = { + /* {W/O SGI, with SGI} */ + {1000, 1000}, + {2000, 2000}, + {5500, 5500}, + {11000, 11000}, + {6000, 6000}, + {9000, 9000}, + {12000, 12000}, + {18000, 18000}, + {24000, 24000}, + {36000, 36000}, + {48000, 48000}, + {54000, 54000}, + {6500, 7200}, + {13000, 14400}, + {19500, 21700}, + {26000, 28900}, + {39000, 43300}, + {52000, 57800}, + {58500, 65000}, + {65000, 72200}, + {13500, 15000}, + {27000, 30000}, + {40500, 45000}, + {54000, 60000}, + {81000, 90000}, + {108000, 120000}, + {121500, 135000}, + {135000, 150000}, + {0, 0} +}; + +static const s32 wmi_rate_tbl_mcs15[][2] = { + /* {W/O SGI, with SGI} */ + {1000, 1000}, + {2000, 2000}, + {5500, 5500}, + {11000, 11000}, + {6000, 6000}, + {9000, 9000}, + {12000, 12000}, + {18000, 18000}, + {24000, 24000}, + {36000, 36000}, + {48000, 48000}, + {54000, 54000}, + {6500, 7200}, /* HT 20, MCS 0 */ + {13000, 14400}, + {19500, 21700}, + {26000, 28900}, + {39000, 43300}, + {52000, 57800}, + {58500, 65000}, + {65000, 72200}, + {13000, 14400}, /* HT 20, MCS 8 */ + {26000, 28900}, + {39000, 43300}, + {52000, 57800}, + {78000, 86700}, + {104000, 115600}, + {117000, 130000}, + {130000, 144400}, /* HT 20, MCS 15 */ + {13500, 15000}, /*HT 40, MCS 0 */ + {27000, 30000}, + {40500, 45000}, + {54000, 60000}, + {81000, 90000}, + {108000, 120000}, + {121500, 135000}, + {135000, 150000}, + {27000, 30000}, /*HT 40, MCS 8 */ + {54000, 60000}, + {81000, 90000}, + {108000, 120000}, + {162000, 180000}, + {216000, 240000}, + {243000, 270000}, + {270000, 300000}, /*HT 40, MCS 15 */ + {0, 0} +}; + +/* 802.1d to AC mapping. Refer pg 57 of WMM-test-plan-v1.2 */ +static const u8 up_to_ac[] = { + WMM_AC_BE, + WMM_AC_BK, + WMM_AC_BK, + WMM_AC_BE, + WMM_AC_VI, + WMM_AC_VI, + WMM_AC_VO, + WMM_AC_VO, +}; + +void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id) +{ + if (WARN_ON(ep_id == ENDPOINT_UNUSED || ep_id >= ENDPOINT_MAX)) + return; + + wmi->ep_id = ep_id; +} + +enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi) +{ + return wmi->ep_id; +} + +struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx) +{ + struct ath6kl_vif *vif, *found = NULL; + + if (WARN_ON(if_idx > (ar->vif_max - 1))) + return NULL; + + /* FIXME: Locking */ + spin_lock_bh(&ar->list_lock); + list_for_each_entry(vif, &ar->vif_list, list) { + if (vif->fw_vif_idx == if_idx) { + found = vif; + break; + } + } + spin_unlock_bh(&ar->list_lock); + + return found; +} + +/* Performs DIX to 802.3 encapsulation for transmit packets. + * Assumes the entire DIX header is contiguous and that there is + * enough room in the buffer for a 802.3 mac header and LLC+SNAP headers. 
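+ *
+ * Resulting transformation, for illustration:
+ *
+ *   DIX:   | dst 6 | src 6 | ethertype 2 | payload |
+ *   802.3: | dst 6 | src 6 | length 2 | AA AA 03 00 00 00 | ethertype 2 | payload |
+ *                                      (LLC dsap/ssap/cntl + SNAP OUI)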
+ */ +int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb) +{ + struct ath6kl_llc_snap_hdr *llc_hdr; + struct ethhdr *eth_hdr; + size_t new_len; + __be16 type; + u8 *datap; + u16 size; + + if (WARN_ON(skb == NULL)) + return -EINVAL; + + size = sizeof(struct ath6kl_llc_snap_hdr) + sizeof(struct wmi_data_hdr); + if (skb_headroom(skb) < size) + return -ENOMEM; + + eth_hdr = (struct ethhdr *) skb->data; + type = eth_hdr->h_proto; + + if (!is_ethertype(be16_to_cpu(type))) { + ath6kl_dbg(ATH6KL_DBG_WMI, + "%s: pkt is already in 802.3 format\n", __func__); + return 0; + } + + new_len = skb->len - sizeof(*eth_hdr) + sizeof(*llc_hdr); + + skb_push(skb, sizeof(struct ath6kl_llc_snap_hdr)); + datap = skb->data; + + eth_hdr->h_proto = cpu_to_be16(new_len); + + memcpy(datap, eth_hdr, sizeof(*eth_hdr)); + + llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap + sizeof(*eth_hdr)); + llc_hdr->dsap = 0xAA; + llc_hdr->ssap = 0xAA; + llc_hdr->cntl = 0x03; + llc_hdr->org_code[0] = 0x0; + llc_hdr->org_code[1] = 0x0; + llc_hdr->org_code[2] = 0x0; + llc_hdr->eth_type = type; + + return 0; +} + +static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb, + u8 *version, void *tx_meta_info) +{ + struct wmi_tx_meta_v1 *v1; + struct wmi_tx_meta_v2 *v2; + + if (WARN_ON(skb == NULL || version == NULL)) + return -EINVAL; + + switch (*version) { + case WMI_META_VERSION_1: + skb_push(skb, WMI_MAX_TX_META_SZ); + v1 = (struct wmi_tx_meta_v1 *) skb->data; + v1->pkt_id = 0; + v1->rate_plcy_id = 0; + *version = WMI_META_VERSION_1; + break; + case WMI_META_VERSION_2: + skb_push(skb, WMI_MAX_TX_META_SZ); + v2 = (struct wmi_tx_meta_v2 *) skb->data; + memcpy(v2, (struct wmi_tx_meta_v2 *) tx_meta_info, + sizeof(struct wmi_tx_meta_v2)); + break; + } + + return 0; +} + +int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb, + u8 msg_type, u32 flags, + enum wmi_data_hdr_data_type data_type, + u8 meta_ver, void *tx_meta_info, u8 if_idx) +{ + struct wmi_data_hdr *data_hdr; + int ret; + + if (WARN_ON(skb == NULL || (if_idx > wmi->parent_dev->vif_max - 1))) + return -EINVAL; + + if (tx_meta_info) { + ret = ath6kl_wmi_meta_add(wmi, skb, &meta_ver, tx_meta_info); + if (ret) + return ret; + } + + skb_push(skb, sizeof(struct wmi_data_hdr)); + + data_hdr = (struct wmi_data_hdr *)skb->data; + memset(data_hdr, 0, sizeof(struct wmi_data_hdr)); + + data_hdr->info = msg_type << WMI_DATA_HDR_MSG_TYPE_SHIFT; + data_hdr->info |= data_type << WMI_DATA_HDR_DATA_TYPE_SHIFT; + + if (flags & WMI_DATA_HDR_FLAGS_MORE) + data_hdr->info |= WMI_DATA_HDR_MORE; + + if (flags & WMI_DATA_HDR_FLAGS_EOSP) + data_hdr->info3 |= cpu_to_le16(WMI_DATA_HDR_EOSP); + + data_hdr->info2 |= cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT); + data_hdr->info3 |= cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK); + + return 0; +} + +u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri) +{ + struct iphdr *ip_hdr = (struct iphdr *) pkt; + u8 ip_pri; + + /* + * Determine IPTOS priority + * + * IP-TOS - 8bits + * : DSCP(6-bits) ECN(2-bits) + * : DSCP - P2 P1 P0 X X X + * where (P2 P1 P0) form 802.1D + */ + ip_pri = ip_hdr->tos >> 5; + ip_pri &= 0x7; + + if ((layer2_pri & 0x7) > ip_pri) + return (u8) layer2_pri & 0x7; + else + return ip_pri; +} + +u8 ath6kl_wmi_get_traffic_class(u8 user_priority) +{ + return up_to_ac[user_priority & 0x7]; +} + +int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx, + struct sk_buff *skb, + u32 layer2_priority, bool wmm_enabled, + u8 *ac) +{ + struct wmi_data_hdr *data_hdr; + struct ath6kl_llc_snap_hdr 
*llc_hdr; + struct wmi_create_pstream_cmd cmd; + u32 meta_size, hdr_size; + u16 ip_type = IP_ETHERTYPE; + u8 stream_exist, usr_pri; + u8 traffic_class = WMM_AC_BE; + u8 *datap; + + if (WARN_ON(skb == NULL)) + return -EINVAL; + + datap = skb->data; + data_hdr = (struct wmi_data_hdr *) datap; + + meta_size = ((le16_to_cpu(data_hdr->info2) >> WMI_DATA_HDR_META_SHIFT) & + WMI_DATA_HDR_META_MASK) ? WMI_MAX_TX_META_SZ : 0; + + if (!wmm_enabled) { + /* If WMM is disabled all traffic goes as BE traffic */ + usr_pri = 0; + } else { + hdr_size = sizeof(struct ethhdr); + + llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap + + sizeof(struct + wmi_data_hdr) + + meta_size + hdr_size); + + if (llc_hdr->eth_type == htons(ip_type)) { + /* + * Extract the endpoint info from the TOS field + * in the IP header. + */ + usr_pri = + ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) + + sizeof(struct ath6kl_llc_snap_hdr), + layer2_priority); + } else { + usr_pri = layer2_priority & 0x7; + } + + /* + * Queue the EAPOL frames in the same WMM_AC_VO queue + * as that of management frames. + */ + if (skb->protocol == cpu_to_be16(ETH_P_PAE)) + usr_pri = WMI_VOICE_USER_PRIORITY; + } + + /* + * workaround for WMM S5 + * + * FIXME: wmi->traffic_class is always 100 so this test doesn't + * make sense + */ + if ((wmi->traffic_class == WMM_AC_VI) && + ((usr_pri == 5) || (usr_pri == 4))) + usr_pri = 1; + + /* Convert user priority to traffic class */ + traffic_class = up_to_ac[usr_pri & 0x7]; + + wmi_data_hdr_set_up(data_hdr, usr_pri); + + spin_lock_bh(&wmi->lock); + stream_exist = wmi->fat_pipe_exist; + spin_unlock_bh(&wmi->lock); + + if (!(stream_exist & (1 << traffic_class))) { + memset(&cmd, 0, sizeof(cmd)); + cmd.traffic_class = traffic_class; + cmd.user_pri = usr_pri; + cmd.inactivity_int = + cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT); + /* Implicit streams are created with TSID 0xFF */ + cmd.tsid = WMI_IMPLICIT_PSTREAM; + ath6kl_wmi_create_pstream_cmd(wmi, if_idx, &cmd); + } + + *ac = traffic_class; + + return 0; +} + +int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb) +{ + struct ieee80211_hdr_3addr *pwh, wh; + struct ath6kl_llc_snap_hdr *llc_hdr; + struct ethhdr eth_hdr; + u32 hdr_size; + u8 *datap; + __le16 sub_type; + + if (WARN_ON(skb == NULL)) + return -EINVAL; + + datap = skb->data; + pwh = (struct ieee80211_hdr_3addr *) datap; + + sub_type = pwh->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); + + memcpy((u8 *) &wh, datap, sizeof(struct ieee80211_hdr_3addr)); + + /* Strip off the 802.11 header */ + if (sub_type == cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { + hdr_size = roundup(sizeof(struct ieee80211_qos_hdr), + sizeof(u32)); + skb_pull(skb, hdr_size); + } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA)) { + skb_pull(skb, sizeof(struct ieee80211_hdr_3addr)); + } + + datap = skb->data; + llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap); + + memset(ð_hdr, 0, sizeof(eth_hdr)); + eth_hdr.h_proto = llc_hdr->eth_type; + + switch ((le16_to_cpu(wh.frame_control)) & + (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { + case IEEE80211_FCTL_TODS: + memcpy(eth_hdr.h_dest, wh.addr3, ETH_ALEN); + memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN); + break; + case IEEE80211_FCTL_FROMDS: + memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN); + memcpy(eth_hdr.h_source, wh.addr3, ETH_ALEN); + break; + case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS: + break; + default: + memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN); + memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN); + break; + } + + skb_pull(skb, sizeof(struct 
ath6kl_llc_snap_hdr)); + skb_push(skb, sizeof(eth_hdr)); + + datap = skb->data; + + memcpy(datap, ð_hdr, sizeof(eth_hdr)); + + return 0; +} + +/* + * Performs 802.3 to DIX encapsulation for received packets. + * Assumes the entire 802.3 header is contiguous. + */ +int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb) +{ + struct ath6kl_llc_snap_hdr *llc_hdr; + struct ethhdr eth_hdr; + u8 *datap; + + if (WARN_ON(skb == NULL)) + return -EINVAL; + + datap = skb->data; + + memcpy(ð_hdr, datap, sizeof(eth_hdr)); + + llc_hdr = (struct ath6kl_llc_snap_hdr *) (datap + sizeof(eth_hdr)); + eth_hdr.h_proto = llc_hdr->eth_type; + + skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr)); + datap = skb->data; + + memcpy(datap, ð_hdr, sizeof(eth_hdr)); + + return 0; +} + +static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len) +{ + struct tx_complete_msg_v1 *msg_v1; + struct wmi_tx_complete_event *evt; + int index; + u16 size; + + evt = (struct wmi_tx_complete_event *) datap; + + ath6kl_dbg(ATH6KL_DBG_WMI, "comp: %d %d %d\n", + evt->num_msg, evt->msg_len, evt->msg_type); + + for (index = 0; index < evt->num_msg; index++) { + size = sizeof(struct wmi_tx_complete_event) + + (index * sizeof(struct tx_complete_msg_v1)); + msg_v1 = (struct tx_complete_msg_v1 *)(datap + size); + + ath6kl_dbg(ATH6KL_DBG_WMI, "msg: %d %d %d %d\n", + msg_v1->status, msg_v1->pkt_id, + msg_v1->rate_idx, msg_v1->ack_failures); + } + + return 0; +} + +static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap, + int len, struct ath6kl_vif *vif) +{ + struct wmi_remain_on_chnl_event *ev; + u32 freq; + u32 dur; + struct ieee80211_channel *chan; + struct ath6kl *ar = wmi->parent_dev; + u32 id; + + if (len < sizeof(*ev)) + return -EINVAL; + + ev = (struct wmi_remain_on_chnl_event *) datap; + freq = le32_to_cpu(ev->freq); + dur = le32_to_cpu(ev->duration); + ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: freq=%u dur=%u\n", + freq, dur); + chan = ieee80211_get_channel(ar->wiphy, freq); + if (!chan) { + ath6kl_dbg(ATH6KL_DBG_WMI, + "remain_on_chnl: Unknown channel (freq=%u)\n", + freq); + return -EINVAL; + } + id = vif->last_roc_id; + cfg80211_ready_on_channel(&vif->wdev, id, chan, + dur, GFP_ATOMIC); + + return 0; +} + +static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi, + u8 *datap, int len, + struct ath6kl_vif *vif) +{ + struct wmi_cancel_remain_on_chnl_event *ev; + u32 freq; + u32 dur; + struct ieee80211_channel *chan; + struct ath6kl *ar = wmi->parent_dev; + u32 id; + + if (len < sizeof(*ev)) + return -EINVAL; + + ev = (struct wmi_cancel_remain_on_chnl_event *) datap; + freq = le32_to_cpu(ev->freq); + dur = le32_to_cpu(ev->duration); + ath6kl_dbg(ATH6KL_DBG_WMI, + "cancel_remain_on_chnl: freq=%u dur=%u status=%u\n", + freq, dur, ev->status); + chan = ieee80211_get_channel(ar->wiphy, freq); + if (!chan) { + ath6kl_dbg(ATH6KL_DBG_WMI, + "cancel_remain_on_chnl: Unknown channel (freq=%u)\n", + freq); + return -EINVAL; + } + if (vif->last_cancel_roc_id && + vif->last_cancel_roc_id + 1 == vif->last_roc_id) + id = vif->last_cancel_roc_id; /* event for cancel command */ + else + id = vif->last_roc_id; /* timeout on uncanceled r-o-c */ + vif->last_cancel_roc_id = 0; + cfg80211_remain_on_channel_expired(&vif->wdev, id, chan, GFP_ATOMIC); + + return 0; +} + +static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) +{ + struct wmi_tx_status_event *ev; + u32 id; + + if (len < sizeof(*ev)) + return -EINVAL; + + ev = (struct wmi_tx_status_event *) datap; + id = le32_to_cpu(ev->id); 
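+
+	/*
+	 * Only a single mgmt-tx frame is tracked at a time; the status
+	 * below is therefore reported against wmi->last_mgmt_tx_frame
+	 * rather than looked up by the event id.
+	 */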
+ ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n", + id, ev->ack_status); + if (wmi->last_mgmt_tx_frame) { + cfg80211_mgmt_tx_status(&vif->wdev, id, + wmi->last_mgmt_tx_frame, + wmi->last_mgmt_tx_frame_len, + !!ev->ack_status, GFP_ATOMIC); + kfree(wmi->last_mgmt_tx_frame); + wmi->last_mgmt_tx_frame = NULL; + wmi->last_mgmt_tx_frame_len = 0; + } + + return 0; +} + +static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) +{ + struct wmi_p2p_rx_probe_req_event *ev; + u32 freq; + u16 dlen; + + if (len < sizeof(*ev)) + return -EINVAL; + + ev = (struct wmi_p2p_rx_probe_req_event *) datap; + freq = le32_to_cpu(ev->freq); + dlen = le16_to_cpu(ev->len); + if (datap + len < ev->data + dlen) { + ath6kl_err("invalid wmi_p2p_rx_probe_req_event: len=%d dlen=%u\n", + len, dlen); + return -EINVAL; + } + ath6kl_dbg(ATH6KL_DBG_WMI, + "rx_probe_req: len=%u freq=%u probe_req_report=%d\n", + dlen, freq, vif->probe_req_report); + + if (vif->probe_req_report || vif->nw_type == AP_NETWORK) + cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0); + + return 0; +} + +static int ath6kl_wmi_p2p_capabilities_event_rx(u8 *datap, int len) +{ + struct wmi_p2p_capabilities_event *ev; + u16 dlen; + + if (len < sizeof(*ev)) + return -EINVAL; + + ev = (struct wmi_p2p_capabilities_event *) datap; + dlen = le16_to_cpu(ev->len); + ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_capab: len=%u\n", dlen); + + return 0; +} + +static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) +{ + struct wmi_rx_action_event *ev; + u32 freq; + u16 dlen; + + if (len < sizeof(*ev)) + return -EINVAL; + + ev = (struct wmi_rx_action_event *) datap; + freq = le32_to_cpu(ev->freq); + dlen = le16_to_cpu(ev->len); + if (datap + len < ev->data + dlen) { + ath6kl_err("invalid wmi_rx_action_event: len=%d dlen=%u\n", + len, dlen); + return -EINVAL; + } + ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq); + cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0); + + return 0; +} + +static int ath6kl_wmi_p2p_info_event_rx(u8 *datap, int len) +{ + struct wmi_p2p_info_event *ev; + u32 flags; + u16 dlen; + + if (len < sizeof(*ev)) + return -EINVAL; + + ev = (struct wmi_p2p_info_event *) datap; + flags = le32_to_cpu(ev->info_req_flags); + dlen = le16_to_cpu(ev->len); + ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: flags=%x len=%d\n", flags, dlen); + + if (flags & P2P_FLAG_CAPABILITIES_REQ) { + struct wmi_p2p_capabilities *cap; + if (dlen < sizeof(*cap)) + return -EINVAL; + cap = (struct wmi_p2p_capabilities *) ev->data; + ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: GO Power Save = %d\n", + cap->go_power_save); + } + + if (flags & P2P_FLAG_MACADDR_REQ) { + struct wmi_p2p_macaddr *mac; + if (dlen < sizeof(*mac)) + return -EINVAL; + mac = (struct wmi_p2p_macaddr *) ev->data; + ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: MAC Address = %pM\n", + mac->mac_addr); + } + + if (flags & P2P_FLAG_HMODEL_REQ) { + struct wmi_p2p_hmodel *mod; + if (dlen < sizeof(*mod)) + return -EINVAL; + mod = (struct wmi_p2p_hmodel *) ev->data; + ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: P2P Model = %d (%s)\n", + mod->p2p_model, + mod->p2p_model ? 
"host" : "firmware"); + } + return 0; +} + +static inline struct sk_buff *ath6kl_wmi_get_new_buf(u32 size) +{ + struct sk_buff *skb; + + skb = ath6kl_buf_alloc(size); + if (!skb) + return NULL; + + skb_put(skb, size); + if (size) + memset(skb->data, 0, size); + + return skb; +} + +/* Send a "simple" wmi command -- one with no arguments */ +static int ath6kl_wmi_simple_cmd(struct wmi *wmi, u8 if_idx, + enum wmi_cmd_id cmd_id) +{ + struct sk_buff *skb; + int ret; + + skb = ath6kl_wmi_get_new_buf(0); + if (!skb) + return -ENOMEM; + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, cmd_id, NO_SYNC_WMIFLAG); + + return ret; +} + +static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len) +{ + struct wmi_ready_event_2 *ev = (struct wmi_ready_event_2 *) datap; + + if (len < sizeof(struct wmi_ready_event_2)) + return -EINVAL; + + ath6kl_ready_event(wmi->parent_dev, ev->mac_addr, + le32_to_cpu(ev->sw_version), + le32_to_cpu(ev->abi_version), ev->phy_cap); + + return 0; +} + +/* + * Mechanism to modify the roaming behavior in the firmware. The lower rssi + * at which the station has to roam can be passed with + * WMI_SET_LRSSI_SCAN_PARAMS. Subtract 96 from RSSI to get the signal level + * in dBm. + */ +int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi) +{ + struct sk_buff *skb; + struct roam_ctrl_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct roam_ctrl_cmd *) skb->data; + + cmd->info.params.lrssi_scan_period = cpu_to_le16(DEF_LRSSI_SCAN_PERIOD); + cmd->info.params.lrssi_scan_threshold = a_cpu_to_sle16(lrssi + + DEF_SCAN_FOR_ROAM_INTVL); + cmd->info.params.lrssi_roam_threshold = a_cpu_to_sle16(lrssi); + cmd->info.params.roam_rssi_floor = DEF_LRSSI_ROAM_FLOOR; + cmd->roam_ctrl = WMI_SET_LRSSI_SCAN_PARAMS; + + return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID, + NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid) +{ + struct sk_buff *skb; + struct roam_ctrl_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct roam_ctrl_cmd *) skb->data; + + memcpy(cmd->info.bssid, bssid, ETH_ALEN); + cmd->roam_ctrl = WMI_FORCE_ROAM; + + ath6kl_dbg(ATH6KL_DBG_WMI, "force roam to %pM\n", bssid); + return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID, + NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_ap_set_beacon_intvl_cmd(struct wmi *wmi, u8 if_idx, + u32 beacon_intvl) +{ + struct sk_buff *skb; + struct set_beacon_int_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct set_beacon_int_cmd *) skb->data; + + cmd->beacon_intvl = cpu_to_le32(beacon_intvl); + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, + WMI_SET_BEACON_INT_CMDID, NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period) +{ + struct sk_buff *skb; + struct set_dtim_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct set_dtim_cmd *) skb->data; + + cmd->dtim_period = cpu_to_le32(dtim_period); + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, + WMI_AP_SET_DTIM_CMDID, NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode) +{ + struct sk_buff *skb; + struct roam_ctrl_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct roam_ctrl_cmd *) skb->data; + + cmd->info.roam_mode = mode; + cmd->roam_ctrl = WMI_SET_ROAM_MODE; + + 
ath6kl_dbg(ATH6KL_DBG_WMI, "set roam mode %d\n", mode);
+	return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+				   NO_SYNC_WMIFLAG);
+}
+
+static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len,
+				       struct ath6kl_vif *vif)
+{
+	struct wmi_connect_event *ev;
+	u8 *pie, *peie;
+
+	if (len < sizeof(struct wmi_connect_event))
+		return -EINVAL;
+
+	ev = (struct wmi_connect_event *) datap;
+
+	if (vif->nw_type == AP_NETWORK) {
+		/* AP mode start/STA connected event */
+		struct net_device *dev = vif->ndev;
+		if (memcmp(dev->dev_addr, ev->u.ap_bss.bssid, ETH_ALEN) == 0) {
+			ath6kl_dbg(ATH6KL_DBG_WMI,
+				   "%s: freq %d bssid %pM (AP started)\n",
+				   __func__, le16_to_cpu(ev->u.ap_bss.ch),
+				   ev->u.ap_bss.bssid);
+			ath6kl_connect_ap_mode_bss(
+				vif, le16_to_cpu(ev->u.ap_bss.ch));
+		} else {
+			ath6kl_dbg(ATH6KL_DBG_WMI,
+				   "%s: aid %u mac_addr %pM auth=%u keymgmt=%u cipher=%u apsd_info=%u (STA connected)\n",
+				   __func__, ev->u.ap_sta.aid,
+				   ev->u.ap_sta.mac_addr,
+				   ev->u.ap_sta.auth,
+				   ev->u.ap_sta.keymgmt,
+				   le16_to_cpu(ev->u.ap_sta.cipher),
+				   ev->u.ap_sta.apsd_info);
+
+			ath6kl_connect_ap_mode_sta(
+				vif, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
+				ev->u.ap_sta.keymgmt,
+				le16_to_cpu(ev->u.ap_sta.cipher),
+				ev->u.ap_sta.auth, ev->assoc_req_len,
+				ev->assoc_info + ev->beacon_ie_len,
+				ev->u.ap_sta.apsd_info);
+		}
+		return 0;
+	}
+
+	/* STA/IBSS mode connection event */
+
+	ath6kl_dbg(ATH6KL_DBG_WMI,
+		   "wmi event connect freq %d bssid %pM listen_intvl %d beacon_intvl %d type %d\n",
+		   le16_to_cpu(ev->u.sta.ch), ev->u.sta.bssid,
+		   le16_to_cpu(ev->u.sta.listen_intvl),
+		   le16_to_cpu(ev->u.sta.beacon_intvl),
+		   le32_to_cpu(ev->u.sta.nw_type));
+
+	/* Start of assoc rsp IEs */
+	pie = ev->assoc_info + ev->beacon_ie_len +
+	      ev->assoc_req_len + (sizeof(u16) * 3); /* capinfo, status, aid */
+
+	/* End of assoc rsp IEs */
+	peie = ev->assoc_info + ev->beacon_ie_len + ev->assoc_req_len +
+	       ev->assoc_resp_len;
+
+	while (pie < peie) {
+		switch (*pie) {
+		case WLAN_EID_VENDOR_SPECIFIC:
+			if (pie[1] > 3 && pie[2] == 0x00 && pie[3] == 0x50 &&
+			    pie[4] == 0xf2 && pie[5] == WMM_OUI_TYPE) {
+				/* WMM OUT (00:50:F2) */
+				if (pie[1] > 5 &&
+				    pie[6] == WMM_PARAM_OUI_SUBTYPE)
+					wmi->is_wmm_enabled = true;
+			}
+			break;
+		}
+
+		if (wmi->is_wmm_enabled)
+			break;
+
+		pie += pie[1] + 2;
+	}
+
+	ath6kl_connect_event(vif, le16_to_cpu(ev->u.sta.ch),
+			     ev->u.sta.bssid,
+			     le16_to_cpu(ev->u.sta.listen_intvl),
+			     le16_to_cpu(ev->u.sta.beacon_intvl),
+			     le32_to_cpu(ev->u.sta.nw_type),
+			     ev->beacon_ie_len, ev->assoc_req_len,
+			     ev->assoc_resp_len, ev->assoc_info);
+
+	return 0;
+}
+
+static struct country_code_to_enum_rd *
+ath6kl_regd_find_country(u16 countryCode)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+		if (allCountries[i].countryCode == countryCode)
+			return &allCountries[i];
+	}
+
+	return NULL;
+}
+
+static struct reg_dmn_pair_mapping *
+ath6kl_get_regpair(u16 regdmn)
+{
+	int i;
+
+	if (regdmn == NO_ENUMRD)
+		return NULL;
+
+	for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
+		if (regDomainPairs[i].reg_domain == regdmn)
+			return &regDomainPairs[i];
+	}
+
+	return NULL;
+}
+
+static struct country_code_to_enum_rd *
+ath6kl_regd_find_country_by_rd(u16 regdmn)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+		if (allCountries[i].regDmnEnum == regdmn)
+			return &allCountries[i];
+	}
+
+	return NULL;
+}
+
+static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
+{
+	struct ath6kl_wmi_regdomain *ev;
+	struct country_code_to_enum_rd *country = NULL;
+	struct
reg_dmn_pair_mapping *regpair = NULL; + char alpha2[2]; + u32 reg_code; + + ev = (struct ath6kl_wmi_regdomain *) datap; + reg_code = le32_to_cpu(ev->reg_code); + + if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG) { + country = ath6kl_regd_find_country((u16) reg_code); + } else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) { + regpair = ath6kl_get_regpair((u16) reg_code); + country = ath6kl_regd_find_country_by_rd((u16) reg_code); + if (regpair) + ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n", + regpair->reg_domain); + else + ath6kl_warn("Regpair not found reg_code 0x%0x\n", + reg_code); + } + + if (country && wmi->parent_dev->wiphy_registered) { + alpha2[0] = country->isoName[0]; + alpha2[1] = country->isoName[1]; + + regulatory_hint(wmi->parent_dev->wiphy, alpha2); + + ath6kl_dbg(ATH6KL_DBG_WMI, "Country alpha2 being used: %c%c\n", + alpha2[0], alpha2[1]); + } +} + +static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) +{ + struct wmi_disconnect_event *ev; + wmi->traffic_class = 100; + + if (len < sizeof(struct wmi_disconnect_event)) + return -EINVAL; + + ev = (struct wmi_disconnect_event *) datap; + + ath6kl_dbg(ATH6KL_DBG_WMI, + "wmi event disconnect proto_reason %d bssid %pM wmi_reason %d assoc_resp_len %d\n", + le16_to_cpu(ev->proto_reason_status), ev->bssid, + ev->disconn_reason, ev->assoc_resp_len); + + wmi->is_wmm_enabled = false; + + ath6kl_disconnect_event(vif, ev->disconn_reason, + ev->bssid, ev->assoc_resp_len, ev->assoc_info, + le16_to_cpu(ev->proto_reason_status)); + + return 0; +} + +static int ath6kl_wmi_peer_node_event_rx(struct wmi *wmi, u8 *datap, int len) +{ + struct wmi_peer_node_event *ev; + + if (len < sizeof(struct wmi_peer_node_event)) + return -EINVAL; + + ev = (struct wmi_peer_node_event *) datap; + + if (ev->event_code == PEER_NODE_JOIN_EVENT) + ath6kl_dbg(ATH6KL_DBG_WMI, "joined node with mac addr: %pM\n", + ev->peer_mac_addr); + else if (ev->event_code == PEER_NODE_LEAVE_EVENT) + ath6kl_dbg(ATH6KL_DBG_WMI, "left node with mac addr: %pM\n", + ev->peer_mac_addr); + + return 0; +} + +static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) +{ + struct wmi_tkip_micerr_event *ev; + + if (len < sizeof(struct wmi_tkip_micerr_event)) + return -EINVAL; + + ev = (struct wmi_tkip_micerr_event *) datap; + + ath6kl_tkip_micerr_event(vif, ev->key_id, ev->is_mcast); + + return 0; +} + +void ath6kl_wmi_sscan_timer(struct timer_list *t) +{ + struct ath6kl_vif *vif = from_timer(vif, t, sched_scan_timer); + + cfg80211_sched_scan_results(vif->ar->wiphy, 0); +} + +static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) +{ + struct wmi_bss_info_hdr2 *bih; + u8 *buf; + struct ieee80211_channel *channel; + struct ath6kl *ar = wmi->parent_dev; + struct cfg80211_bss *bss; + + if (len <= sizeof(struct wmi_bss_info_hdr2)) + return -EINVAL; + + bih = (struct wmi_bss_info_hdr2 *) datap; + buf = datap + sizeof(struct wmi_bss_info_hdr2); + len -= sizeof(struct wmi_bss_info_hdr2); + + ath6kl_dbg(ATH6KL_DBG_WMI, + "bss info evt - ch %u, snr %d, rssi %d, bssid \"%pM\" " + "frame_type=%d\n", + bih->ch, bih->snr, bih->snr - 95, bih->bssid, + bih->frame_type); + + if (bih->frame_type != BEACON_FTYPE && + bih->frame_type != PROBERESP_FTYPE) + return 0; /* Only update BSS table for now */ + + if (bih->frame_type == BEACON_FTYPE && + test_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags)) { + clear_bit(CLEAR_BSSFILTER_ON_BEACON, 
&vif->flags); + ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, + NONE_BSS_FILTER, 0); + } + + channel = ieee80211_get_channel(ar->wiphy, le16_to_cpu(bih->ch)); + if (channel == NULL) + return -EINVAL; + + if (len < 8 + 2 + 2) + return -EINVAL; + + if (bih->frame_type == BEACON_FTYPE && + test_bit(CONNECTED, &vif->flags) && + memcmp(bih->bssid, vif->bssid, ETH_ALEN) == 0) { + const u8 *tim; + tim = cfg80211_find_ie(WLAN_EID_TIM, buf + 8 + 2 + 2, + len - 8 - 2 - 2); + if (tim && tim[1] >= 2) { + vif->assoc_bss_dtim_period = tim[3]; + set_bit(DTIM_PERIOD_AVAIL, &vif->flags); + } + } + + bss = cfg80211_inform_bss(ar->wiphy, channel, + bih->frame_type == BEACON_FTYPE ? + CFG80211_BSS_FTYPE_BEACON : + CFG80211_BSS_FTYPE_PRESP, + bih->bssid, get_unaligned_le64((__le64 *)buf), + get_unaligned_le16(((__le16 *)buf) + 5), + get_unaligned_le16(((__le16 *)buf) + 4), + buf + 8 + 2 + 2, len - 8 - 2 - 2, + (bih->snr - 95) * 100, GFP_ATOMIC); + if (bss == NULL) + return -ENOMEM; + cfg80211_put_bss(ar->wiphy, bss); + + /* + * Firmware doesn't return any event when scheduled scan has + * finished, so we need to use a timer to find out when there are + * no more results. + * + * The timer is started from the first bss info received, otherwise + * the timer would not ever fire if the scan interval is short + * enough. + */ + if (test_bit(SCHED_SCANNING, &vif->flags) && + !timer_pending(&vif->sched_scan_timer)) { + mod_timer(&vif->sched_scan_timer, jiffies + + msecs_to_jiffies(ATH6KL_SCHED_SCAN_RESULT_DELAY)); + } + + return 0; +} + +/* Inactivity timeout of a fatpipe(pstream) at the target */ +static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap, + int len) +{ + struct wmi_pstream_timeout_event *ev; + + if (len < sizeof(struct wmi_pstream_timeout_event)) + return -EINVAL; + + ev = (struct wmi_pstream_timeout_event *) datap; + if (ev->traffic_class >= WMM_NUM_AC) { + ath6kl_err("invalid traffic class: %d\n", ev->traffic_class); + return -EINVAL; + } + + /* + * When the pstream (fat pipe == AC) timesout, it means there were + * no thinStreams within this pstream & it got implicitly created + * due to data flow on this AC. We start the inactivity timer only + * for implicitly created pstream. Just reset the host state. 
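+	 * (A "fat pipe" here corresponds to one WMM AC; a "thinstream" is a
+	 * single TSID within that AC.)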
+ */ + spin_lock_bh(&wmi->lock); + wmi->stream_exist_for_ac[ev->traffic_class] = 0; + wmi->fat_pipe_exist &= ~(1 << ev->traffic_class); + spin_unlock_bh(&wmi->lock); + + /* Indicate inactivity to driver layer for this fatpipe (pstream) */ + ath6kl_indicate_tx_activity(wmi->parent_dev, ev->traffic_class, false); + + return 0; +} + +static int ath6kl_wmi_bitrate_reply_rx(struct wmi *wmi, u8 *datap, int len) +{ + struct wmi_bit_rate_reply *reply; + u32 index; + + if (len < sizeof(struct wmi_bit_rate_reply)) + return -EINVAL; + + reply = (struct wmi_bit_rate_reply *) datap; + + ath6kl_dbg(ATH6KL_DBG_WMI, "rateindex %d\n", reply->rate_index); + + if (reply->rate_index != (s8) RATE_AUTO) { + index = reply->rate_index & 0x7f; + if (WARN_ON_ONCE(index > (RATE_MCS_7_40 + 1))) + return -EINVAL; + } + + ath6kl_wakeup_event(wmi->parent_dev); + + return 0; +} + +static int ath6kl_wmi_test_rx(struct wmi *wmi, u8 *datap, int len) +{ + ath6kl_tm_rx_event(wmi->parent_dev, datap, len); + + return 0; +} + +static int ath6kl_wmi_ratemask_reply_rx(struct wmi *wmi, u8 *datap, int len) +{ + if (len < sizeof(struct wmi_fix_rates_reply)) + return -EINVAL; + + ath6kl_wakeup_event(wmi->parent_dev); + + return 0; +} + +static int ath6kl_wmi_ch_list_reply_rx(struct wmi *wmi, u8 *datap, int len) +{ + if (len < sizeof(struct wmi_channel_list_reply)) + return -EINVAL; + + ath6kl_wakeup_event(wmi->parent_dev); + + return 0; +} + +static int ath6kl_wmi_tx_pwr_reply_rx(struct wmi *wmi, u8 *datap, int len) +{ + struct wmi_tx_pwr_reply *reply; + + if (len < sizeof(struct wmi_tx_pwr_reply)) + return -EINVAL; + + reply = (struct wmi_tx_pwr_reply *) datap; + ath6kl_txpwr_rx_evt(wmi->parent_dev, reply->dbM); + + return 0; +} + +static int ath6kl_wmi_keepalive_reply_rx(struct wmi *wmi, u8 *datap, int len) +{ + if (len < sizeof(struct wmi_get_keepalive_cmd)) + return -EINVAL; + + ath6kl_wakeup_event(wmi->parent_dev); + + return 0; +} + +static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) +{ + struct wmi_scan_complete_event *ev; + + ev = (struct wmi_scan_complete_event *) datap; + + ath6kl_scan_complete_evt(vif, a_sle32_to_cpu(ev->status)); + wmi->is_probe_ssid = false; + + return 0; +} + +static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap, + int len, struct ath6kl_vif *vif) +{ + struct wmi_neighbor_report_event *ev; + u8 i; + + if (len < sizeof(*ev)) + return -EINVAL; + ev = (struct wmi_neighbor_report_event *) datap; + if (struct_size(ev, neighbor, ev->num_neighbors) > len) { + ath6kl_dbg(ATH6KL_DBG_WMI, + "truncated neighbor event (num=%d len=%d)\n", + ev->num_neighbors, len); + return -EINVAL; + } + for (i = 0; i < ev->num_neighbors; i++) { + ath6kl_dbg(ATH6KL_DBG_WMI, "neighbor %d/%d - %pM 0x%x\n", + i + 1, ev->num_neighbors, ev->neighbor[i].bssid, + ev->neighbor[i].bss_flags); + cfg80211_pmksa_candidate_notify(vif->ndev, i, + ev->neighbor[i].bssid, + !!(ev->neighbor[i].bss_flags & + WMI_PREAUTH_CAPABLE_BSS), + GFP_ATOMIC); + } + + return 0; +} + +/* + * Target is reporting a programming error. This is for + * developer aid only. Target only checks a few common violations + * and it is responsibility of host to do all error checking. + * Behavior of target after wmi error event is undefined. + * A reset is recommended. 
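+ * The event payload carries just the offending command id and an
+ * error code; see ath6kl_wmi_error_event_rx() below.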
+ */ +static int ath6kl_wmi_error_event_rx(struct wmi *wmi, u8 *datap, int len) +{ + const char *type = "unknown error"; + struct wmi_cmd_error_event *ev; + ev = (struct wmi_cmd_error_event *) datap; + + switch (ev->err_code) { + case INVALID_PARAM: + type = "invalid parameter"; + break; + case ILLEGAL_STATE: + type = "invalid state"; + break; + case INTERNAL_ERROR: + type = "internal error"; + break; + } + + ath6kl_dbg(ATH6KL_DBG_WMI, "programming error, cmd=%d %s\n", + ev->cmd_id, type); + + return 0; +} + +static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) +{ + ath6kl_tgt_stats_event(vif, datap, len); + + return 0; +} + +static u8 ath6kl_wmi_get_upper_threshold(s16 rssi, + struct sq_threshold_params *sq_thresh, + u32 size) +{ + u32 index; + u8 threshold = (u8) sq_thresh->upper_threshold[size - 1]; + + /* The list is already in sorted order. Get the next lower value */ + for (index = 0; index < size; index++) { + if (rssi < sq_thresh->upper_threshold[index]) { + threshold = (u8) sq_thresh->upper_threshold[index]; + break; + } + } + + return threshold; +} + +static u8 ath6kl_wmi_get_lower_threshold(s16 rssi, + struct sq_threshold_params *sq_thresh, + u32 size) +{ + u32 index; + u8 threshold = (u8) sq_thresh->lower_threshold[size - 1]; + + /* The list is already in sorted order. Get the next lower value */ + for (index = 0; index < size; index++) { + if (rssi > sq_thresh->lower_threshold[index]) { + threshold = (u8) sq_thresh->lower_threshold[index]; + break; + } + } + + return threshold; +} + +static int ath6kl_wmi_send_rssi_threshold_params(struct wmi *wmi, + struct wmi_rssi_threshold_params_cmd *rssi_cmd) +{ + struct sk_buff *skb; + struct wmi_rssi_threshold_params_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data; + memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd)); + + return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID, + NO_SYNC_WMIFLAG); +} + +static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap, + int len) +{ + struct wmi_rssi_threshold_event *reply; + struct wmi_rssi_threshold_params_cmd cmd; + struct sq_threshold_params *sq_thresh; + enum wmi_rssi_threshold_val new_threshold; + u8 upper_rssi_threshold, lower_rssi_threshold; + s16 rssi; + int ret; + + if (len < sizeof(struct wmi_rssi_threshold_event)) + return -EINVAL; + + reply = (struct wmi_rssi_threshold_event *) datap; + new_threshold = (enum wmi_rssi_threshold_val) reply->range; + rssi = a_sle16_to_cpu(reply->rssi); + + sq_thresh = &wmi->sq_threshld[SIGNAL_QUALITY_METRICS_RSSI]; + + /* + * Identify the threshold breached and communicate that to the app. 
+ * After that install a new set of thresholds based on the signal + * quality reported by the target + */ + if (new_threshold) { + /* Upper threshold breached */ + if (rssi < sq_thresh->upper_threshold[0]) { + ath6kl_dbg(ATH6KL_DBG_WMI, + "spurious upper rssi threshold event: %d\n", + rssi); + } else if ((rssi < sq_thresh->upper_threshold[1]) && + (rssi >= sq_thresh->upper_threshold[0])) { + new_threshold = WMI_RSSI_THRESHOLD1_ABOVE; + } else if ((rssi < sq_thresh->upper_threshold[2]) && + (rssi >= sq_thresh->upper_threshold[1])) { + new_threshold = WMI_RSSI_THRESHOLD2_ABOVE; + } else if ((rssi < sq_thresh->upper_threshold[3]) && + (rssi >= sq_thresh->upper_threshold[2])) { + new_threshold = WMI_RSSI_THRESHOLD3_ABOVE; + } else if ((rssi < sq_thresh->upper_threshold[4]) && + (rssi >= sq_thresh->upper_threshold[3])) { + new_threshold = WMI_RSSI_THRESHOLD4_ABOVE; + } else if ((rssi < sq_thresh->upper_threshold[5]) && + (rssi >= sq_thresh->upper_threshold[4])) { + new_threshold = WMI_RSSI_THRESHOLD5_ABOVE; + } else if (rssi >= sq_thresh->upper_threshold[5]) { + new_threshold = WMI_RSSI_THRESHOLD6_ABOVE; + } + } else { + /* Lower threshold breached */ + if (rssi > sq_thresh->lower_threshold[0]) { + ath6kl_dbg(ATH6KL_DBG_WMI, + "spurious lower rssi threshold event: %d %d\n", + rssi, sq_thresh->lower_threshold[0]); + } else if ((rssi > sq_thresh->lower_threshold[1]) && + (rssi <= sq_thresh->lower_threshold[0])) { + new_threshold = WMI_RSSI_THRESHOLD6_BELOW; + } else if ((rssi > sq_thresh->lower_threshold[2]) && + (rssi <= sq_thresh->lower_threshold[1])) { + new_threshold = WMI_RSSI_THRESHOLD5_BELOW; + } else if ((rssi > sq_thresh->lower_threshold[3]) && + (rssi <= sq_thresh->lower_threshold[2])) { + new_threshold = WMI_RSSI_THRESHOLD4_BELOW; + } else if ((rssi > sq_thresh->lower_threshold[4]) && + (rssi <= sq_thresh->lower_threshold[3])) { + new_threshold = WMI_RSSI_THRESHOLD3_BELOW; + } else if ((rssi > sq_thresh->lower_threshold[5]) && + (rssi <= sq_thresh->lower_threshold[4])) { + new_threshold = WMI_RSSI_THRESHOLD2_BELOW; + } else if (rssi <= sq_thresh->lower_threshold[5]) { + new_threshold = WMI_RSSI_THRESHOLD1_BELOW; + } + } + + /* Calculate and install the next set of thresholds */ + lower_rssi_threshold = ath6kl_wmi_get_lower_threshold(rssi, sq_thresh, + sq_thresh->lower_threshold_valid_count); + upper_rssi_threshold = ath6kl_wmi_get_upper_threshold(rssi, sq_thresh, + sq_thresh->upper_threshold_valid_count); + + /* Issue a wmi command to install the thresholds */ + cmd.thresh_above1_val = a_cpu_to_sle16(upper_rssi_threshold); + cmd.thresh_below1_val = a_cpu_to_sle16(lower_rssi_threshold); + cmd.weight = sq_thresh->weight; + cmd.poll_time = cpu_to_le32(sq_thresh->polling_interval); + + ret = ath6kl_wmi_send_rssi_threshold_params(wmi, &cmd); + if (ret) { + ath6kl_err("unable to configure rssi thresholds\n"); + return -EIO; + } + + return 0; +} + +static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) +{ + struct wmi_cac_event *reply; + struct ieee80211_tspec_ie *ts; + u16 active_tsids, tsinfo; + u8 tsid, index; + u8 ts_id; + + if (len < sizeof(struct wmi_cac_event)) + return -EINVAL; + + reply = (struct wmi_cac_event *) datap; + if (reply->ac >= WMM_NUM_AC) { + ath6kl_err("invalid AC: %d\n", reply->ac); + return -EINVAL; + } + + if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) && + (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) { + ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion); + tsinfo = 
le16_to_cpu(ts->tsinfo); + tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) & + IEEE80211_WMM_IE_TSPEC_TID_MASK; + + ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx, + reply->ac, tsid); + } else if (reply->cac_indication == CAC_INDICATION_NO_RESP) { + /* + * Following assumes that there is only one outstanding + * ADDTS request when this event is received + */ + spin_lock_bh(&wmi->lock); + active_tsids = wmi->stream_exist_for_ac[reply->ac]; + spin_unlock_bh(&wmi->lock); + + for (index = 0; index < sizeof(active_tsids) * 8; index++) { + if ((active_tsids >> index) & 1) + break; + } + if (index < (sizeof(active_tsids) * 8)) + ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx, + reply->ac, index); + } + + /* + * Clear active tsids and Add missing handling + * for delete qos stream from AP + */ + else if (reply->cac_indication == CAC_INDICATION_DELETE) { + ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion); + tsinfo = le16_to_cpu(ts->tsinfo); + ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) & + IEEE80211_WMM_IE_TSPEC_TID_MASK); + + spin_lock_bh(&wmi->lock); + wmi->stream_exist_for_ac[reply->ac] &= ~(1 << ts_id); + active_tsids = wmi->stream_exist_for_ac[reply->ac]; + spin_unlock_bh(&wmi->lock); + + /* Indicate stream inactivity to driver layer only if all tsids + * within this AC are deleted. + */ + if (!active_tsids) { + ath6kl_indicate_tx_activity(wmi->parent_dev, reply->ac, + false); + wmi->fat_pipe_exist &= ~(1 << reply->ac); + } + } + + return 0; +} + +static int ath6kl_wmi_txe_notify_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) +{ + struct wmi_txe_notify_event *ev; + u32 rate, pkts; + + if (len < sizeof(*ev)) + return -EINVAL; + + if (vif->nw_type != INFRA_NETWORK || + !test_bit(ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, + vif->ar->fw_capabilities)) + return -EOPNOTSUPP; + + if (vif->sme_state != SME_CONNECTED) + return -ENOTCONN; + + ev = (struct wmi_txe_notify_event *) datap; + rate = le32_to_cpu(ev->rate); + pkts = le32_to_cpu(ev->pkts); + + ath6kl_dbg(ATH6KL_DBG_WMI, "TXE notify event: peer %pM rate %d%% pkts %d intvl %ds\n", + vif->bssid, rate, pkts, vif->txe_intvl); + + cfg80211_cqm_txe_notify(vif->ndev, vif->bssid, pkts, + rate, vif->txe_intvl, GFP_KERNEL); + + return 0; +} + +int ath6kl_wmi_set_txe_notify(struct wmi *wmi, u8 idx, + u32 rate, u32 pkts, u32 intvl) +{ + struct sk_buff *skb; + struct wmi_txe_notify_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_txe_notify_cmd *) skb->data; + cmd->rate = cpu_to_le32(rate); + cmd->pkts = cpu_to_le32(pkts); + cmd->intvl = cpu_to_le32(intvl); + + return ath6kl_wmi_cmd_send(wmi, idx, skb, WMI_SET_TXE_NOTIFY_CMDID, + NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi) +{ + struct sk_buff *skb; + struct wmi_set_rssi_filter_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_rssi_filter_cmd *) skb->data; + cmd->rssi = rssi; + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_RSSI_FILTER_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi, + struct wmi_snr_threshold_params_cmd *snr_cmd) +{ + struct sk_buff *skb; + struct wmi_snr_threshold_params_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_snr_threshold_params_cmd *) skb->data; + memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd)); 
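+	/* Copy the caller's parameter block into a freshly allocated skb,
+	 * as ath6kl_wmi_cmd_send() takes ownership of the buffer.
+	 */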
+ + return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID, + NO_SYNC_WMIFLAG); +} + +static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap, + int len) +{ + struct wmi_snr_threshold_event *reply; + struct sq_threshold_params *sq_thresh; + struct wmi_snr_threshold_params_cmd cmd; + enum wmi_snr_threshold_val new_threshold; + u8 upper_snr_threshold, lower_snr_threshold; + s16 snr; + int ret; + + if (len < sizeof(struct wmi_snr_threshold_event)) + return -EINVAL; + + reply = (struct wmi_snr_threshold_event *) datap; + + new_threshold = (enum wmi_snr_threshold_val) reply->range; + snr = reply->snr; + + sq_thresh = &wmi->sq_threshld[SIGNAL_QUALITY_METRICS_SNR]; + + /* + * Identify the threshold breached and communicate that to the app. + * After that install a new set of thresholds based on the signal + * quality reported by the target. + */ + if (new_threshold) { + /* Upper threshold breached */ + if (snr < sq_thresh->upper_threshold[0]) { + ath6kl_dbg(ATH6KL_DBG_WMI, + "spurious upper snr threshold event: %d\n", + snr); + } else if ((snr < sq_thresh->upper_threshold[1]) && + (snr >= sq_thresh->upper_threshold[0])) { + new_threshold = WMI_SNR_THRESHOLD1_ABOVE; + } else if ((snr < sq_thresh->upper_threshold[2]) && + (snr >= sq_thresh->upper_threshold[1])) { + new_threshold = WMI_SNR_THRESHOLD2_ABOVE; + } else if ((snr < sq_thresh->upper_threshold[3]) && + (snr >= sq_thresh->upper_threshold[2])) { + new_threshold = WMI_SNR_THRESHOLD3_ABOVE; + } else if (snr >= sq_thresh->upper_threshold[3]) { + new_threshold = WMI_SNR_THRESHOLD4_ABOVE; + } + } else { + /* Lower threshold breached */ + if (snr > sq_thresh->lower_threshold[0]) { + ath6kl_dbg(ATH6KL_DBG_WMI, + "spurious lower snr threshold event: %d\n", + sq_thresh->lower_threshold[0]); + } else if ((snr > sq_thresh->lower_threshold[1]) && + (snr <= sq_thresh->lower_threshold[0])) { + new_threshold = WMI_SNR_THRESHOLD4_BELOW; + } else if ((snr > sq_thresh->lower_threshold[2]) && + (snr <= sq_thresh->lower_threshold[1])) { + new_threshold = WMI_SNR_THRESHOLD3_BELOW; + } else if ((snr > sq_thresh->lower_threshold[3]) && + (snr <= sq_thresh->lower_threshold[2])) { + new_threshold = WMI_SNR_THRESHOLD2_BELOW; + } else if (snr <= sq_thresh->lower_threshold[3]) { + new_threshold = WMI_SNR_THRESHOLD1_BELOW; + } + } + + /* Calculate and install the next set of thresholds */ + lower_snr_threshold = ath6kl_wmi_get_lower_threshold(snr, sq_thresh, + sq_thresh->lower_threshold_valid_count); + upper_snr_threshold = ath6kl_wmi_get_upper_threshold(snr, sq_thresh, + sq_thresh->upper_threshold_valid_count); + + /* Issue a wmi command to install the thresholds */ + cmd.thresh_above1_val = upper_snr_threshold; + cmd.thresh_below1_val = lower_snr_threshold; + cmd.weight = sq_thresh->weight; + cmd.poll_time = cpu_to_le32(sq_thresh->polling_interval); + + ath6kl_dbg(ATH6KL_DBG_WMI, + "snr: %d, threshold: %d, lower: %d, upper: %d\n", + snr, new_threshold, + lower_snr_threshold, upper_snr_threshold); + + ret = ath6kl_wmi_send_snr_threshold_params(wmi, &cmd); + if (ret) { + ath6kl_err("unable to configure snr threshold\n"); + return -EIO; + } + + return 0; +} + +static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len) +{ + struct wmi_aplist_event *ev = (struct wmi_aplist_event *) datap; + struct wmi_ap_info_v1 *ap_info_v1; + u8 index; + + if (len < sizeof(struct wmi_aplist_event) || + ev->ap_list_ver != APLIST_VER1) + return -EINVAL; + + ap_info_v1 = (struct wmi_ap_info_v1 *) ev->ap_list; + + ath6kl_dbg(ATH6KL_DBG_WMI, + 
"number of APs in aplist event: %d\n", ev->num_ap); + + if (len < struct_size(ev, ap_list, ev->num_ap)) + return -EINVAL; + + /* AP list version 1 contents */ + for (index = 0; index < ev->num_ap; index++) { + ath6kl_dbg(ATH6KL_DBG_WMI, "AP#%d BSSID %pM Channel %d\n", + index, ap_info_v1->bssid, ap_info_v1->channel); + ap_info_v1++; + } + + return 0; +} + +int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb, + enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag) +{ + struct wmi_cmd_hdr *cmd_hdr; + enum htc_endpoint_id ep_id = wmi->ep_id; + int ret; + u16 info1; + + if (WARN_ON(skb == NULL || + (if_idx > (wmi->parent_dev->vif_max - 1)))) { + dev_kfree_skb(skb); + return -EINVAL; + } + + ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n", + cmd_id, skb->len, sync_flag); + ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi tx ", + skb->data, skb->len); + + if (sync_flag >= END_WMIFLAG) { + dev_kfree_skb(skb); + return -EINVAL; + } + + if ((sync_flag == SYNC_BEFORE_WMIFLAG) || + (sync_flag == SYNC_BOTH_WMIFLAG)) { + /* + * Make sure all data currently queued is transmitted before + * the cmd execution. Establish a new sync point. + */ + ath6kl_wmi_sync_point(wmi, if_idx); + } + + skb_push(skb, sizeof(struct wmi_cmd_hdr)); + + cmd_hdr = (struct wmi_cmd_hdr *) skb->data; + cmd_hdr->cmd_id = cpu_to_le16(cmd_id); + info1 = if_idx & WMI_CMD_HDR_IF_ID_MASK; + cmd_hdr->info1 = cpu_to_le16(info1); + + /* Only for OPT_TX_CMD, use BE endpoint. */ + if (cmd_id == WMI_OPT_TX_FRAME_CMDID) { + ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE, false, + WMI_DATA_HDR_DATA_TYPE_802_3, 0, NULL, if_idx); + if (ret) { + dev_kfree_skb(skb); + return ret; + } + ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev, WMM_AC_BE); + } + + ath6kl_control_tx(wmi->parent_dev, skb, ep_id); + + if ((sync_flag == SYNC_AFTER_WMIFLAG) || + (sync_flag == SYNC_BOTH_WMIFLAG)) { + /* + * Make sure all new data queued waits for the command to + * execute. Establish a new sync point. 
+ */ + ath6kl_wmi_sync_point(wmi, if_idx); + } + + return 0; +} + +int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx, + enum network_type nw_type, + enum dot11_auth_mode dot11_auth_mode, + enum auth_mode auth_mode, + enum ath6kl_crypto_type pairwise_crypto, + u8 pairwise_crypto_len, + enum ath6kl_crypto_type group_crypto, + u8 group_crypto_len, int ssid_len, u8 *ssid, + u8 *bssid, u16 channel, u32 ctrl_flags, + u8 nw_subtype) +{ + struct sk_buff *skb; + struct wmi_connect_cmd *cc; + int ret; + + ath6kl_dbg(ATH6KL_DBG_WMI, + "wmi connect bssid %pM freq %d flags 0x%x ssid_len %d " + "type %d dot11_auth %d auth %d pairwise %d group %d\n", + bssid, channel, ctrl_flags, ssid_len, nw_type, + dot11_auth_mode, auth_mode, pairwise_crypto, group_crypto); + ath6kl_dbg_dump(ATH6KL_DBG_WMI, NULL, "ssid ", ssid, ssid_len); + + wmi->traffic_class = 100; + + if ((pairwise_crypto == NONE_CRYPT) && (group_crypto != NONE_CRYPT)) + return -EINVAL; + + if ((pairwise_crypto != NONE_CRYPT) && (group_crypto == NONE_CRYPT)) + return -EINVAL; + + skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_connect_cmd)); + if (!skb) + return -ENOMEM; + + cc = (struct wmi_connect_cmd *) skb->data; + + if (ssid_len) + memcpy(cc->ssid, ssid, ssid_len); + + cc->ssid_len = ssid_len; + cc->nw_type = nw_type; + cc->dot11_auth_mode = dot11_auth_mode; + cc->auth_mode = auth_mode; + cc->prwise_crypto_type = pairwise_crypto; + cc->prwise_crypto_len = pairwise_crypto_len; + cc->grp_crypto_type = group_crypto; + cc->grp_crypto_len = group_crypto_len; + cc->ch = cpu_to_le16(channel); + cc->ctrl_flags = cpu_to_le32(ctrl_flags); + cc->nw_subtype = nw_subtype; + + if (bssid != NULL) + memcpy(cc->bssid, bssid, ETH_ALEN); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CONNECT_CMDID, + NO_SYNC_WMIFLAG); + + return ret; +} + +int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid, + u16 channel) +{ + struct sk_buff *skb; + struct wmi_reconnect_cmd *cc; + int ret; + + ath6kl_dbg(ATH6KL_DBG_WMI, "wmi reconnect bssid %pM freq %d\n", + bssid, channel); + + wmi->traffic_class = 100; + + skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_reconnect_cmd)); + if (!skb) + return -ENOMEM; + + cc = (struct wmi_reconnect_cmd *) skb->data; + cc->channel = cpu_to_le16(channel); + + if (bssid != NULL) + memcpy(cc->bssid, bssid, ETH_ALEN); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RECONNECT_CMDID, + NO_SYNC_WMIFLAG); + + return ret; +} + +int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx) +{ + int ret; + + ath6kl_dbg(ATH6KL_DBG_WMI, "wmi disconnect\n"); + + wmi->traffic_class = 100; + + /* Disconnect command does not need to do a SYNC before. */ + ret = ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_DISCONNECT_CMDID); + + return ret; +} + +/* ath6kl_wmi_start_scan_cmd is to be deprecated. Use + * ath6kl_wmi_begin_scan_cmd instead. The new function supports P2P + * mgmt operations using station interface. 
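+ * Only firmware lacking ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX still
+ * takes this path; see the fallback in ath6kl_wmi_beginscan_cmd().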
+ */ +static int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx, + enum wmi_scan_type scan_type, + u32 force_fgscan, u32 is_legacy, + u32 home_dwell_time, + u32 force_scan_interval, + s8 num_chan, u16 *ch_list) +{ + struct sk_buff *skb; + struct wmi_start_scan_cmd *sc; + int i, ret; + + if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN)) + return -EINVAL; + + if (num_chan > WMI_MAX_CHANNELS) + return -EINVAL; + + skb = ath6kl_wmi_get_new_buf(struct_size(sc, ch_list, num_chan)); + if (!skb) + return -ENOMEM; + + sc = (struct wmi_start_scan_cmd *) skb->data; + sc->scan_type = scan_type; + sc->force_fg_scan = cpu_to_le32(force_fgscan); + sc->is_legacy = cpu_to_le32(is_legacy); + sc->home_dwell_time = cpu_to_le32(home_dwell_time); + sc->force_scan_intvl = cpu_to_le32(force_scan_interval); + sc->num_ch = num_chan; + + for (i = 0; i < num_chan; i++) + sc->ch_list[i] = cpu_to_le16(ch_list[i]); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID, + NO_SYNC_WMIFLAG); + + return ret; +} + +/* + * beginscan supports (compared to old startscan) P2P mgmt operations using + * station interface, send additional information like supported rates to + * advertise and xmit rates for probe requests + */ +int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx, + enum wmi_scan_type scan_type, + u32 force_fgscan, u32 is_legacy, + u32 home_dwell_time, u32 force_scan_interval, + s8 num_chan, u16 *ch_list, u32 no_cck, u32 *rates) +{ + struct ieee80211_supported_band *sband; + struct sk_buff *skb; + struct wmi_begin_scan_cmd *sc; + s8 *supp_rates; + int i, band, ret; + struct ath6kl *ar = wmi->parent_dev; + int num_rates; + u32 ratemask; + + if (!test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, + ar->fw_capabilities)) { + return ath6kl_wmi_startscan_cmd(wmi, if_idx, + scan_type, force_fgscan, + is_legacy, home_dwell_time, + force_scan_interval, + num_chan, ch_list); + } + + if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN)) + return -EINVAL; + + if (num_chan > WMI_MAX_CHANNELS) + return -EINVAL; + + skb = ath6kl_wmi_get_new_buf(struct_size(sc, ch_list, num_chan)); + if (!skb) + return -ENOMEM; + + sc = (struct wmi_begin_scan_cmd *) skb->data; + sc->scan_type = scan_type; + sc->force_fg_scan = cpu_to_le32(force_fgscan); + sc->is_legacy = cpu_to_le32(is_legacy); + sc->home_dwell_time = cpu_to_le32(home_dwell_time); + sc->force_scan_intvl = cpu_to_le32(force_scan_interval); + sc->no_cck = cpu_to_le32(no_cck); + sc->num_ch = num_chan; + + for (band = 0; band < NUM_NL80211_BANDS; band++) { + sband = ar->wiphy->bands[band]; + + if (!sband) + continue; + + if (WARN_ON(band >= ATH6KL_NUM_BANDS)) + break; + + ratemask = rates[band]; + supp_rates = sc->supp_rates[band].rates; + num_rates = 0; + + for (i = 0; i < sband->n_bitrates; i++) { + if ((BIT(i) & ratemask) == 0) + continue; /* skip rate */ + supp_rates[num_rates++] = + (u8) (sband->bitrates[i].bitrate / 5); + } + sc->supp_rates[band].nrates = num_rates; + } + + for (i = 0; i < num_chan; i++) + sc->ch_list[i] = cpu_to_le16(ch_list[i]); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_BEGIN_SCAN_CMDID, + NO_SYNC_WMIFLAG); + + return ret; +} + +int ath6kl_wmi_enable_sched_scan_cmd(struct wmi *wmi, u8 if_idx, bool enable) +{ + struct sk_buff *skb; + struct wmi_enable_sched_scan_cmd *sc; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*sc)); + if (!skb) + return -ENOMEM; + + ath6kl_dbg(ATH6KL_DBG_WMI, "%s scheduled scan on vif %d\n", + enable ? 
"enabling" : "disabling", if_idx); + sc = (struct wmi_enable_sched_scan_cmd *) skb->data; + sc->enable = enable ? 1 : 0; + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, + WMI_ENABLE_SCHED_SCAN_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, + u16 fg_start_sec, + u16 fg_end_sec, u16 bg_sec, + u16 minact_chdw_msec, u16 maxact_chdw_msec, + u16 pas_chdw_msec, u8 short_scan_ratio, + u8 scan_ctrl_flag, u32 max_dfsch_act_time, + u16 maxact_scan_per_ssid) +{ + struct sk_buff *skb; + struct wmi_scan_params_cmd *sc; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*sc)); + if (!skb) + return -ENOMEM; + + sc = (struct wmi_scan_params_cmd *) skb->data; + sc->fg_start_period = cpu_to_le16(fg_start_sec); + sc->fg_end_period = cpu_to_le16(fg_end_sec); + sc->bg_period = cpu_to_le16(bg_sec); + sc->minact_chdwell_time = cpu_to_le16(minact_chdw_msec); + sc->maxact_chdwell_time = cpu_to_le16(maxact_chdw_msec); + sc->pas_chdwell_time = cpu_to_le16(pas_chdw_msec); + sc->short_scan_ratio = short_scan_ratio; + sc->scan_ctrl_flags = scan_ctrl_flag; + sc->max_dfsch_act_time = cpu_to_le32(max_dfsch_act_time); + sc->maxact_scan_per_ssid = cpu_to_le16(maxact_scan_per_ssid); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_SCAN_PARAMS_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter, u32 ie_mask) +{ + struct sk_buff *skb; + struct wmi_bss_filter_cmd *cmd; + int ret; + + if (filter >= LAST_BSS_FILTER) + return -EINVAL; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_bss_filter_cmd *) skb->data; + cmd->bss_filter = filter; + cmd->ie_mask = cpu_to_le32(ie_mask); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BSS_FILTER_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag, + u8 ssid_len, u8 *ssid) +{ + struct sk_buff *skb; + struct wmi_probed_ssid_cmd *cmd; + int ret; + + if (index >= MAX_PROBED_SSIDS) + return -EINVAL; + + if (ssid_len > sizeof(cmd->ssid)) + return -EINVAL; + + if ((flag & (DISABLE_SSID_FLAG | ANY_SSID_FLAG)) && (ssid_len > 0)) + return -EINVAL; + + if ((flag & SPECIFIC_SSID_FLAG) && !ssid_len) + return -EINVAL; + + if (flag & SPECIFIC_SSID_FLAG) + wmi->is_probe_ssid = true; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_probed_ssid_cmd *) skb->data; + cmd->entry_index = index; + cmd->flag = flag; + cmd->ssid_len = ssid_len; + memcpy(cmd->ssid, ssid, ssid_len); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PROBED_SSID_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx, + u16 listen_interval, + u16 listen_beacons) +{ + struct sk_buff *skb; + struct wmi_listen_int_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_listen_int_cmd *) skb->data; + cmd->listen_intvl = cpu_to_le16(listen_interval); + cmd->num_beacons = cpu_to_le16(listen_beacons); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LISTEN_INT_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_bmisstime_cmd(struct wmi *wmi, u8 if_idx, + u16 bmiss_time, u16 num_beacons) +{ + struct sk_buff *skb; + struct wmi_bmiss_time_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_bmiss_time_cmd *) skb->data; + cmd->bmiss_time = 
cpu_to_le16(bmiss_time); + cmd->num_beacons = cpu_to_le16(num_beacons); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BMISS_TIME_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode) +{ + struct sk_buff *skb; + struct wmi_power_mode_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_power_mode_cmd *) skb->data; + cmd->pwr_mode = pwr_mode; + wmi->pwr_mode = pwr_mode; + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_MODE_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period, + u16 ps_poll_num, u16 dtim_policy, + u16 tx_wakeup_policy, u16 num_tx_to_wakeup, + u16 ps_fail_event_policy) +{ + struct sk_buff *skb; + struct wmi_power_params_cmd *pm; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*pm)); + if (!skb) + return -ENOMEM; + + pm = (struct wmi_power_params_cmd *)skb->data; + pm->idle_period = cpu_to_le16(idle_period); + pm->pspoll_number = cpu_to_le16(ps_poll_num); + pm->dtim_policy = cpu_to_le16(dtim_policy); + pm->tx_wakeup_policy = cpu_to_le16(tx_wakeup_policy); + pm->num_tx_to_wakeup = cpu_to_le16(num_tx_to_wakeup); + pm->ps_fail_event_policy = cpu_to_le16(ps_fail_event_policy); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_PARAMS_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout) +{ + struct sk_buff *skb; + struct wmi_disc_timeout_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_disc_timeout_cmd *) skb->data; + cmd->discon_timeout = timeout; + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_DISC_TIMEOUT_CMDID, + NO_SYNC_WMIFLAG); + + if (ret == 0) + ath6kl_debug_set_disconnect_timeout(wmi->parent_dev, timeout); + + return ret; +} + +int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index, + enum ath6kl_crypto_type key_type, + u8 key_usage, u8 key_len, + u8 *key_rsc, unsigned int key_rsc_len, + u8 *key_material, + u8 key_op_ctrl, u8 *mac_addr, + enum wmi_sync_flag sync_flag) +{ + struct sk_buff *skb; + struct wmi_add_cipher_key_cmd *cmd; + int ret; + + ath6kl_dbg(ATH6KL_DBG_WMI, + "addkey cmd: key_index=%u key_type=%d key_usage=%d key_len=%d key_op_ctrl=%d\n", + key_index, key_type, key_usage, key_len, key_op_ctrl); + + if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) || + (key_material == NULL) || key_rsc_len > 8) + return -EINVAL; + + if ((WEP_CRYPT != key_type) && (NULL == key_rsc)) + return -EINVAL; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_add_cipher_key_cmd *) skb->data; + cmd->key_index = key_index; + cmd->key_type = key_type; + cmd->key_usage = key_usage; + cmd->key_len = key_len; + memcpy(cmd->key, key_material, key_len); + + if (key_rsc != NULL) + memcpy(cmd->key_rsc, key_rsc, key_rsc_len); + + cmd->key_op_ctrl = key_op_ctrl; + + if (mac_addr) + memcpy(cmd->key_mac_addr, mac_addr, ETH_ALEN); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_CIPHER_KEY_CMDID, + sync_flag); + + return ret; +} + +int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, const u8 *krk) +{ + struct sk_buff *skb; + struct wmi_add_krk_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_add_krk_cmd *) skb->data; + memcpy(cmd->krk, krk, WMI_KRK_LEN); + + ret = 
ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_KRK_CMDID, + NO_SYNC_WMIFLAG); + + return ret; +} + +int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index) +{ + struct sk_buff *skb; + struct wmi_delete_cipher_key_cmd *cmd; + int ret; + + if (key_index > WMI_MAX_KEY_INDEX) + return -EINVAL; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_delete_cipher_key_cmd *) skb->data; + cmd->key_index = key_index; + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_CIPHER_KEY_CMDID, + NO_SYNC_WMIFLAG); + + return ret; +} + +int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid, + const u8 *pmkid, bool set) +{ + struct sk_buff *skb; + struct wmi_setpmkid_cmd *cmd; + int ret; + + if (bssid == NULL) + return -EINVAL; + + if (set && pmkid == NULL) + return -EINVAL; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_setpmkid_cmd *) skb->data; + memcpy(cmd->bssid, bssid, ETH_ALEN); + if (set) { + memcpy(cmd->pmkid, pmkid, sizeof(cmd->pmkid)); + cmd->enable = PMKID_ENABLE; + } else { + memset(cmd->pmkid, 0, sizeof(cmd->pmkid)); + cmd->enable = PMKID_DISABLE; + } + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PMKID_CMDID, + NO_SYNC_WMIFLAG); + + return ret; +} + +static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb, + enum htc_endpoint_id ep_id, u8 if_idx) +{ + struct wmi_data_hdr *data_hdr; + int ret; + + if (WARN_ON(skb == NULL || ep_id == wmi->ep_id)) { + dev_kfree_skb(skb); + return -EINVAL; + } + + skb_push(skb, sizeof(struct wmi_data_hdr)); + + data_hdr = (struct wmi_data_hdr *) skb->data; + data_hdr->info = SYNC_MSGTYPE << WMI_DATA_HDR_MSG_TYPE_SHIFT; + data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK); + + ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id); + + return ret; +} + +static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx) +{ + struct sk_buff *skb; + struct wmi_sync_cmd *cmd; + struct wmi_data_sync_bufs data_sync_bufs[WMM_NUM_AC]; + enum htc_endpoint_id ep_id; + u8 index, num_pri_streams = 0; + int ret = 0; + + memset(data_sync_bufs, 0, sizeof(data_sync_bufs)); + + spin_lock_bh(&wmi->lock); + + for (index = 0; index < WMM_NUM_AC; index++) { + if (wmi->fat_pipe_exist & (1 << index)) { + num_pri_streams++; + data_sync_bufs[num_pri_streams - 1].traffic_class = + index; + } + } + + spin_unlock_bh(&wmi->lock); + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_sync_cmd *) skb->data; + + /* + * In the SYNC cmd sent on the control Ep, send a bitmap + * of the data eps on which the Data Sync will be sent + */ + cmd->data_sync_map = wmi->fat_pipe_exist; + + for (index = 0; index < num_pri_streams; index++) { + data_sync_bufs[index].skb = ath6kl_buf_alloc(0); + if (data_sync_bufs[index].skb == NULL) { + ret = -ENOMEM; + break; + } + } + + /* + * If buffer allocation for any of the dataSync fails, + * then do not send the Synchronize cmd on the control ep + */ + if (ret) + goto free_cmd_skb; + + /* + * Send sync cmd followed by sync data messages on all + * endpoints being used + */ + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SYNCHRONIZE_CMDID, + NO_SYNC_WMIFLAG); + + if (ret) + goto free_data_skb; + + for (index = 0; index < num_pri_streams; index++) { + if (WARN_ON(!data_sync_bufs[index].skb)) { + ret = -ENOMEM; + goto free_data_skb; + } + + ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev, + data_sync_bufs[index]. 
+ traffic_class); + ret = + ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb, + ep_id, if_idx); + + data_sync_bufs[index].skb = NULL; + + if (ret) + goto free_data_skb; + } + + return 0; + +free_cmd_skb: + /* free up any resources left over (possibly due to an error) */ + dev_kfree_skb(skb); + +free_data_skb: + for (index = 0; index < num_pri_streams; index++) + dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].skb); + + return ret; +} + +int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx, + struct wmi_create_pstream_cmd *params) +{ + struct sk_buff *skb; + struct wmi_create_pstream_cmd *cmd; + u8 fatpipe_exist_for_ac = 0; + s32 min_phy = 0; + s32 nominal_phy = 0; + int ret; + + if (!((params->user_pri <= 0x7) && + (up_to_ac[params->user_pri & 0x7] == params->traffic_class) && + (params->traffic_direc == UPLINK_TRAFFIC || + params->traffic_direc == DNLINK_TRAFFIC || + params->traffic_direc == BIDIR_TRAFFIC) && + (params->traffic_type == TRAFFIC_TYPE_APERIODIC || + params->traffic_type == TRAFFIC_TYPE_PERIODIC) && + (params->voice_psc_cap == DISABLE_FOR_THIS_AC || + params->voice_psc_cap == ENABLE_FOR_THIS_AC || + params->voice_psc_cap == ENABLE_FOR_ALL_AC) && + (params->tsid == WMI_IMPLICIT_PSTREAM || + params->tsid <= WMI_MAX_THINSTREAM))) { + return -EINVAL; + } + + /* + * Check nominal PHY rate is >= minimalPHY, + * so that DUT can allow TSRS IE + */ + + /* Get the physical rate (units of bps) */ + min_phy = ((le32_to_cpu(params->min_phy_rate) / 1000) / 1000); + + /* Check minimal phy < nominal phy rate */ + if (params->nominal_phy >= min_phy) { + /* unit of 500 kbps */ + nominal_phy = (params->nominal_phy * 1000) / 500; + ath6kl_dbg(ATH6KL_DBG_WMI, + "TSRS IE enabled::MinPhy %x->NominalPhy ===> %x\n", + min_phy, nominal_phy); + + params->nominal_phy = nominal_phy; + } else { + params->nominal_phy = 0; + } + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + ath6kl_dbg(ATH6KL_DBG_WMI, + "sending create_pstream_cmd: ac=%d tsid:%d\n", + params->traffic_class, params->tsid); + + cmd = (struct wmi_create_pstream_cmd *) skb->data; + memcpy(cmd, params, sizeof(*cmd)); + + /* This is an implicitly created Fat pipe */ + if ((u32) params->tsid == (u32) WMI_IMPLICIT_PSTREAM) { + spin_lock_bh(&wmi->lock); + fatpipe_exist_for_ac = (wmi->fat_pipe_exist & + (1 << params->traffic_class)); + wmi->fat_pipe_exist |= (1 << params->traffic_class); + spin_unlock_bh(&wmi->lock); + } else { + /* explicitly created thin stream within a fat pipe */ + spin_lock_bh(&wmi->lock); + fatpipe_exist_for_ac = (wmi->fat_pipe_exist & + (1 << params->traffic_class)); + wmi->stream_exist_for_ac[params->traffic_class] |= + (1 << params->tsid); + /* + * If a thinstream becomes active, the fat pipe automatically + * becomes active + */ + wmi->fat_pipe_exist |= (1 << params->traffic_class); + spin_unlock_bh(&wmi->lock); + } + + /* + * Indicate activty change to driver layer only if this is the + * first TSID to get created in this AC explicitly or an implicit + * fat pipe is getting created. 
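+	 * (fatpipe_exist_for_ac, sampled under wmi->lock above, records
+	 * whether this AC already had an active fat pipe.)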
+ */ + if (!fatpipe_exist_for_ac) + ath6kl_indicate_tx_activity(wmi->parent_dev, + params->traffic_class, true); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CREATE_PSTREAM_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class, + u8 tsid) +{ + struct sk_buff *skb; + struct wmi_delete_pstream_cmd *cmd; + u16 active_tsids = 0; + int ret; + + if (traffic_class >= WMM_NUM_AC) { + ath6kl_err("invalid traffic class: %d\n", traffic_class); + return -EINVAL; + } + + if (tsid >= 16) { + ath6kl_err("invalid tsid: %d\n", tsid); + return -EINVAL; + } + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_delete_pstream_cmd *) skb->data; + cmd->traffic_class = traffic_class; + cmd->tsid = tsid; + + spin_lock_bh(&wmi->lock); + active_tsids = wmi->stream_exist_for_ac[traffic_class]; + spin_unlock_bh(&wmi->lock); + + if (!(active_tsids & (1 << tsid))) { + dev_kfree_skb(skb); + ath6kl_dbg(ATH6KL_DBG_WMI, + "TSID %d doesn't exist for traffic class: %d\n", + tsid, traffic_class); + return -ENODATA; + } + + ath6kl_dbg(ATH6KL_DBG_WMI, + "sending delete_pstream_cmd: traffic class: %d tsid=%d\n", + traffic_class, tsid); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_PSTREAM_CMDID, + SYNC_BEFORE_WMIFLAG); + + spin_lock_bh(&wmi->lock); + wmi->stream_exist_for_ac[traffic_class] &= ~(1 << tsid); + active_tsids = wmi->stream_exist_for_ac[traffic_class]; + spin_unlock_bh(&wmi->lock); + + /* + * Indicate stream inactivity to driver layer only if all tsids + * within this AC are deleted. + */ + if (!active_tsids) { + ath6kl_indicate_tx_activity(wmi->parent_dev, + traffic_class, false); + wmi->fat_pipe_exist &= ~(1 << traffic_class); + } + + return ret; +} + +int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, u8 if_idx, + __be32 ips0, __be32 ips1) +{ + struct sk_buff *skb; + struct wmi_set_ip_cmd *cmd; + int ret; + + /* Multicast address are not valid */ + if (ipv4_is_multicast(ips0) || + ipv4_is_multicast(ips1)) + return -EINVAL; + + skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_ip_cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_ip_cmd *) skb->data; + cmd->ips[0] = ips0; + cmd->ips[1] = ips1; + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_IP_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi) +{ + u16 active_tsids; + u8 stream_exist; + int i; + + /* + * Relinquish credits from all implicitly created pstreams + * since when we go to sleep. If user created explicit + * thinstreams exists with in a fatpipe leave them intact + * for the user to delete. + */ + spin_lock_bh(&wmi->lock); + stream_exist = wmi->fat_pipe_exist; + spin_unlock_bh(&wmi->lock); + + for (i = 0; i < WMM_NUM_AC; i++) { + if (stream_exist & (1 << i)) { + /* + * FIXME: Is this lock & unlock inside + * for loop correct? may need rework. + */ + spin_lock_bh(&wmi->lock); + active_tsids = wmi->stream_exist_for_ac[i]; + spin_unlock_bh(&wmi->lock); + + /* + * If there are no user created thin streams + * delete the fatpipe + */ + if (!active_tsids) { + stream_exist &= ~(1 << i); + /* + * Indicate inactivity to driver layer for + * this fatpipe (pstream) + */ + ath6kl_indicate_tx_activity(wmi->parent_dev, + i, false); + } + } + } + + /* FIXME: Can we do this assignment without locking ? 
*/ + spin_lock_bh(&wmi->lock); + wmi->fat_pipe_exist = stream_exist; + spin_unlock_bh(&wmi->lock); +} + +static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx, + const struct cfg80211_bitrate_mask *mask) +{ + struct sk_buff *skb; + int ret, mode, band; + u64 mcsrate, ratemask[ATH6KL_NUM_BANDS]; + struct wmi_set_tx_select_rates64_cmd *cmd; + + memset(&ratemask, 0, sizeof(ratemask)); + + /* only check 2.4 and 5 GHz bands, skip the rest */ + for (band = 0; band <= NL80211_BAND_5GHZ; band++) { + /* copy legacy rate mask */ + ratemask[band] = mask->control[band].legacy; + if (band == NL80211_BAND_5GHZ) + ratemask[band] = + mask->control[band].legacy << 4; + + /* copy mcs rate mask */ + mcsrate = mask->control[band].ht_mcs[1]; + mcsrate <<= 8; + mcsrate |= mask->control[band].ht_mcs[0]; + ratemask[band] |= mcsrate << 12; + ratemask[band] |= mcsrate << 28; + } + + ath6kl_dbg(ATH6KL_DBG_WMI, + "Ratemask 64 bit: 2.4:%llx 5:%llx\n", + ratemask[0], ratemask[1]); + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd) * WMI_RATES_MODE_MAX); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_tx_select_rates64_cmd *) skb->data; + for (mode = 0; mode < WMI_RATES_MODE_MAX; mode++) { + /* A mode operate in 5GHZ band */ + if (mode == WMI_RATES_MODE_11A || + mode == WMI_RATES_MODE_11A_HT20 || + mode == WMI_RATES_MODE_11A_HT40) + band = NL80211_BAND_5GHZ; + else + band = NL80211_BAND_2GHZ; + cmd->ratemask[mode] = cpu_to_le64(ratemask[band]); + } + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, + WMI_SET_TX_SELECT_RATES_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx, + const struct cfg80211_bitrate_mask *mask) +{ + struct sk_buff *skb; + int ret, mode, band; + u32 mcsrate, ratemask[ATH6KL_NUM_BANDS]; + struct wmi_set_tx_select_rates32_cmd *cmd; + + memset(&ratemask, 0, sizeof(ratemask)); + + /* only check 2.4 and 5 GHz bands, skip the rest */ + for (band = 0; band <= NL80211_BAND_5GHZ; band++) { + /* copy legacy rate mask */ + ratemask[band] = mask->control[band].legacy; + if (band == NL80211_BAND_5GHZ) + ratemask[band] = + mask->control[band].legacy << 4; + + /* copy mcs rate mask */ + mcsrate = mask->control[band].ht_mcs[0]; + ratemask[band] |= mcsrate << 12; + ratemask[band] |= mcsrate << 20; + } + + ath6kl_dbg(ATH6KL_DBG_WMI, + "Ratemask 32 bit: 2.4:%x 5:%x\n", + ratemask[0], ratemask[1]); + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd) * WMI_RATES_MODE_MAX); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_tx_select_rates32_cmd *) skb->data; + for (mode = 0; mode < WMI_RATES_MODE_MAX; mode++) { + /* A mode operate in 5GHZ band */ + if (mode == WMI_RATES_MODE_11A || + mode == WMI_RATES_MODE_11A_HT20 || + mode == WMI_RATES_MODE_11A_HT40) + band = NL80211_BAND_5GHZ; + else + band = NL80211_BAND_2GHZ; + cmd->ratemask[mode] = cpu_to_le32(ratemask[band]); + } + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, + WMI_SET_TX_SELECT_RATES_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_set_bitrate_mask(struct wmi *wmi, u8 if_idx, + const struct cfg80211_bitrate_mask *mask) +{ + struct ath6kl *ar = wmi->parent_dev; + + if (test_bit(ATH6KL_FW_CAPABILITY_64BIT_RATES, + ar->fw_capabilities)) + return ath6kl_set_bitrate_mask64(wmi, if_idx, mask); + else + return ath6kl_set_bitrate_mask32(wmi, if_idx, mask); +} + +int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx, + enum ath6kl_host_mode host_mode) +{ + struct sk_buff *skb; + struct wmi_set_host_sleep_mode_cmd *cmd; + int ret; + + if ((host_mode != ATH6KL_HOST_MODE_ASLEEP) 
+int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
+				       enum ath6kl_host_mode host_mode)
+{
+	struct sk_buff *skb;
+	struct wmi_set_host_sleep_mode_cmd *cmd;
+	int ret;
+
+	if ((host_mode != ATH6KL_HOST_MODE_ASLEEP) &&
+	    (host_mode != ATH6KL_HOST_MODE_AWAKE)) {
+		ath6kl_err("invalid host sleep mode: %d\n", host_mode);
+		return -EINVAL;
+	}
+
+	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_set_host_sleep_mode_cmd *) skb->data;
+
+	if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
+		ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
+		cmd->asleep = cpu_to_le32(1);
+	} else {
+		cmd->awake = cpu_to_le32(1);
+	}
+
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+				  WMI_SET_HOST_SLEEP_MODE_CMDID,
+				  NO_SYNC_WMIFLAG);
+	return ret;
+}
+
+/* This command has a zero length payload */
+static int ath6kl_wmi_host_sleep_mode_cmd_prcd_evt_rx(struct wmi *wmi,
+						      struct ath6kl_vif *vif)
+{
+	struct ath6kl *ar = wmi->parent_dev;
+
+	set_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
+	wake_up(&ar->event_wq);
+
+	return 0;
+}
+
+int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
+				enum ath6kl_wow_mode wow_mode,
+				u32 filter, u16 host_req_delay)
+{
+	struct sk_buff *skb;
+	struct wmi_set_wow_mode_cmd *cmd;
+	int ret;
+
+	if ((wow_mode != ATH6KL_WOW_MODE_ENABLE) &&
+	    (wow_mode != ATH6KL_WOW_MODE_DISABLE)) {
+		ath6kl_err("invalid wow mode: %d\n", wow_mode);
+		return -EINVAL;
+	}
+
+	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_set_wow_mode_cmd *) skb->data;
+	cmd->enable_wow = cpu_to_le32(wow_mode);
+	cmd->filter = cpu_to_le32(filter);
+	cmd->host_req_delay = cpu_to_le16(host_req_delay);
+
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WOW_MODE_CMDID,
+				  NO_SYNC_WMIFLAG);
+	return ret;
+}
+
+int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+				   u8 list_id, u8 filter_size,
+				   u8 filter_offset, const u8 *filter,
+				   const u8 *mask)
+{
+	struct sk_buff *skb;
+	struct wmi_add_wow_pattern_cmd *cmd;
+	u16 size;
+	u8 *filter_mask;
+	int ret;
+
+	/*
+	 * Allocate additional memory in the buffer to hold the
+	 * filter and mask values, which is twice the filter_size.
+ */ + size = sizeof(*cmd) + (2 * filter_size); + + skb = ath6kl_wmi_get_new_buf(size); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_add_wow_pattern_cmd *) skb->data; + cmd->filter_list_id = list_id; + cmd->filter_size = filter_size; + cmd->filter_offset = filter_offset; + + memcpy(cmd->filter, filter, filter_size); + + filter_mask = (u8 *) (cmd->filter + filter_size); + memcpy(filter_mask, mask, filter_size); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_WOW_PATTERN_CMDID, + NO_SYNC_WMIFLAG); + + return ret; +} + +int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx, + u16 list_id, u16 filter_id) +{ + struct sk_buff *skb; + struct wmi_del_wow_pattern_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_del_wow_pattern_cmd *) skb->data; + cmd->filter_list_id = cpu_to_le16(list_id); + cmd->filter_id = cpu_to_le16(filter_id); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DEL_WOW_PATTERN_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb, + enum wmix_command_id cmd_id, + enum wmi_sync_flag sync_flag) +{ + struct wmix_cmd_hdr *cmd_hdr; + int ret; + + skb_push(skb, sizeof(struct wmix_cmd_hdr)); + + cmd_hdr = (struct wmix_cmd_hdr *) skb->data; + cmd_hdr->cmd_id = cpu_to_le32(cmd_id); + + ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_EXTENSION_CMDID, sync_flag); + + return ret; +} + +int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source) +{ + struct sk_buff *skb; + struct wmix_hb_challenge_resp_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmix_hb_challenge_resp_cmd *) skb->data; + cmd->cookie = cpu_to_le32(cookie); + cmd->source = cpu_to_le32(source); + + ret = ath6kl_wmi_cmd_send_xtnd(wmi, skb, WMIX_HB_CHALLENGE_RESP_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config) +{ + struct ath6kl_wmix_dbglog_cfg_module_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct ath6kl_wmix_dbglog_cfg_module_cmd *) skb->data; + cmd->valid = cpu_to_le32(valid); + cmd->config = cpu_to_le32(config); + + ret = ath6kl_wmi_cmd_send_xtnd(wmi, skb, WMIX_DBGLOG_CFG_MODULE_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx) +{ + return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_STATISTICS_CMDID); +} + +int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM) +{ + struct sk_buff *skb; + struct wmi_set_tx_pwr_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_tx_pwr_cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_tx_pwr_cmd *) skb->data; + cmd->dbM = dbM; + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_TX_PWR_CMDID, + NO_SYNC_WMIFLAG); + + return ret; +} + +int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx) +{ + return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_TX_PWR_CMDID); +} + +int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi) +{ + return ath6kl_wmi_simple_cmd(wmi, 0, WMI_GET_ROAM_TBL_CMDID); +} + +int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status, + u8 preamble_policy) +{ + struct sk_buff *skb; + struct wmi_set_lpreamble_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_lpreamble_cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct 
wmi_set_lpreamble_cmd *) skb->data; + cmd->status = status; + cmd->preamble_policy = preamble_policy; + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LPREAMBLE_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold) +{ + struct sk_buff *skb; + struct wmi_set_rts_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_rts_cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_rts_cmd *) skb->data; + cmd->threshold = cpu_to_le16(threshold); + + ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_RTS_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg) +{ + struct sk_buff *skb; + struct wmi_set_wmm_txop_cmd *cmd; + int ret; + + if (!((cfg == WMI_TXOP_DISABLED) || (cfg == WMI_TXOP_ENABLED))) + return -EINVAL; + + skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_wmm_txop_cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_wmm_txop_cmd *) skb->data; + cmd->txop_enable = cfg; + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WMM_TXOP_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, + u8 keep_alive_intvl) +{ + struct sk_buff *skb; + struct wmi_set_keepalive_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_keepalive_cmd *) skb->data; + cmd->keep_alive_intvl = keep_alive_intvl; + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_KEEPALIVE_CMDID, + NO_SYNC_WMIFLAG); + + if (ret == 0) + ath6kl_debug_set_keepalive(wmi->parent_dev, keep_alive_intvl); + + return ret; +} + +int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx, + enum nl80211_band band, + struct ath6kl_htcap *htcap) +{ + struct sk_buff *skb; + struct wmi_set_htcap_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_htcap_cmd *) skb->data; + + /* + * NOTE: Band in firmware matches enum nl80211_band, it is unlikely + * this will be changed in firmware. If at all there is any change in + * band value, the host needs to be fixed. 
+ */ + cmd->band = band; + cmd->ht_enable = !!htcap->ht_enable; + cmd->ht20_sgi = !!(htcap->cap_info & IEEE80211_HT_CAP_SGI_20); + cmd->ht40_supported = + !!(htcap->cap_info & IEEE80211_HT_CAP_SUP_WIDTH_20_40); + cmd->ht40_sgi = !!(htcap->cap_info & IEEE80211_HT_CAP_SGI_40); + cmd->intolerant_40mhz = + !!(htcap->cap_info & IEEE80211_HT_CAP_40MHZ_INTOLERANT); + cmd->max_ampdu_len_exp = htcap->ampdu_factor; + + ath6kl_dbg(ATH6KL_DBG_WMI, + "Set htcap: band:%d ht_enable:%d 40mhz:%d sgi_20mhz:%d sgi_40mhz:%d 40mhz_intolerant:%d ampdu_len_exp:%d\n", + cmd->band, cmd->ht_enable, cmd->ht40_supported, + cmd->ht20_sgi, cmd->ht40_sgi, cmd->intolerant_40mhz, + cmd->max_ampdu_len_exp); + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_HT_CAP_CMDID, + NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len) +{ + struct sk_buff *skb; + int ret; + + skb = ath6kl_wmi_get_new_buf(len); + if (!skb) + return -ENOMEM; + + memcpy(skb->data, buf, len); + + ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG); + + return ret; +} + +int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on) +{ + struct sk_buff *skb; + struct wmi_mcast_filter_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_mcast_filter_cmd *) skb->data; + cmd->mcast_all_enable = mc_all_on; + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_MCAST_FILTER_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, + u8 *filter, bool add_filter) +{ + struct sk_buff *skb; + struct wmi_mcast_filter_add_del_cmd *cmd; + int ret; + + if ((filter[0] != 0x33 || filter[1] != 0x33) && + (filter[0] != 0x01 || filter[1] != 0x00 || + filter[2] != 0x5e || filter[3] > 0x7f)) { + ath6kl_warn("invalid multicast filter address\n"); + return -EINVAL; + } + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_mcast_filter_add_del_cmd *) skb->data; + memcpy(cmd->mcast_mac, filter, ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE); + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, + add_filter ? WMI_SET_MCAST_FILTER_CMDID : + WMI_DEL_MCAST_FILTER_CMDID, + NO_SYNC_WMIFLAG); + + return ret; +} + +int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enhance) +{ + struct sk_buff *skb; + struct wmi_sta_bmiss_enhance_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_sta_bmiss_enhance_cmd *) skb->data; + cmd->enable = enhance ? 
1 : 0; + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, + WMI_STA_BMISS_ENHANCE_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_set_regdomain_cmd(struct wmi *wmi, const char *alpha2) +{ + struct sk_buff *skb; + struct wmi_set_regdomain_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_regdomain_cmd *) skb->data; + memcpy(cmd->iso_name, alpha2, 2); + + return ath6kl_wmi_cmd_send(wmi, 0, skb, + WMI_SET_REGDOMAIN_CMDID, + NO_SYNC_WMIFLAG); +} + +s32 ath6kl_wmi_get_rate(struct wmi *wmi, s8 rate_index) +{ + struct ath6kl *ar = wmi->parent_dev; + u8 sgi = 0; + s32 ret; + + if (rate_index == RATE_AUTO) + return 0; + + /* SGI is stored as the MSB of the rate_index */ + if (rate_index & RATE_INDEX_MSB) { + rate_index &= RATE_INDEX_WITHOUT_SGI_MASK; + sgi = 1; + } + + if (test_bit(ATH6KL_FW_CAPABILITY_RATETABLE_MCS15, + ar->fw_capabilities)) { + if (WARN_ON(rate_index >= ARRAY_SIZE(wmi_rate_tbl_mcs15))) + return 0; + + ret = wmi_rate_tbl_mcs15[(u32) rate_index][sgi]; + } else { + if (WARN_ON(rate_index >= ARRAY_SIZE(wmi_rate_tbl))) + return 0; + + ret = wmi_rate_tbl[(u32) rate_index][sgi]; + } + + return ret; +} + +static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap, + u32 len) +{ + struct wmi_pmkid_list_reply *reply; + u32 expected_len; + + if (len < sizeof(struct wmi_pmkid_list_reply)) + return -EINVAL; + + reply = (struct wmi_pmkid_list_reply *)datap; + expected_len = sizeof(reply->num_pmkid) + + le32_to_cpu(reply->num_pmkid) * WMI_PMKID_LEN; + + if (len < expected_len) + return -EINVAL; + + return 0; +} + +static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) +{ + struct wmi_addba_req_event *cmd = (struct wmi_addba_req_event *) datap; + + aggr_recv_addba_req_evt(vif, cmd->tid, + le16_to_cpu(cmd->st_seq_no), cmd->win_sz); + + return 0; +} + +static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) +{ + struct wmi_delba_event *cmd = (struct wmi_delba_event *) datap; + + aggr_recv_delba_req_evt(vif, cmd->tid); + + return 0; +} + +/* AP mode functions */ + +int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx, + struct wmi_connect_cmd *p) +{ + struct sk_buff *skb; + struct wmi_connect_cmd *cm; + int res; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cm)); + if (!skb) + return -ENOMEM; + + cm = (struct wmi_connect_cmd *) skb->data; + memcpy(cm, p, sizeof(*cm)); + + res = ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_CONFIG_COMMIT_CMDID, + NO_SYNC_WMIFLAG); + ath6kl_dbg(ATH6KL_DBG_WMI, + "%s: nw_type=%u auth_mode=%u ch=%u ctrl_flags=0x%x-> res=%d\n", + __func__, p->nw_type, p->auth_mode, le16_to_cpu(p->ch), + le32_to_cpu(p->ctrl_flags), res); + return res; +} + +int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac, + u16 reason) +{ + struct sk_buff *skb; + struct wmi_ap_set_mlme_cmd *cm; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cm)); + if (!skb) + return -ENOMEM; + + cm = (struct wmi_ap_set_mlme_cmd *) skb->data; + memcpy(cm->mac, mac, ETH_ALEN); + cm->reason = cpu_to_le16(reason); + cm->cmd = cmd; + + ath6kl_dbg(ATH6KL_DBG_WMI, "ap_set_mlme: cmd=%d reason=%d\n", cm->cmd, + cm->reason); + + return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID, + NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_ap_hidden_ssid(struct wmi *wmi, u8 if_idx, bool enable) +{ + struct sk_buff *skb; + struct wmi_ap_hidden_ssid_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + 
return -ENOMEM; + + cmd = (struct wmi_ap_hidden_ssid_cmd *) skb->data; + cmd->hidden_ssid = enable ? 1 : 0; + + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_HIDDEN_SSID_CMDID, + NO_SYNC_WMIFLAG); +} + +/* This command will be used to enable/disable AP uAPSD feature */ +int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable) +{ + struct wmi_ap_set_apsd_cmd *cmd; + struct sk_buff *skb; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_ap_set_apsd_cmd *)skb->data; + cmd->enable = enable; + + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_APSD_CMDID, + NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_set_apsd_bfrd_traf(struct wmi *wmi, u8 if_idx, + u16 aid, u16 bitmap, u32 flags) +{ + struct wmi_ap_apsd_buffered_traffic_cmd *cmd; + struct sk_buff *skb; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_ap_apsd_buffered_traffic_cmd *)skb->data; + cmd->aid = cpu_to_le16(aid); + cmd->bitmap = cpu_to_le16(bitmap); + cmd->flags = cpu_to_le32(flags); + + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, + WMI_AP_APSD_BUFFERED_TRAFFIC_CMDID, + NO_SYNC_WMIFLAG); +} + +static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) +{ + struct wmi_pspoll_event *ev; + + if (len < sizeof(struct wmi_pspoll_event)) + return -EINVAL; + + ev = (struct wmi_pspoll_event *) datap; + + ath6kl_pspoll_event(vif, le16_to_cpu(ev->aid)); + + return 0; +} + +static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) +{ + ath6kl_dtimexpiry_event(vif); + + return 0; +} + +int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid, + bool flag) +{ + struct sk_buff *skb; + struct wmi_ap_set_pvb_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_ap_set_pvb_cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_ap_set_pvb_cmd *) skb->data; + cmd->aid = cpu_to_le16(aid); + cmd->rsvd = cpu_to_le16(0); + cmd->flag = cpu_to_le32(flag); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_PVB_CMDID, + NO_SYNC_WMIFLAG); + + return ret; +} + +int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx, + u8 rx_meta_ver, + bool rx_dot11_hdr, bool defrag_on_host) +{ + struct sk_buff *skb; + struct wmi_rx_frame_format_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_rx_frame_format_cmd *) skb->data; + cmd->dot11_hdr = rx_dot11_hdr ? 1 : 0; + cmd->defrag_on_host = defrag_on_host ? 
1 : 0; + cmd->meta_ver = rx_meta_ver; + + /* Delete the local aggr state, on host */ + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RX_FRAME_FORMAT_CMDID, + NO_SYNC_WMIFLAG); + + return ret; +} + +int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type, + const u8 *ie, u8 ie_len) +{ + struct sk_buff *skb; + struct wmi_set_appie_cmd *p; + + skb = ath6kl_wmi_get_new_buf(sizeof(*p) + ie_len); + if (!skb) + return -ENOMEM; + + ath6kl_dbg(ATH6KL_DBG_WMI, + "set_appie_cmd: mgmt_frm_type=%u ie_len=%u\n", + mgmt_frm_type, ie_len); + p = (struct wmi_set_appie_cmd *) skb->data; + p->mgmt_frm_type = mgmt_frm_type; + p->ie_len = ie_len; + + if (ie != NULL && ie_len > 0) + memcpy(p->ie_info, ie, ie_len); + + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_APPIE_CMDID, + NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_set_ie_cmd(struct wmi *wmi, u8 if_idx, u8 ie_id, u8 ie_field, + const u8 *ie_info, u8 ie_len) +{ + struct sk_buff *skb; + struct wmi_set_ie_cmd *p; + + skb = ath6kl_wmi_get_new_buf(sizeof(*p) + ie_len); + if (!skb) + return -ENOMEM; + + ath6kl_dbg(ATH6KL_DBG_WMI, "set_ie_cmd: ie_id=%u ie_ie_field=%u ie_len=%u\n", + ie_id, ie_field, ie_len); + p = (struct wmi_set_ie_cmd *) skb->data; + p->ie_id = ie_id; + p->ie_field = ie_field; + p->ie_len = ie_len; + if (ie_info && ie_len > 0) + memcpy(p->ie_info, ie_info, ie_len); + + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_IE_CMDID, + NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable) +{ + struct sk_buff *skb; + struct wmi_disable_11b_rates_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + ath6kl_dbg(ATH6KL_DBG_WMI, "disable_11b_rates_cmd: disable=%u\n", + disable); + cmd = (struct wmi_disable_11b_rates_cmd *) skb->data; + cmd->disable = disable ? 1 : 0; + + return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_DISABLE_11B_RATES_CMDID, + NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq, u32 dur) +{ + struct sk_buff *skb; + struct wmi_remain_on_chnl_cmd *p; + + skb = ath6kl_wmi_get_new_buf(sizeof(*p)); + if (!skb) + return -ENOMEM; + + ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl_cmd: freq=%u dur=%u\n", + freq, dur); + p = (struct wmi_remain_on_chnl_cmd *) skb->data; + p->freq = cpu_to_le32(freq); + p->duration = cpu_to_le32(dur); + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_REMAIN_ON_CHNL_CMDID, + NO_SYNC_WMIFLAG); +} + +/* ath6kl_wmi_send_action_cmd is to be deprecated. Use + * ath6kl_wmi_send_mgmt_cmd instead. The new function supports P2P + * mgmt operations using station interface. 
+ */ +static int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, + u32 freq, u32 wait, const u8 *data, + u16 data_len) +{ + struct sk_buff *skb; + struct wmi_send_action_cmd *p; + u8 *buf; + + if (wait) + return -EINVAL; /* Offload for wait not supported */ + + buf = kmemdup(data, data_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len); + if (!skb) { + kfree(buf); + return -ENOMEM; + } + + kfree(wmi->last_mgmt_tx_frame); + wmi->last_mgmt_tx_frame = buf; + wmi->last_mgmt_tx_frame_len = data_len; + + ath6kl_dbg(ATH6KL_DBG_WMI, + "send_action_cmd: id=%u freq=%u wait=%u len=%u\n", + id, freq, wait, data_len); + p = (struct wmi_send_action_cmd *) skb->data; + p->id = cpu_to_le32(id); + p->freq = cpu_to_le32(freq); + p->wait = cpu_to_le32(wait); + p->len = cpu_to_le16(data_len); + memcpy(p->data, data, data_len); + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_ACTION_CMDID, + NO_SYNC_WMIFLAG); +} + +static int __ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, + u32 freq, u32 wait, const u8 *data, + u16 data_len, u32 no_cck) +{ + struct sk_buff *skb; + struct wmi_send_mgmt_cmd *p; + u8 *buf; + + if (wait) + return -EINVAL; /* Offload for wait not supported */ + + buf = kmemdup(data, data_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len); + if (!skb) { + kfree(buf); + return -ENOMEM; + } + + kfree(wmi->last_mgmt_tx_frame); + wmi->last_mgmt_tx_frame = buf; + wmi->last_mgmt_tx_frame_len = data_len; + + ath6kl_dbg(ATH6KL_DBG_WMI, + "send_action_cmd: id=%u freq=%u wait=%u len=%u\n", + id, freq, wait, data_len); + p = (struct wmi_send_mgmt_cmd *) skb->data; + p->id = cpu_to_le32(id); + p->freq = cpu_to_le32(freq); + p->wait = cpu_to_le32(wait); + p->no_cck = cpu_to_le32(no_cck); + p->len = cpu_to_le16(data_len); + memcpy(p->data, data, data_len); + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_MGMT_CMDID, + NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq, + u32 wait, const u8 *data, u16 data_len, + u32 no_cck) +{ + int status; + struct ath6kl *ar = wmi->parent_dev; + + if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, + ar->fw_capabilities)) { + /* + * If capable of doing P2P mgmt operations using + * station interface, send additional information like + * supported rates to advertise and xmit rates for + * probe requests + */ + status = __ath6kl_wmi_send_mgmt_cmd(ar->wmi, if_idx, id, freq, + wait, data, data_len, + no_cck); + } else { + status = ath6kl_wmi_send_action_cmd(ar->wmi, if_idx, id, freq, + wait, data, data_len); + } + + return status; +} + +int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq, + const u8 *dst, const u8 *data, + u16 data_len) +{ + struct sk_buff *skb; + struct wmi_p2p_probe_response_cmd *p; + size_t cmd_len = sizeof(*p) + data_len; + + if (data_len == 0) + cmd_len++; /* work around target minimum length requirement */ + + skb = ath6kl_wmi_get_new_buf(cmd_len); + if (!skb) + return -ENOMEM; + + ath6kl_dbg(ATH6KL_DBG_WMI, + "send_probe_response_cmd: freq=%u dst=%pM len=%u\n", + freq, dst, data_len); + p = (struct wmi_p2p_probe_response_cmd *) skb->data; + p->freq = cpu_to_le32(freq); + memcpy(p->destination_addr, dst, ETH_ALEN); + p->len = cpu_to_le16(data_len); + memcpy(p->data, data, data_len); + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, + WMI_SEND_PROBE_RESPONSE_CMDID, + NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, 
u8 if_idx, bool enable) +{ + struct sk_buff *skb; + struct wmi_probe_req_report_cmd *p; + + skb = ath6kl_wmi_get_new_buf(sizeof(*p)); + if (!skb) + return -ENOMEM; + + ath6kl_dbg(ATH6KL_DBG_WMI, "probe_report_req_cmd: enable=%u\n", + enable); + p = (struct wmi_probe_req_report_cmd *) skb->data; + p->enable = enable ? 1 : 0; + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_PROBE_REQ_REPORT_CMDID, + NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags) +{ + struct sk_buff *skb; + struct wmi_get_p2p_info *p; + + skb = ath6kl_wmi_get_new_buf(sizeof(*p)); + if (!skb) + return -ENOMEM; + + ath6kl_dbg(ATH6KL_DBG_WMI, "info_req_cmd: flags=%x\n", + info_req_flags); + p = (struct wmi_get_p2p_info *) skb->data; + p->info_req_flags = cpu_to_le32(info_req_flags); + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_GET_P2P_INFO_CMDID, + NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx) +{ + ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl_cmd\n"); + return ath6kl_wmi_simple_cmd(wmi, if_idx, + WMI_CANCEL_REMAIN_ON_CHNL_CMDID); +} + +int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout) +{ + struct sk_buff *skb; + struct wmi_set_inact_period_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_inact_period_cmd *) skb->data; + cmd->inact_period = cpu_to_le32(inact_timeout); + cmd->num_null_func = 0; + + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_CONN_INACT_CMDID, + NO_SYNC_WMIFLAG); +} + +static void ath6kl_wmi_hb_challenge_resp_event(struct wmi *wmi, u8 *datap, + int len) +{ + struct wmix_hb_challenge_resp_cmd *cmd; + + if (len < sizeof(struct wmix_hb_challenge_resp_cmd)) + return; + + cmd = (struct wmix_hb_challenge_resp_cmd *) datap; + ath6kl_recovery_hb_event(wmi->parent_dev, + le32_to_cpu(cmd->cookie)); +} + +static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb) +{ + struct wmix_cmd_hdr *cmd; + u32 len; + u16 id; + u8 *datap; + int ret = 0; + + if (skb->len < sizeof(struct wmix_cmd_hdr)) { + ath6kl_err("bad packet 1\n"); + return -EINVAL; + } + + cmd = (struct wmix_cmd_hdr *) skb->data; + id = le32_to_cpu(cmd->cmd_id); + + skb_pull(skb, sizeof(struct wmix_cmd_hdr)); + + datap = skb->data; + len = skb->len; + + switch (id) { + case WMIX_HB_CHALLENGE_RESP_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "wmi event hb challenge resp\n"); + ath6kl_wmi_hb_challenge_resp_event(wmi, datap, len); + break; + case WMIX_DBGLOG_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "wmi event dbglog len %d\n", len); + ath6kl_debug_fwlog_event(wmi->parent_dev, datap, len); + break; + default: + ath6kl_warn("unknown cmd id 0x%x\n", id); + ret = -EINVAL; + break; + } + + return ret; +} + +static int ath6kl_wmi_roam_tbl_event_rx(struct wmi *wmi, u8 *datap, int len) +{ + return ath6kl_debug_roam_tbl_event(wmi->parent_dev, datap, len); +} + +/* Process interface specific wmi events, caller would free the datap */ +static int ath6kl_wmi_proc_events_vif(struct wmi *wmi, u16 if_idx, u16 cmd_id, + u8 *datap, u32 len) +{ + struct ath6kl_vif *vif; + + vif = ath6kl_get_vif_by_index(wmi->parent_dev, if_idx); + if (!vif) { + ath6kl_dbg(ATH6KL_DBG_WMI, + "Wmi event for unavailable vif, vif_index:%d\n", + if_idx); + return -EINVAL; + } + + switch (cmd_id) { + case WMI_CONNECT_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n"); + return ath6kl_wmi_connect_event_rx(wmi, datap, len, vif); + case WMI_DISCONNECT_EVENTID: + 
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n"); + return ath6kl_wmi_disconnect_event_rx(wmi, datap, len, vif); + case WMI_TKIP_MICERR_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n"); + return ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len, vif); + case WMI_BSSINFO_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n"); + return ath6kl_wmi_bssinfo_event_rx(wmi, datap, len, vif); + case WMI_NEIGHBOR_REPORT_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n"); + return ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len, + vif); + case WMI_SCAN_COMPLETE_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n"); + return ath6kl_wmi_scan_complete_rx(wmi, datap, len, vif); + case WMI_REPORT_STATISTICS_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n"); + return ath6kl_wmi_stats_event_rx(wmi, datap, len, vif); + case WMI_CAC_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n"); + return ath6kl_wmi_cac_event_rx(wmi, datap, len, vif); + case WMI_PSPOLL_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n"); + return ath6kl_wmi_pspoll_event_rx(wmi, datap, len, vif); + case WMI_DTIMEXPIRY_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n"); + return ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len, vif); + case WMI_ADDBA_REQ_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n"); + return ath6kl_wmi_addba_req_event_rx(wmi, datap, len, vif); + case WMI_DELBA_REQ_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n"); + return ath6kl_wmi_delba_req_event_rx(wmi, datap, len, vif); + case WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, + "WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID"); + return ath6kl_wmi_host_sleep_mode_cmd_prcd_evt_rx(wmi, vif); + case WMI_REMAIN_ON_CHNL_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n"); + return ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len, vif); + case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, + "WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n"); + return ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap, + len, vif); + case WMI_TX_STATUS_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n"); + return ath6kl_wmi_tx_status_event_rx(wmi, datap, len, vif); + case WMI_RX_PROBE_REQ_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n"); + return ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len, vif); + case WMI_RX_ACTION_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n"); + return ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif); + case WMI_TXE_NOTIFY_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TXE_NOTIFY_EVENTID\n"); + return ath6kl_wmi_txe_notify_event_rx(wmi, datap, len, vif); + default: + ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", cmd_id); + return -EINVAL; + } + + return 0; +} + +static int ath6kl_wmi_proc_events(struct wmi *wmi, struct sk_buff *skb) +{ + struct wmi_cmd_hdr *cmd; + int ret = 0; + u32 len; + u16 id; + u8 if_idx; + u8 *datap; + + cmd = (struct wmi_cmd_hdr *) skb->data; + id = le16_to_cpu(cmd->cmd_id); + if_idx = le16_to_cpu(cmd->info1) & WMI_CMD_HDR_IF_ID_MASK; + + skb_pull(skb, sizeof(struct wmi_cmd_hdr)); + datap = skb->data; + len = skb->len; + + ath6kl_dbg(ATH6KL_DBG_WMI, "wmi rx id %d len %d\n", id, len); + ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi rx ", + datap, len); + + switch (id) { + case WMI_GET_BITRATE_CMDID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n"); + ret = 
ath6kl_wmi_bitrate_reply_rx(wmi, datap, len); + break; + case WMI_GET_CHANNEL_LIST_CMDID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_CHANNEL_LIST_CMDID\n"); + ret = ath6kl_wmi_ch_list_reply_rx(wmi, datap, len); + break; + case WMI_GET_TX_PWR_CMDID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_TX_PWR_CMDID\n"); + ret = ath6kl_wmi_tx_pwr_reply_rx(wmi, datap, len); + break; + case WMI_READY_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_READY_EVENTID\n"); + ret = ath6kl_wmi_ready_event_rx(wmi, datap, len); + break; + case WMI_PEER_NODE_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n"); + ret = ath6kl_wmi_peer_node_event_rx(wmi, datap, len); + break; + case WMI_REGDOMAIN_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n"); + ath6kl_wmi_regdomain_event(wmi, datap, len); + break; + case WMI_PSTREAM_TIMEOUT_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSTREAM_TIMEOUT_EVENTID\n"); + ret = ath6kl_wmi_pstream_timeout_event_rx(wmi, datap, len); + break; + case WMI_CMDERROR_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n"); + ret = ath6kl_wmi_error_event_rx(wmi, datap, len); + break; + case WMI_RSSI_THRESHOLD_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n"); + ret = ath6kl_wmi_rssi_threshold_event_rx(wmi, datap, len); + break; + case WMI_ERROR_REPORT_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ERROR_REPORT_EVENTID\n"); + break; + case WMI_OPT_RX_FRAME_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_OPT_RX_FRAME_EVENTID\n"); + /* this event has been deprecated */ + break; + case WMI_REPORT_ROAM_TBL_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_TBL_EVENTID\n"); + ret = ath6kl_wmi_roam_tbl_event_rx(wmi, datap, len); + break; + case WMI_EXTENSION_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n"); + ret = ath6kl_wmi_control_rx_xtnd(wmi, skb); + break; + case WMI_CHANNEL_CHANGE_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n"); + break; + case WMI_REPORT_ROAM_DATA_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_DATA_EVENTID\n"); + break; + case WMI_TEST_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TEST_EVENTID\n"); + ret = ath6kl_wmi_test_rx(wmi, datap, len); + break; + case WMI_GET_FIXRATES_CMDID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_FIXRATES_CMDID\n"); + ret = ath6kl_wmi_ratemask_reply_rx(wmi, datap, len); + break; + case WMI_TX_RETRY_ERR_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_RETRY_ERR_EVENTID\n"); + break; + case WMI_SNR_THRESHOLD_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SNR_THRESHOLD_EVENTID\n"); + ret = ath6kl_wmi_snr_threshold_event_rx(wmi, datap, len); + break; + case WMI_LQ_THRESHOLD_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_LQ_THRESHOLD_EVENTID\n"); + break; + case WMI_APLIST_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_APLIST_EVENTID\n"); + ret = ath6kl_wmi_aplist_event_rx(wmi, datap, len); + break; + case WMI_GET_KEEPALIVE_CMDID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_KEEPALIVE_CMDID\n"); + ret = ath6kl_wmi_keepalive_reply_rx(wmi, datap, len); + break; + case WMI_GET_WOW_LIST_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_WOW_LIST_EVENTID\n"); + break; + case WMI_GET_PMKID_LIST_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n"); + ret = ath6kl_wmi_get_pmkid_list_event_rx(wmi, datap, len); + break; + case WMI_SET_PARAMS_REPLY_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n"); + break; + case WMI_ADDBA_RESP_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n"); + break; + case WMI_REPORT_BTCOEX_CONFIG_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, + 
"WMI_REPORT_BTCOEX_CONFIG_EVENTID\n"); + break; + case WMI_REPORT_BTCOEX_STATS_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, + "WMI_REPORT_BTCOEX_STATS_EVENTID\n"); + break; + case WMI_TX_COMPLETE_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_COMPLETE_EVENTID\n"); + ret = ath6kl_wmi_tx_complete_event_rx(datap, len); + break; + case WMI_P2P_CAPABILITIES_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_CAPABILITIES_EVENTID\n"); + ret = ath6kl_wmi_p2p_capabilities_event_rx(datap, len); + break; + case WMI_P2P_INFO_EVENTID: + ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_INFO_EVENTID\n"); + ret = ath6kl_wmi_p2p_info_event_rx(datap, len); + break; + default: + /* may be the event is interface specific */ + ret = ath6kl_wmi_proc_events_vif(wmi, if_idx, id, datap, len); + break; + } + + dev_kfree_skb(skb); + return ret; +} + +/* Control Path */ +int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) +{ + if (WARN_ON(skb == NULL)) + return -EINVAL; + + if (skb->len < sizeof(struct wmi_cmd_hdr)) { + ath6kl_err("bad packet 1\n"); + dev_kfree_skb(skb); + return -EINVAL; + } + + trace_ath6kl_wmi_event(skb->data, skb->len); + + return ath6kl_wmi_proc_events(wmi, skb); +} + +void ath6kl_wmi_reset(struct wmi *wmi) +{ + spin_lock_bh(&wmi->lock); + + wmi->fat_pipe_exist = 0; + memset(wmi->stream_exist_for_ac, 0, sizeof(wmi->stream_exist_for_ac)); + + spin_unlock_bh(&wmi->lock); +} + +void *ath6kl_wmi_init(struct ath6kl *dev) +{ + struct wmi *wmi; + + wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL); + if (!wmi) + return NULL; + + spin_lock_init(&wmi->lock); + + wmi->parent_dev = dev; + + wmi->pwr_mode = REC_POWER; + + ath6kl_wmi_reset(wmi); + + return wmi; +} + +void ath6kl_wmi_shutdown(struct wmi *wmi) +{ + if (!wmi) + return; + + kfree(wmi->last_mgmt_tx_frame); + kfree(wmi); +} diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h new file mode 100644 index 000000000..b4fcfb729 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/wmi.h @@ -0,0 +1,2731 @@ +/* + * Copyright (c) 2010-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the definitions of the WMI protocol specified in the + * Wireless Module Interface (WMI). It includes definitions of all the + * commands and events. Commands are messages from the host to the WM. + * Events and Replies are messages from the WM to the host. 
+ */ + +#ifndef WMI_H +#define WMI_H + +#include <linux/ieee80211.h> + +#include "htc.h" + +#define HTC_PROTOCOL_VERSION 0x0002 +#define WMI_PROTOCOL_VERSION 0x0002 +#define WMI_CONTROL_MSG_MAX_LEN 256 +#define is_ethertype(type_or_len) ((type_or_len) >= 0x0600) + +#define IP_ETHERTYPE 0x0800 + +#define WMI_IMPLICIT_PSTREAM 0xFF +#define WMI_MAX_THINSTREAM 15 + +#define SSID_IE_LEN_INDEX 13 + +/* Host side link management data structures */ +#define SIG_QUALITY_THRESH_LVLS 6 +#define SIG_QUALITY_UPPER_THRESH_LVLS SIG_QUALITY_THRESH_LVLS +#define SIG_QUALITY_LOWER_THRESH_LVLS SIG_QUALITY_THRESH_LVLS + +#define A_BAND_24GHZ 0 +#define A_BAND_5GHZ 1 +#define ATH6KL_NUM_BANDS 2 + +/* in ms */ +#define WMI_IMPLICIT_PSTREAM_INACTIVITY_INT 5000 + +/* + * There are no signed versions of __le16 and __le32, so for a temporary + * solution come up with our own version. The idea is from fs/ntfs/types.h. + * + * Use a_ prefix so that it doesn't conflict if we get proper support to + * linux/types.h. + */ +typedef __s16 __bitwise a_sle16; +typedef __s32 __bitwise a_sle32; + +static inline a_sle32 a_cpu_to_sle32(s32 val) +{ + return (__force a_sle32) cpu_to_le32(val); +} + +static inline s32 a_sle32_to_cpu(a_sle32 val) +{ + return le32_to_cpu((__force __le32) val); +} + +static inline a_sle16 a_cpu_to_sle16(s16 val) +{ + return (__force a_sle16) cpu_to_le16(val); +} + +static inline s16 a_sle16_to_cpu(a_sle16 val) +{ + return le16_to_cpu((__force __le16) val); +} + +struct sq_threshold_params { + s16 upper_threshold[SIG_QUALITY_UPPER_THRESH_LVLS]; + s16 lower_threshold[SIG_QUALITY_LOWER_THRESH_LVLS]; + u32 upper_threshold_valid_count; + u32 lower_threshold_valid_count; + u32 polling_interval; + u8 weight; + u8 last_rssi; + u8 last_rssi_poll_event; +}; + +struct wmi_data_sync_bufs { + u8 traffic_class; + struct sk_buff *skb; +}; + +/* WMM stream classes */ +#define WMM_NUM_AC 4 +#define WMM_AC_BE 0 /* best effort */ +#define WMM_AC_BK 1 /* background */ +#define WMM_AC_VI 2 /* video */ +#define WMM_AC_VO 3 /* voice */ + +#define WMI_VOICE_USER_PRIORITY 0x7 + +struct wmi { + u16 stream_exist_for_ac[WMM_NUM_AC]; + u8 fat_pipe_exist; + struct ath6kl *parent_dev; + u8 pwr_mode; + + /* protects fat_pipe_exist and stream_exist_for_ac */ + spinlock_t lock; + enum htc_endpoint_id ep_id; + struct sq_threshold_params + sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX]; + bool is_wmm_enabled; + u8 traffic_class; + bool is_probe_ssid; + + u8 *last_mgmt_tx_frame; + size_t last_mgmt_tx_frame_len; + u8 saved_pwr_mode; +}; + +struct host_app_area { + __le32 wmi_protocol_ver; +} __packed; + +enum wmi_msg_type { + DATA_MSGTYPE = 0x0, + CNTL_MSGTYPE, + SYNC_MSGTYPE, + OPT_MSGTYPE, +}; + +/* + * Macros for operating on WMI_DATA_HDR (info) field + */ + +#define WMI_DATA_HDR_MSG_TYPE_MASK 0x03 +#define WMI_DATA_HDR_MSG_TYPE_SHIFT 0 +#define WMI_DATA_HDR_UP_MASK 0x07 +#define WMI_DATA_HDR_UP_SHIFT 2 + +/* In AP mode, the same bit (b5) is used to indicate Power save state in + * the Rx dir and More data bit state in the tx direction. 
+ */ +#define WMI_DATA_HDR_PS_MASK 0x1 +#define WMI_DATA_HDR_PS_SHIFT 5 + +#define WMI_DATA_HDR_MORE 0x20 + +enum wmi_data_hdr_data_type { + WMI_DATA_HDR_DATA_TYPE_802_3 = 0, + WMI_DATA_HDR_DATA_TYPE_802_11, + + /* used to be used for the PAL */ + WMI_DATA_HDR_DATA_TYPE_ACL, +}; + +/* Bitmap of data header flags */ +enum wmi_data_hdr_flags { + WMI_DATA_HDR_FLAGS_MORE = 0x1, + WMI_DATA_HDR_FLAGS_EOSP = 0x2, + WMI_DATA_HDR_FLAGS_UAPSD = 0x4, +}; + +#define WMI_DATA_HDR_DATA_TYPE_MASK 0x3 +#define WMI_DATA_HDR_DATA_TYPE_SHIFT 6 + +/* Macros for operating on WMI_DATA_HDR (info2) field */ +#define WMI_DATA_HDR_SEQNO_MASK 0xFFF +#define WMI_DATA_HDR_SEQNO_SHIFT 0 + +#define WMI_DATA_HDR_AMSDU_MASK 0x1 +#define WMI_DATA_HDR_AMSDU_SHIFT 12 + +#define WMI_DATA_HDR_META_MASK 0x7 +#define WMI_DATA_HDR_META_SHIFT 13 + +#define WMI_DATA_HDR_PAD_BEFORE_DATA_MASK 0xFF +#define WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT 0x8 + +/* Macros for operating on WMI_DATA_HDR (info3) field */ +#define WMI_DATA_HDR_IF_IDX_MASK 0xF + +#define WMI_DATA_HDR_TRIG 0x10 +#define WMI_DATA_HDR_EOSP 0x10 + +struct wmi_data_hdr { + s8 rssi; + + /* + * usage of 'info' field(8-bit): + * + * b1:b0 - WMI_MSG_TYPE + * b4:b3:b2 - UP(tid) + * b5 - Used in AP mode. + * More-data in tx dir, PS in rx. + * b7:b6 - Dot3 header(0), + * Dot11 Header(1), + * ACL data(2) + */ + u8 info; + + /* + * usage of 'info2' field(16-bit): + * + * b11:b0 - seq_no + * b12 - A-MSDU? + * b15:b13 - META_DATA_VERSION 0 - 7 + */ + __le16 info2; + + /* + * usage of info3, 16-bit: + * b3:b0 - Interface index + * b4 - uAPSD trigger in rx & EOSP in tx + * b15:b5 - Reserved + */ + __le16 info3; +} __packed; + +static inline u8 wmi_data_hdr_get_up(struct wmi_data_hdr *dhdr) +{ + return (dhdr->info >> WMI_DATA_HDR_UP_SHIFT) & WMI_DATA_HDR_UP_MASK; +} + +static inline void wmi_data_hdr_set_up(struct wmi_data_hdr *dhdr, + u8 usr_pri) +{ + dhdr->info &= ~(WMI_DATA_HDR_UP_MASK << WMI_DATA_HDR_UP_SHIFT); + dhdr->info |= usr_pri << WMI_DATA_HDR_UP_SHIFT; +} + +static inline u8 wmi_data_hdr_get_dot11(struct wmi_data_hdr *dhdr) +{ + u8 data_type; + + data_type = (dhdr->info >> WMI_DATA_HDR_DATA_TYPE_SHIFT) & + WMI_DATA_HDR_DATA_TYPE_MASK; + return (data_type == WMI_DATA_HDR_DATA_TYPE_802_11); +} + +static inline u16 wmi_data_hdr_get_seqno(struct wmi_data_hdr *dhdr) +{ + return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_SEQNO_SHIFT) & + WMI_DATA_HDR_SEQNO_MASK; +} + +static inline u8 wmi_data_hdr_is_amsdu(struct wmi_data_hdr *dhdr) +{ + return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_AMSDU_SHIFT) & + WMI_DATA_HDR_AMSDU_MASK; +} + +static inline u8 wmi_data_hdr_get_meta(struct wmi_data_hdr *dhdr) +{ + return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_META_SHIFT) & + WMI_DATA_HDR_META_MASK; +} + +static inline u8 wmi_data_hdr_get_if_idx(struct wmi_data_hdr *dhdr) +{ + return le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_IF_IDX_MASK; +} + +/* Tx meta version definitions */ +#define WMI_MAX_TX_META_SZ 12 +#define WMI_META_VERSION_1 0x01 +#define WMI_META_VERSION_2 0x02 + +/* Flag to signal to FW to calculate TCP checksum */ +#define WMI_META_V2_FLAG_CSUM_OFFLOAD 0x01 + +struct wmi_tx_meta_v1 { + /* packet ID to identify the tx request */ + u8 pkt_id; + + /* rate policy to be used for the tx of this frame */ + u8 rate_plcy_id; +} __packed; + +struct wmi_tx_meta_v2 { + /* + * Offset from start of the WMI header for csum calculation to + * begin. 
+ */ + u8 csum_start; + + /* offset from start of WMI header where final csum goes */ + u8 csum_dest; + + /* no of bytes over which csum is calculated */ + u8 csum_flags; +} __packed; + +struct wmi_rx_meta_v1 { + u8 status; + + /* rate index mapped to rate at which this packet was received. */ + u8 rix; + + /* rssi of packet */ + u8 rssi; + + /* rf channel during packet reception */ + u8 channel; + + __le16 flags; +} __packed; + +struct wmi_rx_meta_v2 { + __le16 csum; + + /* bit 0 set -partial csum valid bit 1 set -test mode */ + u8 csum_flags; +} __packed; + +#define WMI_CMD_HDR_IF_ID_MASK 0xF + +/* Control Path */ +struct wmi_cmd_hdr { + __le16 cmd_id; + + /* info1 - 16 bits + * b03:b00 - id + * b15:b04 - unused */ + __le16 info1; + + /* for alignment */ + __le16 reserved; +} __packed; + +static inline u8 wmi_cmd_hdr_get_if_idx(struct wmi_cmd_hdr *chdr) +{ + return le16_to_cpu(chdr->info1) & WMI_CMD_HDR_IF_ID_MASK; +} + +/* List of WMI commands */ +enum wmi_cmd_id { + WMI_CONNECT_CMDID = 0x0001, + WMI_RECONNECT_CMDID, + WMI_DISCONNECT_CMDID, + WMI_SYNCHRONIZE_CMDID, + WMI_CREATE_PSTREAM_CMDID, + WMI_DELETE_PSTREAM_CMDID, + /* WMI_START_SCAN_CMDID is to be deprecated. Use + * WMI_BEGIN_SCAN_CMDID instead. The new cmd supports P2P mgmt + * operations using station interface. + */ + WMI_START_SCAN_CMDID, + WMI_SET_SCAN_PARAMS_CMDID, + WMI_SET_BSS_FILTER_CMDID, + WMI_SET_PROBED_SSID_CMDID, /* 10 */ + WMI_SET_LISTEN_INT_CMDID, + WMI_SET_BMISS_TIME_CMDID, + WMI_SET_DISC_TIMEOUT_CMDID, + WMI_GET_CHANNEL_LIST_CMDID, + WMI_SET_BEACON_INT_CMDID, + WMI_GET_STATISTICS_CMDID, + WMI_SET_CHANNEL_PARAMS_CMDID, + WMI_SET_POWER_MODE_CMDID, + WMI_SET_IBSS_PM_CAPS_CMDID, + WMI_SET_POWER_PARAMS_CMDID, /* 20 */ + WMI_SET_POWERSAVE_TIMERS_POLICY_CMDID, + WMI_ADD_CIPHER_KEY_CMDID, + WMI_DELETE_CIPHER_KEY_CMDID, + WMI_ADD_KRK_CMDID, + WMI_DELETE_KRK_CMDID, + WMI_SET_PMKID_CMDID, + WMI_SET_TX_PWR_CMDID, + WMI_GET_TX_PWR_CMDID, + WMI_SET_ASSOC_INFO_CMDID, + WMI_ADD_BAD_AP_CMDID, /* 30 */ + WMI_DELETE_BAD_AP_CMDID, + WMI_SET_TKIP_COUNTERMEASURES_CMDID, + WMI_RSSI_THRESHOLD_PARAMS_CMDID, + WMI_TARGET_ERROR_REPORT_BITMASK_CMDID, + WMI_SET_ACCESS_PARAMS_CMDID, + WMI_SET_RETRY_LIMITS_CMDID, + WMI_SET_OPT_MODE_CMDID, + WMI_OPT_TX_FRAME_CMDID, + WMI_SET_VOICE_PKT_SIZE_CMDID, + WMI_SET_MAX_SP_LEN_CMDID, /* 40 */ + WMI_SET_ROAM_CTRL_CMDID, + WMI_GET_ROAM_TBL_CMDID, + WMI_GET_ROAM_DATA_CMDID, + WMI_ENABLE_RM_CMDID, + WMI_SET_MAX_OFFHOME_DURATION_CMDID, + WMI_EXTENSION_CMDID, /* Non-wireless extensions */ + WMI_SNR_THRESHOLD_PARAMS_CMDID, + WMI_LQ_THRESHOLD_PARAMS_CMDID, + WMI_SET_LPREAMBLE_CMDID, + WMI_SET_RTS_CMDID, /* 50 */ + WMI_CLR_RSSI_SNR_CMDID, + WMI_SET_FIXRATES_CMDID, + WMI_GET_FIXRATES_CMDID, + WMI_SET_AUTH_MODE_CMDID, + WMI_SET_REASSOC_MODE_CMDID, + WMI_SET_WMM_CMDID, + WMI_SET_WMM_TXOP_CMDID, + WMI_TEST_CMDID, + + /* COEX AR6002 only */ + WMI_SET_BT_STATUS_CMDID, + WMI_SET_BT_PARAMS_CMDID, /* 60 */ + + WMI_SET_KEEPALIVE_CMDID, + WMI_GET_KEEPALIVE_CMDID, + WMI_SET_APPIE_CMDID, + WMI_GET_APPIE_CMDID, + WMI_SET_WSC_STATUS_CMDID, + + /* Wake on Wireless */ + WMI_SET_HOST_SLEEP_MODE_CMDID, + WMI_SET_WOW_MODE_CMDID, + WMI_GET_WOW_LIST_CMDID, + WMI_ADD_WOW_PATTERN_CMDID, + WMI_DEL_WOW_PATTERN_CMDID, /* 70 */ + + WMI_SET_FRAMERATES_CMDID, + WMI_SET_AP_PS_CMDID, + WMI_SET_QOS_SUPP_CMDID, + WMI_SET_IE_CMDID, + + /* WMI_THIN_RESERVED_... mark the start and end + * values for WMI_THIN_RESERVED command IDs. 
These + * command IDs can be found in wmi_thin.h */ + WMI_THIN_RESERVED_START = 0x8000, + WMI_THIN_RESERVED_END = 0x8fff, + + /* Developer commands starts at 0xF000 */ + WMI_SET_BITRATE_CMDID = 0xF000, + WMI_GET_BITRATE_CMDID, + WMI_SET_WHALPARAM_CMDID, + WMI_SET_MAC_ADDRESS_CMDID, + WMI_SET_AKMP_PARAMS_CMDID, + WMI_SET_PMKID_LIST_CMDID, + WMI_GET_PMKID_LIST_CMDID, + WMI_ABORT_SCAN_CMDID, + WMI_SET_TARGET_EVENT_REPORT_CMDID, + + /* Unused */ + WMI_UNUSED1, + WMI_UNUSED2, + + /* AP mode commands */ + WMI_AP_HIDDEN_SSID_CMDID, + WMI_AP_SET_NUM_STA_CMDID, + WMI_AP_ACL_POLICY_CMDID, + WMI_AP_ACL_MAC_LIST_CMDID, + WMI_AP_CONFIG_COMMIT_CMDID, + WMI_AP_SET_MLME_CMDID, + WMI_AP_SET_PVB_CMDID, + WMI_AP_CONN_INACT_CMDID, + WMI_AP_PROT_SCAN_TIME_CMDID, + WMI_AP_SET_COUNTRY_CMDID, + WMI_AP_SET_DTIM_CMDID, + WMI_AP_MODE_STAT_CMDID, + + WMI_SET_IP_CMDID, + WMI_SET_PARAMS_CMDID, + WMI_SET_MCAST_FILTER_CMDID, + WMI_DEL_MCAST_FILTER_CMDID, + + WMI_ALLOW_AGGR_CMDID, + WMI_ADDBA_REQ_CMDID, + WMI_DELBA_REQ_CMDID, + WMI_SET_HT_CAP_CMDID, + WMI_SET_HT_OP_CMDID, + WMI_SET_TX_SELECT_RATES_CMDID, + WMI_SET_TX_SGI_PARAM_CMDID, + WMI_SET_RATE_POLICY_CMDID, + + WMI_HCI_CMD_CMDID, + WMI_RX_FRAME_FORMAT_CMDID, + WMI_SET_THIN_MODE_CMDID, + WMI_SET_BT_WLAN_CONN_PRECEDENCE_CMDID, + + WMI_AP_SET_11BG_RATESET_CMDID, + WMI_SET_PMK_CMDID, + WMI_MCAST_FILTER_CMDID, + + /* COEX CMDID AR6003 */ + WMI_SET_BTCOEX_FE_ANT_CMDID, + WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMDID, + WMI_SET_BTCOEX_SCO_CONFIG_CMDID, + WMI_SET_BTCOEX_A2DP_CONFIG_CMDID, + WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMDID, + WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMDID, + WMI_SET_BTCOEX_DEBUG_CMDID, + WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID, + WMI_GET_BTCOEX_STATS_CMDID, + WMI_GET_BTCOEX_CONFIG_CMDID, + + WMI_SET_DFS_ENABLE_CMDID, /* F034 */ + WMI_SET_DFS_MINRSSITHRESH_CMDID, + WMI_SET_DFS_MAXPULSEDUR_CMDID, + WMI_DFS_RADAR_DETECTED_CMDID, + + /* P2P commands */ + WMI_P2P_SET_CONFIG_CMDID, /* F038 */ + WMI_WPS_SET_CONFIG_CMDID, + WMI_SET_REQ_DEV_ATTR_CMDID, + WMI_P2P_FIND_CMDID, + WMI_P2P_STOP_FIND_CMDID, + WMI_P2P_GO_NEG_START_CMDID, + WMI_P2P_LISTEN_CMDID, + + WMI_CONFIG_TX_MAC_RULES_CMDID, /* F040 */ + WMI_SET_PROMISCUOUS_MODE_CMDID, + WMI_RX_FRAME_FILTER_CMDID, + WMI_SET_CHANNEL_CMDID, + + /* WAC commands */ + WMI_ENABLE_WAC_CMDID, + WMI_WAC_SCAN_REPLY_CMDID, + WMI_WAC_CTRL_REQ_CMDID, + WMI_SET_DIV_PARAMS_CMDID, + + WMI_GET_PMK_CMDID, + WMI_SET_PASSPHRASE_CMDID, + WMI_SEND_ASSOC_RES_CMDID, + WMI_SET_ASSOC_REQ_RELAY_CMDID, + + /* ACS command, consists of sub-commands */ + WMI_ACS_CTRL_CMDID, + WMI_SET_EXCESS_TX_RETRY_THRES_CMDID, + WMI_SET_TBD_TIME_CMDID, /*added for wmiconfig command for TBD */ + + /* Pktlog cmds */ + WMI_PKTLOG_ENABLE_CMDID, + WMI_PKTLOG_DISABLE_CMDID, + + /* More P2P Cmds */ + WMI_P2P_GO_NEG_REQ_RSP_CMDID, + WMI_P2P_GRP_INIT_CMDID, + WMI_P2P_GRP_FORMATION_DONE_CMDID, + WMI_P2P_INVITE_CMDID, + WMI_P2P_INVITE_REQ_RSP_CMDID, + WMI_P2P_PROV_DISC_REQ_CMDID, + WMI_P2P_SET_CMDID, + + WMI_GET_RFKILL_MODE_CMDID, + WMI_SET_RFKILL_MODE_CMDID, + WMI_AP_SET_APSD_CMDID, + WMI_AP_APSD_BUFFERED_TRAFFIC_CMDID, + + WMI_P2P_SDPD_TX_CMDID, /* F05C */ + WMI_P2P_STOP_SDPD_CMDID, + WMI_P2P_CANCEL_CMDID, + /* Ultra low power store / recall commands */ + WMI_STORERECALL_CONFIGURE_CMDID, + WMI_STORERECALL_RECALL_CMDID, + WMI_STORERECALL_HOST_READY_CMDID, + WMI_FORCE_TARGET_ASSERT_CMDID, + + WMI_SET_PROBED_SSID_EX_CMDID, + WMI_SET_NETWORK_LIST_OFFLOAD_CMDID, + WMI_SET_ARP_NS_OFFLOAD_CMDID, + WMI_ADD_WOW_EXT_PATTERN_CMDID, + WMI_GTK_OFFLOAD_OP_CMDID, + WMI_REMAIN_ON_CHNL_CMDID, + 
WMI_CANCEL_REMAIN_ON_CHNL_CMDID, + /* WMI_SEND_ACTION_CMDID is to be deprecated. Use + * WMI_SEND_MGMT_CMDID instead. The new cmd supports P2P mgmt + * operations using station interface. + */ + WMI_SEND_ACTION_CMDID, + WMI_PROBE_REQ_REPORT_CMDID, + WMI_DISABLE_11B_RATES_CMDID, + WMI_SEND_PROBE_RESPONSE_CMDID, + WMI_GET_P2P_INFO_CMDID, + WMI_AP_JOIN_BSS_CMDID, + + WMI_SMPS_ENABLE_CMDID, + WMI_SMPS_CONFIG_CMDID, + WMI_SET_RATECTRL_PARM_CMDID, + /* LPL specific commands*/ + WMI_LPL_FORCE_ENABLE_CMDID, + WMI_LPL_SET_POLICY_CMDID, + WMI_LPL_GET_POLICY_CMDID, + WMI_LPL_GET_HWSTATE_CMDID, + WMI_LPL_SET_PARAMS_CMDID, + WMI_LPL_GET_PARAMS_CMDID, + + WMI_SET_BUNDLE_PARAM_CMDID, + + /*GreenTx specific commands*/ + + WMI_GREENTX_PARAMS_CMDID, + + WMI_RTT_MEASREQ_CMDID, + WMI_RTT_CAPREQ_CMDID, + WMI_RTT_STATUSREQ_CMDID, + + /* WPS Commands */ + WMI_WPS_START_CMDID, + WMI_GET_WPS_STATUS_CMDID, + + /* More P2P commands */ + WMI_SET_NOA_CMDID, + WMI_GET_NOA_CMDID, + WMI_SET_OPPPS_CMDID, + WMI_GET_OPPPS_CMDID, + WMI_ADD_PORT_CMDID, + WMI_DEL_PORT_CMDID, + + /* 802.11w cmd */ + WMI_SET_RSN_CAP_CMDID, + WMI_GET_RSN_CAP_CMDID, + WMI_SET_IGTK_CMDID, + + WMI_RX_FILTER_COALESCE_FILTER_OP_CMDID, + WMI_RX_FILTER_SET_FRAME_TEST_LIST_CMDID, + + WMI_SEND_MGMT_CMDID, + WMI_BEGIN_SCAN_CMDID, + + WMI_SET_BLACK_LIST, + WMI_SET_MCASTRATE, + + WMI_STA_BMISS_ENHANCE_CMDID, + + WMI_SET_REGDOMAIN_CMDID, + + WMI_SET_RSSI_FILTER_CMDID, + + WMI_SET_KEEP_ALIVE_EXT, + + WMI_VOICE_DETECTION_ENABLE_CMDID, + + WMI_SET_TXE_NOTIFY_CMDID, + + WMI_SET_RECOVERY_TEST_PARAMETER_CMDID, /*0xf094*/ + + WMI_ENABLE_SCHED_SCAN_CMDID, +}; + +enum wmi_mgmt_frame_type { + WMI_FRAME_BEACON = 0, + WMI_FRAME_PROBE_REQ, + WMI_FRAME_PROBE_RESP, + WMI_FRAME_ASSOC_REQ, + WMI_FRAME_ASSOC_RESP, + WMI_NUM_MGMT_FRAME +}; + +enum wmi_ie_field_type { + WMI_RSN_IE_CAPB = 0x1, + WMI_IE_FULL = 0xFF, /* indicats full IE */ +}; + +/* WMI_CONNECT_CMDID */ +enum network_type { + INFRA_NETWORK = 0x01, + ADHOC_NETWORK = 0x02, + ADHOC_CREATOR = 0x04, + AP_NETWORK = 0x10, +}; + +enum network_subtype { + SUBTYPE_NONE, + SUBTYPE_BT, + SUBTYPE_P2PDEV, + SUBTYPE_P2PCLIENT, + SUBTYPE_P2PGO, +}; + +enum dot11_auth_mode { + OPEN_AUTH = 0x01, + SHARED_AUTH = 0x02, + + /* different from IEEE_AUTH_MODE definitions */ + LEAP_AUTH = 0x04, +}; + +enum auth_mode { + NONE_AUTH = 0x01, + WPA_AUTH = 0x02, + WPA2_AUTH = 0x04, + WPA_PSK_AUTH = 0x08, + WPA2_PSK_AUTH = 0x10, + WPA_AUTH_CCKM = 0x20, + WPA2_AUTH_CCKM = 0x40, +}; + +#define WMI_MAX_KEY_INDEX 3 + +#define WMI_MAX_KEY_LEN 32 + +/* + * NB: these values are ordered carefully; there are lots of + * implications in any reordering. In particular beware + * that 4 is not used to avoid conflicting with IEEE80211_F_PRIVACY. + */ +#define ATH6KL_CIPHER_WEP 0 +#define ATH6KL_CIPHER_TKIP 1 +#define ATH6KL_CIPHER_AES_OCB 2 +#define ATH6KL_CIPHER_AES_CCM 3 +#define ATH6KL_CIPHER_CKIP 5 +#define ATH6KL_CIPHER_CCKM_KRK 6 +#define ATH6KL_CIPHER_NONE 7 /* pseudo value */ + +/* + * 802.11 rate set. 
+ */ +#define ATH6KL_RATE_MAXSIZE 15 /* max rates we'll handle */ + +#define ATH_OUI_TYPE 0x01 +#define WPA_OUI_TYPE 0x01 +#define WMM_PARAM_OUI_SUBTYPE 0x01 +#define WMM_OUI_TYPE 0x02 +#define WSC_OUT_TYPE 0x04 + +enum wmi_connect_ctrl_flags_bits { + CONNECT_ASSOC_POLICY_USER = 0x0001, + CONNECT_SEND_REASSOC = 0x0002, + CONNECT_IGNORE_WPAx_GROUP_CIPHER = 0x0004, + CONNECT_PROFILE_MATCH_DONE = 0x0008, + CONNECT_IGNORE_AAC_BEACON = 0x0010, + CONNECT_CSA_FOLLOW_BSS = 0x0020, + CONNECT_DO_WPA_OFFLOAD = 0x0040, + CONNECT_DO_NOT_DEAUTH = 0x0080, + CONNECT_WPS_FLAG = 0x0100, +}; + +struct wmi_connect_cmd { + u8 nw_type; + u8 dot11_auth_mode; + u8 auth_mode; + u8 prwise_crypto_type; + u8 prwise_crypto_len; + u8 grp_crypto_type; + u8 grp_crypto_len; + u8 ssid_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + __le16 ch; + u8 bssid[ETH_ALEN]; + __le32 ctrl_flags; + u8 nw_subtype; +} __packed; + +/* WMI_RECONNECT_CMDID */ +struct wmi_reconnect_cmd { + /* channel hint */ + __le16 channel; + + /* mandatory if set */ + u8 bssid[ETH_ALEN]; +} __packed; + +/* WMI_ADD_CIPHER_KEY_CMDID */ +enum key_usage { + PAIRWISE_USAGE = 0x00, + GROUP_USAGE = 0x01, + + /* default Tx Key - static WEP only */ + TX_USAGE = 0x02, +}; + +/* + * Bit Flag + * Bit 0 - Initialise TSC - default is Initialize + */ +#define KEY_OP_INIT_TSC 0x01 +#define KEY_OP_INIT_RSC 0x02 + +/* default initialise the TSC & RSC */ +#define KEY_OP_INIT_VAL 0x03 +#define KEY_OP_VALID_MASK 0x03 + +struct wmi_add_cipher_key_cmd { + u8 key_index; + u8 key_type; + + /* enum key_usage */ + u8 key_usage; + + u8 key_len; + + /* key replay sequence counter */ + u8 key_rsc[8]; + + u8 key[WLAN_MAX_KEY_LEN]; + + /* additional key control info */ + u8 key_op_ctrl; + + u8 key_mac_addr[ETH_ALEN]; +} __packed; + +/* WMI_DELETE_CIPHER_KEY_CMDID */ +struct wmi_delete_cipher_key_cmd { + u8 key_index; +} __packed; + +#define WMI_KRK_LEN 16 + +/* WMI_ADD_KRK_CMDID */ +struct wmi_add_krk_cmd { + u8 krk[WMI_KRK_LEN]; +} __packed; + +/* WMI_SETPMKID_CMDID */ + +#define WMI_PMKID_LEN 16 + +enum pmkid_enable_flg { + PMKID_DISABLE = 0, + PMKID_ENABLE = 1, +}; + +struct wmi_setpmkid_cmd { + u8 bssid[ETH_ALEN]; + + /* enum pmkid_enable_flg */ + u8 enable; + + u8 pmkid[WMI_PMKID_LEN]; +} __packed; + +/* WMI_START_SCAN_CMD */ +enum wmi_scan_type { + WMI_LONG_SCAN = 0, + WMI_SHORT_SCAN = 1, +}; + +struct wmi_supp_rates { + u8 nrates; + u8 rates[ATH6KL_RATE_MAXSIZE]; +}; + +struct wmi_begin_scan_cmd { + __le32 force_fg_scan; + + /* for legacy cisco AP compatibility */ + __le32 is_legacy; + + /* max duration in the home channel(msec) */ + __le32 home_dwell_time; + + /* time interval between scans (msec) */ + __le32 force_scan_intvl; + + /* no CCK rates */ + __le32 no_cck; + + /* enum wmi_scan_type */ + u8 scan_type; + + /* Supported rates to advertise in the probe request frames */ + struct wmi_supp_rates supp_rates[ATH6KL_NUM_BANDS]; + + /* how many channels follow */ + u8 num_ch; + + /* channels in Mhz */ + __le16 ch_list[]; +} __packed; + +/* wmi_start_scan_cmd is to be deprecated. Use + * wmi_begin_scan_cmd instead. The new structure supports P2P mgmt + * operations using station interface. 
+ */
+struct wmi_start_scan_cmd {
+	__le32 force_fg_scan;
+
+	/* for legacy cisco AP compatibility */
+	__le32 is_legacy;
+
+	/* max duration in the home channel (msec) */
+	__le32 home_dwell_time;
+
+	/* time interval between scans (msec) */
+	__le32 force_scan_intvl;
+
+	/* enum wmi_scan_type */
+	u8 scan_type;
+
+	/* how many channels follow */
+	u8 num_ch;
+
+	/* channels in MHz */
+	__le16 ch_list[];
+} __packed;
+
+/*
+ * Warning: scan control flag value of 0xFF is used to disable
+ * all flags in WMI_SCAN_PARAMS_CMD. Do not add any more
+ * flags here
+ */
+enum wmi_scan_ctrl_flags_bits {
+	/* set if the connect cmd may trigger a scan */
+	CONNECT_SCAN_CTRL_FLAGS = 0x01,
+
+	/* set if scanning for the SSID it is already connected to */
+	SCAN_CONNECTED_CTRL_FLAGS = 0x02,
+
+	/* set to enable active scan */
+	ACTIVE_SCAN_CTRL_FLAGS = 0x04,
+
+	/* set to enable roam scan on bmiss and low rssi */
+	ROAM_SCAN_CTRL_FLAGS = 0x08,
+
+	/* set if following the customer BSSINFO reporting rule */
+	REPORT_BSSINFO_CTRL_FLAGS = 0x10,
+
+	/* if disabled, target doesn't scan after a disconnect event */
+	ENABLE_AUTO_CTRL_FLAGS = 0x20,
+
+	/*
+	 * Scan complete event with canceled status will be generated when
+	 * a scan is preempted before it gets completed.
+	 */
+	ENABLE_SCAN_ABORT_EVENT = 0x40
+};
+
+struct wmi_scan_params_cmd {
+	/* sec */
+	__le16 fg_start_period;
+
+	/* sec */
+	__le16 fg_end_period;
+
+	/* sec */
+	__le16 bg_period;
+
+	/* msec */
+	__le16 maxact_chdwell_time;
+
+	/* msec */
+	__le16 pas_chdwell_time;
+
+	/* how many short scans for one long */
+	u8 short_scan_ratio;
+
+	u8 scan_ctrl_flags;
+
+	/* msec */
+	__le16 minact_chdwell_time;
+
+	/* max active scans per ssid */
+	__le16 maxact_scan_per_ssid;
+
+	/* msecs */
+	__le32 max_dfsch_act_time;
+} __packed;
+
+/* WMI_ENABLE_SCHED_SCAN_CMDID */
+struct wmi_enable_sched_scan_cmd {
+	u8 enable;
+} __packed;
+
+/* WMI_SET_BSS_FILTER_CMDID */
+enum wmi_bss_filter {
+	/* no beacons forwarded */
+	NONE_BSS_FILTER = 0x0,
+
+	/* all beacons forwarded */
+	ALL_BSS_FILTER,
+
+	/* only beacons matching profile */
+	PROFILE_FILTER,
+
+	/* all but beacons matching profile */
+	ALL_BUT_PROFILE_FILTER,
+
+	/* only beacons matching current BSS */
+	CURRENT_BSS_FILTER,
+
+	/* all but beacons matching BSS */
+	ALL_BUT_BSS_FILTER,
+
+	/* beacons matching probed ssid */
+	PROBED_SSID_FILTER,
+
+	/* beacons matching matched ssid */
+	MATCHED_SSID_FILTER,
+
+	/* marker only */
+	LAST_BSS_FILTER,
+};
+
+struct wmi_bss_filter_cmd {
+	/* see, enum wmi_bss_filter */
+	u8 bss_filter;
+
+	/* for alignment */
+	u8 reserved1;
+
+	/* for alignment */
+	__le16 reserved2;
+
+	__le32 ie_mask;
+} __packed;
+
+/* WMI_SET_PROBED_SSID_CMDID */
+#define MAX_PROBED_SSIDS 16
+
+enum wmi_ssid_flag {
+	/* disables entry */
+	DISABLE_SSID_FLAG = 0,
+
+	/* probes specified ssid */
+	SPECIFIC_SSID_FLAG = 0x01,
+
+	/* probes for any ssid */
+	ANY_SSID_FLAG = 0x02,
+
+	/* match for ssid */
+	MATCH_SSID_FLAG = 0x08,
+};
+
+struct wmi_probed_ssid_cmd {
+	/* 0 to MAX_PROBED_SSIDS - 1 */
+	u8 entry_index;
+
+	/* see, enum wmi_ssid_flag */
+	u8 flag;
+
+	u8 ssid_len;
+	u8 ssid[IEEE80211_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_LISTEN_INT_CMDID
+ * The Listen interval is between 15 and 3000 TUs
+ */
+struct wmi_listen_int_cmd {
+	__le16 listen_intvl;
+	__le16 num_beacons;
+} __packed;
+
+/* WMI_SET_BMISS_TIME_CMDID */
+struct wmi_bmiss_time_cmd {
+	__le16 bmiss_time;
+	__le16 num_beacons;
+};
+
+/* WMI_STA_ENHANCE_BMISS_CMDID */
+struct wmi_sta_bmiss_enhance_cmd {
+	u8 enable;
+} __packed;
+
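+/*
+ * Illustrative sketch (not part of the original header): filling in one
+ * probed-SSID entry as defined above. Entry 0 is programmed to actively
+ * probe for one specific SSID; the helper name is hypothetical, only the
+ * struct, the flag values and the length limit come from this file.
+ */
+static inline void example_fill_probed_ssid(struct wmi_probed_ssid_cmd *cm,
+					    const u8 *ssid, u8 ssid_len)
+{
+	cm->entry_index = 0;		/* 0 to MAX_PROBED_SSIDS - 1 */
+	cm->flag = SPECIFIC_SSID_FLAG;	/* probe for this exact SSID */
+	cm->ssid_len = ssid_len;	/* <= IEEE80211_MAX_SSID_LEN */
+	memcpy(cm->ssid, ssid, ssid_len);
+}
+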
+struct wmi_set_regdomain_cmd {
+ u8 length;
+ u8 iso_name[2];
+} __packed;
+
+/* WMI_SET_POWER_MODE_CMDID */
+enum wmi_power_mode {
+ REC_POWER = 0x01,
+ MAX_PERF_POWER,
+};
+
+struct wmi_power_mode_cmd {
+ /* see, enum wmi_power_mode */
+ u8 pwr_mode;
+} __packed;
+
+/*
+ * Policy to determine whether power save failure event should be sent to
+ * host during scanning
+ */
+enum power_save_fail_event_policy {
+ SEND_POWER_SAVE_FAIL_EVENT_ALWAYS = 1,
+ IGNORE_PS_FAIL_DURING_SCAN = 2,
+};
+
+struct wmi_power_params_cmd {
+ /* msec */
+ __le16 idle_period;
+
+ __le16 pspoll_number;
+ __le16 dtim_policy;
+ __le16 tx_wakeup_policy;
+ __le16 num_tx_to_wakeup;
+ __le16 ps_fail_event_policy;
+} __packed;
+
+/*
+ * Ratemask for the below modes should be passed
+ * to WMI_SET_TX_SELECT_RATES_CMDID.
+ * AR6003 has a 32 bit mask for each mode.
+ * First 12 bits for legacy rates, 13 to 20
+ * bits for HT 20 rates and 21 to 28 bits for
+ * HT 40 rates
+ */
+enum wmi_mode_phy {
+ WMI_RATES_MODE_11A = 0,
+ WMI_RATES_MODE_11G,
+ WMI_RATES_MODE_11B,
+ WMI_RATES_MODE_11GONLY,
+ WMI_RATES_MODE_11A_HT20,
+ WMI_RATES_MODE_11G_HT20,
+ WMI_RATES_MODE_11A_HT40,
+ WMI_RATES_MODE_11G_HT40,
+ WMI_RATES_MODE_MAX
+};
+
+/* WMI_SET_TX_SELECT_RATES_CMDID */
+struct wmi_set_tx_select_rates32_cmd {
+ __le32 ratemask[WMI_RATES_MODE_MAX];
+} __packed;
+
+/* WMI_SET_TX_SELECT_RATES_CMDID */
+struct wmi_set_tx_select_rates64_cmd {
+ __le64 ratemask[WMI_RATES_MODE_MAX];
+} __packed;
+
+/* WMI_SET_DISC_TIMEOUT_CMDID */
+struct wmi_disc_timeout_cmd {
+ /* seconds */
+ u8 discon_timeout;
+} __packed;
+
+enum dir_type {
+ UPLINK_TRAFFIC = 0,
+ DNLINK_TRAFFIC = 1,
+ BIDIR_TRAFFIC = 2,
+};
+
+enum voiceps_cap_type {
+ DISABLE_FOR_THIS_AC = 0,
+ ENABLE_FOR_THIS_AC = 1,
+ ENABLE_FOR_ALL_AC = 2,
+};
+
+enum traffic_type {
+ TRAFFIC_TYPE_APERIODIC = 0,
+ TRAFFIC_TYPE_PERIODIC = 1,
+};
+
+/* WMI_SYNCHRONIZE_CMDID */
+struct wmi_sync_cmd {
+ u8 data_sync_map;
+} __packed;
+
+/* WMI_CREATE_PSTREAM_CMDID */
+struct wmi_create_pstream_cmd {
+ /* msec */
+ __le32 min_service_int;
+
+ /* msec */
+ __le32 max_service_int;
+
+ /* msec */
+ __le32 inactivity_int;
+
+ /* msec */
+ __le32 suspension_int;
+
+ __le32 service_start_time;
+
+ /* in bps */
+ __le32 min_data_rate;
+
+ /* in bps */
+ __le32 mean_data_rate;
+
+ /* in bps */
+ __le32 peak_data_rate;
+
+ __le32 max_burst_size;
+ __le32 delay_bound;
+
+ /* in bps */
+ __le32 min_phy_rate;
+
+ __le32 sba;
+ __le32 medium_time;
+
+ /* in octets */
+ __le16 nominal_msdu;
+
+ /* in octets */
+ __le16 max_msdu;
+
+ u8 traffic_class;
+
+ /* see, enum dir_type */
+ u8 traffic_direc;
+
+ u8 rx_queue_num;
+
+ /* see, enum traffic_type */
+ u8 traffic_type;
+
+ /* see, enum voiceps_cap_type */
+ u8 voice_psc_cap;
+ u8 tsid;
+
+ /* 802.1D user priority */
+ u8 user_pri;
+
+ /* nominal phy rate */
+ u8 nominal_phy;
+} __packed;
+
+/* WMI_DELETE_PSTREAM_CMDID */
+struct wmi_delete_pstream_cmd {
+ u8 tx_queue_num;
+ u8 rx_queue_num;
+ u8 traffic_direc;
+ u8 traffic_class;
+ u8 tsid;
+} __packed;
+
+/* WMI_SET_CHANNEL_PARAMS_CMDID */
+enum wmi_phy_mode {
+ WMI_11A_MODE = 0x1,
+ WMI_11G_MODE = 0x2,
+ WMI_11AG_MODE = 0x3,
+ WMI_11B_MODE = 0x4,
+ WMI_11GONLY_MODE = 0x5,
+ WMI_11G_HT20 = 0x6,
+};
+
+#define WMI_MAX_CHANNELS 32
+
+/*
+ * WMI_RSSI_THRESHOLD_PARAMS_CMDID
+ * Setting the polltime to 0 would disable polling.
+ * Threshold values are in ascending order and must satisfy:
+ * (lowThreshold_lowerVal < lowThreshold_upperVal < highThreshold_lowerVal
+ * < highThreshold_upperVal)
+ */
+
+struct wmi_rssi_threshold_params_cmd {
+ /* polling time as a factor of LI */
+ __le32 poll_time;
+
+ /* lowest of upper */
+ a_sle16 thresh_above1_val;
+
+ a_sle16 thresh_above2_val;
+ a_sle16 thresh_above3_val;
+ a_sle16 thresh_above4_val;
+ a_sle16 thresh_above5_val;
+
+ /* highest of upper */
+ a_sle16 thresh_above6_val;
+
+ /* lowest of below */
+ a_sle16 thresh_below1_val;
+
+ a_sle16 thresh_below2_val;
+ a_sle16 thresh_below3_val;
+ a_sle16 thresh_below4_val;
+ a_sle16 thresh_below5_val;
+
+ /* highest of below */
+ a_sle16 thresh_below6_val;
+
+ /* "alpha" */
+ u8 weight;
+
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_SNR_THRESHOLD_PARAMS_CMDID
+ * Setting the polltime to 0 would disable polling.
+ */
+
+struct wmi_snr_threshold_params_cmd {
+ /* polling time as a factor of LI */
+ __le32 poll_time;
+
+ /* "alpha" */
+ u8 weight;
+
+ /* lowest of upper */
+ u8 thresh_above1_val;
+
+ u8 thresh_above2_val;
+ u8 thresh_above3_val;
+
+ /* highest of upper */
+ u8 thresh_above4_val;
+
+ /* lowest of below */
+ u8 thresh_below1_val;
+
+ u8 thresh_below2_val;
+ u8 thresh_below3_val;
+
+ /* highest of below */
+ u8 thresh_below4_val;
+
+ u8 reserved[3];
+} __packed;
+
+/* Don't report BSSs with signal (RSSI) below this threshold */
+struct wmi_set_rssi_filter_cmd {
+ s8 rssi;
+} __packed;
+
+enum wmi_preamble_policy {
+ WMI_IGNORE_BARKER_IN_ERP = 0,
+ WMI_FOLLOW_BARKER_IN_ERP,
+};
+
+struct wmi_set_lpreamble_cmd {
+ u8 status;
+ u8 preamble_policy;
+} __packed;
+
+struct wmi_set_rts_cmd {
+ __le16 threshold;
+} __packed;
+
+/* WMI_SET_TX_PWR_CMDID */
+struct wmi_set_tx_pwr_cmd {
+ /* in dBm units */
+ u8 dbM;
+} __packed;
+
+struct wmi_tx_pwr_reply {
+ /* in dBm units */
+ u8 dbM;
+} __packed;
+
+struct wmi_report_sleep_state_event {
+ __le32 sleep_state;
+};
+
+enum wmi_report_sleep_status {
+ WMI_REPORT_SLEEP_STATUS_IS_DEEP_SLEEP = 0,
+ WMI_REPORT_SLEEP_STATUS_IS_AWAKE
+};
+
+enum target_event_report_config {
+ /* default */
+ DISCONN_EVT_IN_RECONN = 0,
+
+ NO_DISCONN_EVT_IN_RECONN
+};
+
+struct wmi_mcast_filter_cmd {
+ u8 mcast_all_enable;
+} __packed;
+
+#define ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE 6
+struct wmi_mcast_filter_add_del_cmd {
+ u8 mcast_mac[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
+} __packed;
+
+struct wmi_set_htcap_cmd {
+ u8 band;
+ u8 ht_enable;
+ u8 ht40_supported;
+ u8 ht20_sgi;
+ u8 ht40_sgi;
+ u8 intolerant_40mhz;
+ u8 max_ampdu_len_exp;
+} __packed;
+
+/* Command Replies */
+
+/* WMI_GET_CHANNEL_LIST_CMDID reply */
+struct wmi_channel_list_reply {
+ u8 reserved;
+
+ /* number of channels in reply */
+ u8 num_ch;
+
+ /* channel in MHz */
+ __le16 ch_list[];
+} __packed;
+
+/* List of Events (target to host) */
+enum wmi_event_id {
+ WMI_READY_EVENTID = 0x1001,
+ WMI_CONNECT_EVENTID,
+ WMI_DISCONNECT_EVENTID,
+ WMI_BSSINFO_EVENTID,
+ WMI_CMDERROR_EVENTID,
+ WMI_REGDOMAIN_EVENTID,
+ WMI_PSTREAM_TIMEOUT_EVENTID,
+ WMI_NEIGHBOR_REPORT_EVENTID,
+ WMI_TKIP_MICERR_EVENTID,
+ WMI_SCAN_COMPLETE_EVENTID, /* 0x100a */
+ WMI_REPORT_STATISTICS_EVENTID,
+ WMI_RSSI_THRESHOLD_EVENTID,
+ WMI_ERROR_REPORT_EVENTID,
+ WMI_OPT_RX_FRAME_EVENTID,
+ WMI_REPORT_ROAM_TBL_EVENTID,
+ WMI_EXTENSION_EVENTID,
+ WMI_CAC_EVENTID,
+ WMI_SNR_THRESHOLD_EVENTID,
+ WMI_LQ_THRESHOLD_EVENTID,
+ WMI_TX_RETRY_ERR_EVENTID, /* 0x1014 */
+ WMI_REPORT_ROAM_DATA_EVENTID,
+ WMI_TEST_EVENTID,
+ WMI_APLIST_EVENTID,
+
+ WMI_GET_WOW_LIST_EVENTID,
+ WMI_GET_PMKID_LIST_EVENTID,
+ WMI_CHANNEL_CHANGE_EVENTID,
+ WMI_PEER_NODE_EVENTID,
+ WMI_PSPOLL_EVENTID,
+ WMI_DTIMEXPIRY_EVENTID,
+ WMI_WLAN_VERSION_EVENTID,
+ WMI_SET_PARAMS_REPLY_EVENTID,
+ WMI_ADDBA_REQ_EVENTID, /* 0x1020 */
+ WMI_ADDBA_RESP_EVENTID,
+ WMI_DELBA_REQ_EVENTID,
+ WMI_TX_COMPLETE_EVENTID,
+ WMI_HCI_EVENT_EVENTID,
+ WMI_ACL_DATA_EVENTID,
+ WMI_REPORT_SLEEP_STATE_EVENTID,
+ WMI_REPORT_BTCOEX_STATS_EVENTID,
+ WMI_REPORT_BTCOEX_CONFIG_EVENTID,
+ WMI_GET_PMK_EVENTID,
+
+ /* DFS Events */
+ WMI_DFS_HOST_ATTACH_EVENTID,
+ WMI_DFS_HOST_INIT_EVENTID,
+ WMI_DFS_RESET_DELAYLINES_EVENTID,
+ WMI_DFS_RESET_RADARQ_EVENTID,
+ WMI_DFS_RESET_AR_EVENTID,
+ WMI_DFS_RESET_ARQ_EVENTID,
+ WMI_DFS_SET_DUR_MULTIPLIER_EVENTID,
+ WMI_DFS_SET_BANGRADAR_EVENTID,
+ WMI_DFS_SET_DEBUGLEVEL_EVENTID,
+ WMI_DFS_PHYERR_EVENTID,
+
+ /* CCX Events */
+ WMI_CCX_RM_STATUS_EVENTID,
+
+ /* P2P Events */
+ WMI_P2P_GO_NEG_RESULT_EVENTID,
+
+ WMI_WAC_SCAN_DONE_EVENTID,
+ WMI_WAC_REPORT_BSS_EVENTID,
+ WMI_WAC_START_WPS_EVENTID,
+ WMI_WAC_CTRL_REQ_REPLY_EVENTID,
+
+ WMI_REPORT_WMM_PARAMS_EVENTID,
+ WMI_WAC_REJECT_WPS_EVENTID,
+
+ /* More P2P Events */
+ WMI_P2P_GO_NEG_REQ_EVENTID,
+ WMI_P2P_INVITE_REQ_EVENTID,
+ WMI_P2P_INVITE_RCVD_RESULT_EVENTID,
+ WMI_P2P_INVITE_SENT_RESULT_EVENTID,
+ WMI_P2P_PROV_DISC_RESP_EVENTID,
+ WMI_P2P_PROV_DISC_REQ_EVENTID,
+
+ /* RFKILL Events */
+ WMI_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_RFKILL_GET_MODE_CMD_EVENTID,
+
+ WMI_P2P_START_SDPD_EVENTID,
+ WMI_P2P_SDPD_RX_EVENTID,
+
+ WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID = 0x1047,
+
+ WMI_THIN_RESERVED_START_EVENTID = 0x8000,
+ /* Events in this range are reserved for thinmode */
+ WMI_THIN_RESERVED_END_EVENTID = 0x8fff,
+
+ WMI_SET_CHANNEL_EVENTID,
+ WMI_ASSOC_REQ_EVENTID,
+
+ /* Generic ACS event */
+ WMI_ACS_EVENTID,
+ WMI_STORERECALL_STORE_EVENTID,
+ WMI_WOW_EXT_WAKE_EVENTID,
+ WMI_GTK_OFFLOAD_STATUS_EVENTID,
+ WMI_NETWORK_LIST_OFFLOAD_EVENTID,
+ WMI_REMAIN_ON_CHNL_EVENTID,
+ WMI_CANCEL_REMAIN_ON_CHNL_EVENTID,
+ WMI_TX_STATUS_EVENTID,
+ WMI_RX_PROBE_REQ_EVENTID,
+ WMI_P2P_CAPABILITIES_EVENTID,
+ WMI_RX_ACTION_EVENTID,
+ WMI_P2P_INFO_EVENTID,
+
+ /* WPS Events */
+ WMI_WPS_GET_STATUS_EVENTID,
+ WMI_WPS_PROFILE_EVENTID,
+
+ /* more P2P events */
+ WMI_NOA_INFO_EVENTID,
+ WMI_OPPPS_INFO_EVENTID,
+ WMI_PORT_STATUS_EVENTID,
+
+ /* 802.11w */
+ WMI_GET_RSN_CAP_EVENTID,
+
+ WMI_TXE_NOTIFY_EVENTID,
+};
+
+struct wmi_ready_event_2 {
+ __le32 sw_version;
+ __le32 abi_version;
+ u8 mac_addr[ETH_ALEN];
+ u8 phy_cap;
+} __packed;
+
+/* WMI_PHY_CAPABILITY */
+enum wmi_phy_cap {
+ WMI_11A_CAP = 0x01,
+ WMI_11G_CAP = 0x02,
+ WMI_11AG_CAP = 0x03,
+ WMI_11AN_CAP = 0x04,
+ WMI_11GN_CAP = 0x05,
+ WMI_11AGN_CAP = 0x06,
+};
+
+/* Connect Event */
+struct wmi_connect_event {
+ union {
+  struct {
+   __le16 ch;
+   u8 bssid[ETH_ALEN];
+   __le16 listen_intvl;
+   __le16 beacon_intvl;
+   __le32 nw_type;
+  } sta;
+  struct {
+   u8 aid;
+   u8 phymode;
+   u8 mac_addr[ETH_ALEN];
+   u8 auth;
+   u8 keymgmt;
+   __le16 cipher;
+   u8 apsd_info;
+   u8 unused[3];
+  } ap_sta;
+  struct {
+   __le16 ch;
+   u8 bssid[ETH_ALEN];
+   u8 unused[8];
+  } ap_bss;
+ } u;
+ u8 beacon_ie_len;
+ u8 assoc_req_len;
+ u8 assoc_resp_len;
+ u8 assoc_info[];
+} __packed;
+
+/* Disconnect Event */
+enum wmi_disconnect_reason {
+ NO_NETWORK_AVAIL = 0x01,
+
+ /* bmiss */
+ LOST_LINK = 0x02,
+
+ DISCONNECT_CMD = 0x03,
+ BSS_DISCONNECTED = 0x04,
+ AUTH_FAILED = 0x05,
+ ASSOC_FAILED = 0x06,
+ NO_RESOURCES_AVAIL = 0x07,
+ CSERV_DISCONNECT = 0x08,
+ INVALID_PROFILE = 0x0a,
+ DOT11H_CHANNEL_SWITCH = 0x0b,
+ PROFILE_MISMATCH = 0x0c,
+ CONNECTION_EVICTED = 0x0d,
+ IBSS_MERGE = 0x0e,
+};
+
+/* AP mode disconnect proto_reasons */
+enum ap_disconnect_reason {
+ WMI_AP_REASON_STA_LEFT = 101,
+ WMI_AP_REASON_FROM_HOST = 102,
+ WMI_AP_REASON_COMM_TIMEOUT = 103,
+ WMI_AP_REASON_MAX_STA = 104,
+ WMI_AP_REASON_ACL = 105,
+ WMI_AP_REASON_STA_ROAM = 106,
+ WMI_AP_REASON_DFS_CHANNEL = 107,
+};
+
+#define ATH6KL_COUNTRY_RD_SHIFT 16
+
+struct ath6kl_wmi_regdomain {
+ __le32 reg_code;
+};
+
+struct wmi_disconnect_event {
+ /* reason code, see 802.11 spec. */
+ __le16 proto_reason_status;
+
+ /* set if known */
+ u8 bssid[ETH_ALEN];
+
+ /* see WMI_DISCONNECT_REASON */
+ u8 disconn_reason;
+
+ u8 assoc_resp_len;
+ u8 assoc_info[];
+} __packed;
+
+/*
+ * BSS Info Event.
+ * Mechanism used to inform host of the presence and characteristics of
+ * wireless networks present. Consists of bss info header followed by
+ * the beacon or probe-response frame body. The 802.11 header is not included.
+ */
+enum wmi_bi_ftype {
+ BEACON_FTYPE = 0x1,
+ PROBERESP_FTYPE,
+ ACTION_MGMT_FTYPE,
+ PROBEREQ_FTYPE,
+};
+
+#define DEF_LRSSI_SCAN_PERIOD 5
+#define DEF_LRSSI_ROAM_THRESHOLD 20
+#define DEF_LRSSI_ROAM_FLOOR 60
+#define DEF_SCAN_FOR_ROAM_INTVL 2
+
+enum wmi_roam_ctrl {
+ WMI_FORCE_ROAM = 1,
+ WMI_SET_ROAM_MODE,
+ WMI_SET_HOST_BIAS,
+ WMI_SET_LRSSI_SCAN_PARAMS,
+};
+
+enum wmi_roam_mode {
+ WMI_DEFAULT_ROAM_MODE = 1, /* RSSI based roam */
+ WMI_HOST_BIAS_ROAM_MODE = 2, /* Host bias based roam */
+ WMI_LOCK_BSS_MODE = 3, /* Lock to the current BSS */
+};
+
+struct bss_bias {
+ u8 bssid[ETH_ALEN];
+ s8 bias;
+} __packed;
+
+struct bss_bias_info {
+ u8 num_bss;
+ struct bss_bias bss_bias[];
+} __packed;
+
+struct low_rssi_scan_params {
+ __le16 lrssi_scan_period;
+ a_sle16 lrssi_scan_threshold;
+ a_sle16 lrssi_roam_threshold;
+ u8 roam_rssi_floor;
+ u8 reserved[1];
+} __packed;
+
+struct roam_ctrl_cmd {
+ union {
+  u8 bssid[ETH_ALEN]; /* WMI_FORCE_ROAM */
+  u8 roam_mode; /* WMI_SET_ROAM_MODE */
+  struct bss_bias_info bss; /* WMI_SET_HOST_BIAS */
+  struct low_rssi_scan_params params; /* WMI_SET_LRSSI_SCAN_PARAMS */
+ } __packed info;
+ u8 roam_ctrl;
+} __packed;
+
+struct set_beacon_int_cmd {
+ __le32 beacon_intvl;
+} __packed;
+
+struct set_dtim_cmd {
+ __le32 dtim_period;
+} __packed;
+
+/* BSS INFO HDR version 2.0 */
+struct wmi_bss_info_hdr2 {
+ __le16 ch; /* frequency in MHz */
+
+ /* see, enum wmi_bi_ftype */
+ u8 frame_type;
+
+ u8 snr; /* note: rssi = snr - 95 dBm */
+ u8 bssid[ETH_ALEN];
+ __le16 ie_mask;
+} __packed;
+
+/* Command Error Event */
+enum wmi_error_code {
+ INVALID_PARAM = 0x01,
+ ILLEGAL_STATE = 0x02,
+ INTERNAL_ERROR = 0x03,
+};
+
+struct wmi_cmd_error_event {
+ __le16 cmd_id;
+ u8 err_code;
+} __packed;
+
+struct wmi_pstream_timeout_event {
+ u8 tx_queue_num;
+ u8 rx_queue_num;
+ u8 traffic_direc;
+ u8 traffic_class;
+} __packed;
+
+/*
+ * The WMI_NEIGHBOR_REPORT Event is generated by the target to inform
+ * the host of BSS's it has found that match the current profile.
+ * It can be used by the host to cache PMKs and/or to initiate
+ * pre-authentication if the BSS supports it. The first bssid is always
+ * the current associated BSS.
+ * The bssid and bssFlags information repeats according to the number
+ * of APs reported.
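+ *
+ * As an illustrative sketch (not driver code; datap and start_preauth()
+ * are assumptions made for the example), a host could walk the
+ * variable-length payload like this:
+ *
+ *	struct wmi_neighbor_report_event *ev = (void *) datap;
+ *	int i;
+ *
+ *	for (i = 0; i < ev->num_neighbors; i++)
+ *		if (ev->neighbor[i].bss_flags & WMI_PREAUTH_CAPABLE_BSS)
+ *			start_preauth(ev->neighbor[i].bssid);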
+ */
+enum wmi_bss_flags {
+ WMI_DEFAULT_BSS_FLAGS = 0x00,
+ WMI_PREAUTH_CAPABLE_BSS = 0x01,
+ WMI_PMKID_VALID_BSS = 0x02,
+};
+
+struct wmi_neighbor_info {
+ u8 bssid[ETH_ALEN];
+ u8 bss_flags; /* enum wmi_bss_flags */
+} __packed;
+
+struct wmi_neighbor_report_event {
+ u8 num_neighbors;
+ struct wmi_neighbor_info neighbor[];
+} __packed;
+
+/* TKIP MIC Error Event */
+struct wmi_tkip_micerr_event {
+ u8 key_id;
+ u8 is_mcast;
+} __packed;
+
+enum wmi_scan_status {
+ WMI_SCAN_STATUS_SUCCESS = 0,
+};
+
+/* WMI_SCAN_COMPLETE_EVENTID */
+struct wmi_scan_complete_event {
+ a_sle32 status;
+} __packed;
+
+#define MAX_OPT_DATA_LEN 1400
+
+/*
+ * Special frame receive Event.
+ * Mechanism used to inform host of the reception of the special frames.
+ * Consists of special frame info header followed by special frame body.
+ * The 802.11 header is not included.
+ */
+struct wmi_opt_rx_info_hdr {
+ __le16 ch;
+ u8 frame_type;
+ s8 snr;
+ u8 src_addr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+} __packed;
+
+/* Reporting statistics */
+struct tx_stats {
+ __le32 pkt;
+ __le32 byte;
+ __le32 ucast_pkt;
+ __le32 ucast_byte;
+ __le32 mcast_pkt;
+ __le32 mcast_byte;
+ __le32 bcast_pkt;
+ __le32 bcast_byte;
+ __le32 rts_success_cnt;
+ __le32 pkt_per_ac[4];
+ __le32 err_per_ac[4];
+
+ __le32 err;
+ __le32 fail_cnt;
+ __le32 retry_cnt;
+ __le32 mult_retry_cnt;
+ __le32 rts_fail_cnt;
+ a_sle32 ucast_rate;
+} __packed;
+
+struct rx_stats {
+ __le32 pkt;
+ __le32 byte;
+ __le32 ucast_pkt;
+ __le32 ucast_byte;
+ __le32 mcast_pkt;
+ __le32 mcast_byte;
+ __le32 bcast_pkt;
+ __le32 bcast_byte;
+ __le32 frgment_pkt;
+
+ __le32 err;
+ __le32 crc_err;
+ __le32 key_cache_miss;
+ __le32 decrypt_err;
+ __le32 dupl_frame;
+ a_sle32 ucast_rate;
+} __packed;
+
+#define RATE_INDEX_WITHOUT_SGI_MASK 0x7f
+#define RATE_INDEX_MSB 0x80
+
+struct tkip_ccmp_stats {
+ __le32 tkip_local_mic_fail;
+ __le32 tkip_cnter_measures_invoked;
+ __le32 tkip_replays;
+ __le32 tkip_fmt_err;
+ __le32 ccmp_fmt_err;
+ __le32 ccmp_replays;
+} __packed;
+
+struct pm_stats {
+ __le32 pwr_save_failure_cnt;
+ __le16 stop_tx_failure_cnt;
+ __le16 atim_tx_failure_cnt;
+ __le16 atim_rx_failure_cnt;
+ __le16 bcn_rx_failure_cnt;
+} __packed;
+
+struct cserv_stats {
+ __le32 cs_bmiss_cnt;
+ __le32 cs_low_rssi_cnt;
+ __le16 cs_connect_cnt;
+ __le16 cs_discon_cnt;
+ a_sle16 cs_ave_beacon_rssi;
+ __le16 cs_roam_count;
+ a_sle16 cs_rssi;
+ u8 cs_snr;
+ u8 cs_ave_beacon_snr;
+ u8 cs_last_roam_msec;
+} __packed;
+
+struct wlan_net_stats {
+ struct tx_stats tx;
+ struct rx_stats rx;
+ struct tkip_ccmp_stats tkip_ccmp_stats;
+} __packed;
+
+struct arp_stats {
+ __le32 arp_received;
+ __le32 arp_matched;
+ __le32 arp_replied;
+} __packed;
+
+struct wlan_wow_stats {
+ __le32 wow_pkt_dropped;
+ __le16 wow_evt_discarded;
+ u8 wow_host_pkt_wakeups;
+ u8 wow_host_evt_wakeups;
+} __packed;
+
+struct wmi_target_stats {
+ __le32 lq_val;
+ a_sle32 noise_floor_calib;
+ struct pm_stats pm_stats;
+ struct wlan_net_stats stats;
+ struct wlan_wow_stats wow_stats;
+ struct arp_stats arp_stats;
+ struct cserv_stats cserv_stats;
+} __packed;
+
+/*
+ * WMI_RSSI_THRESHOLD_EVENTID.
+ * Indicate the RSSI events to host. Events are indicated when we breach a
+ * threshold value.
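+ *
+ * Sketch of the reporting model (the handlers below are assumptions, not
+ * driver code): the range field of wmi_rssi_threshold_event identifies
+ * which of the twelve programmed thresholds was crossed, e.g.:
+ *
+ *	struct wmi_rssi_threshold_event *ev = (void *) datap;
+ *
+ *	if (ev->range <= WMI_RSSI_THRESHOLD6_ABOVE)
+ *		on_rssi_high(a_sle16_to_cpu(ev->rssi));	// hypothetical
+ *	else
+ *		on_rssi_low(a_sle16_to_cpu(ev->rssi));	// hypothetical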
+ */ +enum wmi_rssi_threshold_val { + WMI_RSSI_THRESHOLD1_ABOVE = 0, + WMI_RSSI_THRESHOLD2_ABOVE, + WMI_RSSI_THRESHOLD3_ABOVE, + WMI_RSSI_THRESHOLD4_ABOVE, + WMI_RSSI_THRESHOLD5_ABOVE, + WMI_RSSI_THRESHOLD6_ABOVE, + WMI_RSSI_THRESHOLD1_BELOW, + WMI_RSSI_THRESHOLD2_BELOW, + WMI_RSSI_THRESHOLD3_BELOW, + WMI_RSSI_THRESHOLD4_BELOW, + WMI_RSSI_THRESHOLD5_BELOW, + WMI_RSSI_THRESHOLD6_BELOW +}; + +struct wmi_rssi_threshold_event { + a_sle16 rssi; + u8 range; +} __packed; + +enum wmi_snr_threshold_val { + WMI_SNR_THRESHOLD1_ABOVE = 1, + WMI_SNR_THRESHOLD1_BELOW, + WMI_SNR_THRESHOLD2_ABOVE, + WMI_SNR_THRESHOLD2_BELOW, + WMI_SNR_THRESHOLD3_ABOVE, + WMI_SNR_THRESHOLD3_BELOW, + WMI_SNR_THRESHOLD4_ABOVE, + WMI_SNR_THRESHOLD4_BELOW +}; + +struct wmi_snr_threshold_event { + /* see, enum wmi_snr_threshold_val */ + u8 range; + + u8 snr; +} __packed; + +/* WMI_REPORT_ROAM_TBL_EVENTID */ +#define MAX_ROAM_TBL_CAND 5 + +struct wmi_bss_roam_info { + a_sle32 roam_util; + u8 bssid[ETH_ALEN]; + s8 rssi; + s8 rssidt; + s8 last_rssi; + s8 util; + s8 bias; + + /* for alignment */ + u8 reserved; +} __packed; + +struct wmi_target_roam_tbl { + __le16 roam_mode; + __le16 num_entries; + struct wmi_bss_roam_info info[]; +} __packed; + +/* WMI_CAC_EVENTID */ +enum cac_indication { + CAC_INDICATION_ADMISSION = 0x00, + CAC_INDICATION_ADMISSION_RESP = 0x01, + CAC_INDICATION_DELETE = 0x02, + CAC_INDICATION_NO_RESP = 0x03, +}; + +#define WMM_TSPEC_IE_LEN 63 + +struct wmi_cac_event { + u8 ac; + u8 cac_indication; + u8 status_code; + u8 tspec_suggestion[WMM_TSPEC_IE_LEN]; +} __packed; + +/* WMI_APLIST_EVENTID */ + +enum aplist_ver { + APLIST_VER1 = 1, +}; + +struct wmi_ap_info_v1 { + u8 bssid[ETH_ALEN]; + __le16 channel; +} __packed; + +union wmi_ap_info { + struct wmi_ap_info_v1 ap_info_v1; +} __packed; + +struct wmi_aplist_event { + u8 ap_list_ver; + u8 num_ap; + union wmi_ap_info ap_list[]; +} __packed; + +/* Developer Commands */ + +/* + * WMI_SET_BITRATE_CMDID + * + * Get bit rate cmd uses same definition as set bit rate cmd + */ +enum wmi_bit_rate { + RATE_AUTO = -1, + RATE_1Mb = 0, + RATE_2Mb = 1, + RATE_5_5Mb = 2, + RATE_11Mb = 3, + RATE_6Mb = 4, + RATE_9Mb = 5, + RATE_12Mb = 6, + RATE_18Mb = 7, + RATE_24Mb = 8, + RATE_36Mb = 9, + RATE_48Mb = 10, + RATE_54Mb = 11, + RATE_MCS_0_20 = 12, + RATE_MCS_1_20 = 13, + RATE_MCS_2_20 = 14, + RATE_MCS_3_20 = 15, + RATE_MCS_4_20 = 16, + RATE_MCS_5_20 = 17, + RATE_MCS_6_20 = 18, + RATE_MCS_7_20 = 19, + RATE_MCS_0_40 = 20, + RATE_MCS_1_40 = 21, + RATE_MCS_2_40 = 22, + RATE_MCS_3_40 = 23, + RATE_MCS_4_40 = 24, + RATE_MCS_5_40 = 25, + RATE_MCS_6_40 = 26, + RATE_MCS_7_40 = 27, +}; + +struct wmi_bit_rate_reply { + /* see, enum wmi_bit_rate */ + s8 rate_index; +} __packed; + +/* + * WMI_SET_FIXRATES_CMDID + * + * Get fix rates cmd uses same definition as set fix rates cmd + */ +struct wmi_fix_rates_reply { + /* see wmi_bit_rate */ + __le32 fix_rate_mask; +} __packed; + +enum roam_data_type { + /* get the roam time data */ + ROAM_DATA_TIME = 1, +}; + +struct wmi_target_roam_time { + __le32 disassoc_time; + __le32 no_txrx_time; + __le32 assoc_time; + __le32 allow_txrx_time; + u8 disassoc_bssid[ETH_ALEN]; + s8 disassoc_bss_rssi; + u8 assoc_bssid[ETH_ALEN]; + s8 assoc_bss_rssi; +} __packed; + +enum wmi_txop_cfg { + WMI_TXOP_DISABLED = 0, + WMI_TXOP_ENABLED +}; + +struct wmi_set_wmm_txop_cmd { + u8 txop_enable; +} __packed; + +struct wmi_set_keepalive_cmd { + u8 keep_alive_intvl; +} __packed; + +struct wmi_get_keepalive_cmd { + __le32 configured; + u8 keep_alive_intvl; +} __packed; + +struct 
wmi_set_appie_cmd { + u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */ + u8 ie_len; + u8 ie_info[]; +} __packed; + +struct wmi_set_ie_cmd { + u8 ie_id; + u8 ie_field; /* enum wmi_ie_field_type */ + u8 ie_len; + u8 reserved; + u8 ie_info[]; +} __packed; + +/* Notify the WSC registration status to the target */ +#define WSC_REG_ACTIVE 1 +#define WSC_REG_INACTIVE 0 + +#define WOW_MAX_FILTERS_PER_LIST 4 +#define WOW_PATTERN_SIZE 64 + +#define MAC_MAX_FILTERS_PER_LIST 4 + +struct wow_filter { + u8 wow_valid_filter; + u8 wow_filter_id; + u8 wow_filter_size; + u8 wow_filter_offset; + u8 wow_filter_mask[WOW_PATTERN_SIZE]; + u8 wow_filter_pattern[WOW_PATTERN_SIZE]; +} __packed; + +#define MAX_IP_ADDRS 2 + +struct wmi_set_ip_cmd { + /* IP in network byte order */ + __be32 ips[MAX_IP_ADDRS]; +} __packed; + +enum ath6kl_wow_filters { + WOW_FILTER_SSID = BIT(1), + WOW_FILTER_OPTION_MAGIC_PACKET = BIT(2), + WOW_FILTER_OPTION_EAP_REQ = BIT(3), + WOW_FILTER_OPTION_PATTERNS = BIT(4), + WOW_FILTER_OPTION_OFFLOAD_ARP = BIT(5), + WOW_FILTER_OPTION_OFFLOAD_NS = BIT(6), + WOW_FILTER_OPTION_OFFLOAD_GTK = BIT(7), + WOW_FILTER_OPTION_8021X_4WAYHS = BIT(8), + WOW_FILTER_OPTION_NLO_DISCVRY = BIT(9), + WOW_FILTER_OPTION_NWK_DISASSOC = BIT(10), + WOW_FILTER_OPTION_GTK_ERROR = BIT(11), + WOW_FILTER_OPTION_TEST_MODE = BIT(15), +}; + +enum ath6kl_host_mode { + ATH6KL_HOST_MODE_AWAKE, + ATH6KL_HOST_MODE_ASLEEP, +}; + +struct wmi_set_host_sleep_mode_cmd { + __le32 awake; + __le32 asleep; +} __packed; + +enum ath6kl_wow_mode { + ATH6KL_WOW_MODE_DISABLE, + ATH6KL_WOW_MODE_ENABLE, +}; + +struct wmi_set_wow_mode_cmd { + __le32 enable_wow; + __le32 filter; + __le16 host_req_delay; +} __packed; + +struct wmi_add_wow_pattern_cmd { + u8 filter_list_id; + u8 filter_size; + u8 filter_offset; + u8 filter[]; +} __packed; + +struct wmi_del_wow_pattern_cmd { + __le16 filter_list_id; + __le16 filter_id; +} __packed; + +/* WMI_SET_TXE_NOTIFY_CMDID */ +struct wmi_txe_notify_cmd { + __le32 rate; + __le32 pkts; + __le32 intvl; +} __packed; + +/* WMI_TXE_NOTIFY_EVENTID */ +struct wmi_txe_notify_event { + __le32 rate; + __le32 pkts; +} __packed; + +/* WMI_SET_AKMP_PARAMS_CMD */ + +struct wmi_pmkid { + u8 pmkid[WMI_PMKID_LEN]; +} __packed; + +/* WMI_GET_PMKID_LIST_CMD Reply */ +struct wmi_pmkid_list_reply { + __le32 num_pmkid; + u8 bssid_list[ETH_ALEN][1]; + struct wmi_pmkid pmkid_list[1]; +} __packed; + +/* WMI_ADDBA_REQ_EVENTID */ +struct wmi_addba_req_event { + u8 tid; + u8 win_sz; + __le16 st_seq_no; + + /* f/w response for ADDBA Req; OK (0) or failure (!=0) */ + u8 status; +} __packed; + +/* WMI_ADDBA_RESP_EVENTID */ +struct wmi_addba_resp_event { + u8 tid; + + /* OK (0), failure (!=0) */ + u8 status; + + /* three values: not supported(0), 3839, 8k */ + __le16 amsdu_sz; +} __packed; + +/* WMI_DELBA_EVENTID + * f/w received a DELBA for peer and processed it. 
+ * Host is notified of this
+ */
+struct wmi_delba_event {
+ u8 tid;
+ u8 is_peer_initiator;
+ __le16 reason_code;
+} __packed;
+
+#define PEER_NODE_JOIN_EVENT 0x00
+#define PEER_NODE_LEAVE_EVENT 0x01
+#define PEER_FIRST_NODE_JOIN_EVENT 0x10
+#define PEER_LAST_NODE_LEAVE_EVENT 0x11
+
+struct wmi_peer_node_event {
+ u8 event_code;
+ u8 peer_mac_addr[ETH_ALEN];
+} __packed;
+
+/* Transmit complete event data structure(s) */
+
+/* version 1 of tx complete msg */
+struct tx_complete_msg_v1 {
+#define TX_COMPLETE_STATUS_SUCCESS 0
+#define TX_COMPLETE_STATUS_RETRIES 1
+#define TX_COMPLETE_STATUS_NOLINK 2
+#define TX_COMPLETE_STATUS_TIMEOUT 3
+#define TX_COMPLETE_STATUS_OTHER 4
+
+ u8 status;
+
+ /* packet ID to identify parent packet */
+ u8 pkt_id;
+
+ /* rate index on successful transmission */
+ u8 rate_idx;
+
+ /* number of ACK failures in tx attempt */
+ u8 ack_failures;
+} __packed;
+
+struct wmi_tx_complete_event {
+ /* no of tx comp msgs following this struct */
+ u8 num_msg;
+
+ /* length in bytes for each individual msg following this struct */
+ u8 msg_len;
+
+ /* version of tx complete msg data following this struct */
+ u8 msg_type;
+
+ /* individual messages follow this header */
+ u8 reserved;
+} __packed;
+
+/*
+ * ------- AP Mode definitions --------------
+ */
+
+/*
+ * !!! Warning !!!
+ * Changing the following values requires recompiling both the driver
+ * and the firmware.
+ */
+#define AP_MAX_NUM_STA 10
+
+/* Special AID used to set the DTIM flag in the beacons */
+#define MCAST_AID 0xFF
+
+#define DEF_AP_COUNTRY_CODE "US "
+
+/* Used with WMI_AP_SET_NUM_STA_CMDID */
+
+/*
+ * Used with WMI_AP_SET_MLME_CMDID
+ */
+
+/* MLME Commands */
+#define WMI_AP_MLME_ASSOC 1 /* associate station */
+#define WMI_AP_DISASSOC 2 /* disassociate station */
+#define WMI_AP_DEAUTH 3 /* deauthenticate station */
+#define WMI_AP_MLME_AUTHORIZE 4 /* authorize station */
+#define WMI_AP_MLME_UNAUTHORIZE 5 /* unauthorize station */
+
+struct wmi_ap_set_mlme_cmd {
+ u8 mac[ETH_ALEN];
+ __le16 reason; /* 802.11 reason code */
+ u8 cmd; /* operation to perform (WMI_AP_*) */
+} __packed;
+
+struct wmi_ap_set_pvb_cmd {
+ __le32 flag;
+ __le16 rsvd;
+ __le16 aid;
+} __packed;
+
+struct wmi_rx_frame_format_cmd {
+ /* version of meta data for rx packets <0 = default> (0-7 = valid) */
+ u8 meta_ver;
+
+ /*
+ * 1 == leave .11 header intact,
+ * 0 == replace .11 header with .3 <default>
+ */
+ u8 dot11_hdr;
+
+ /*
+ * 1 == defragmentation is performed by host,
+ * 0 == performed by target <default>
+ */
+ u8 defrag_on_host;
+
+ /* for alignment */
+ u8 reserved[1];
+} __packed;
+
+struct wmi_ap_hidden_ssid_cmd {
+ u8 hidden_ssid;
+} __packed;
+
+struct wmi_set_inact_period_cmd {
+ __le32 inact_period;
+ u8 num_null_func;
+} __packed;
+
+/* AP mode events */
+struct wmi_ap_set_apsd_cmd {
+ u8 enable;
+} __packed;
+
+enum wmi_ap_apsd_buffered_traffic_flags {
+ WMI_AP_APSD_NO_DELIVERY_FRAMES = 0x1,
+};
+
+struct wmi_ap_apsd_buffered_traffic_cmd {
+ __le16 aid;
+ __le16 bitmap;
+ __le32 flags;
+} __packed;
+
+/* WMI_PS_POLL_EVENT */
+struct wmi_pspoll_event {
+ __le16 aid;
+} __packed;
+
+struct wmi_per_sta_stat {
+ __le32 tx_bytes;
+ __le32 tx_pkts;
+ __le32 tx_error;
+ __le32 tx_discard;
+ __le32 rx_bytes;
+ __le32 rx_pkts;
+ __le32 rx_error;
+ __le32 rx_discard;
+ __le32 aid;
+} __packed;
+
+struct wmi_ap_mode_stat {
+ __le32 action;
+ struct wmi_per_sta_stat sta[AP_MAX_NUM_STA + 1];
+} __packed;
+
+/* End of AP mode definitions */
+
+struct wmi_remain_on_chnl_cmd {
+ __le32 freq;
+ __le32 duration;
+} __packed;
+
+/*
wmi_send_action_cmd is to be deprecated. Use + * wmi_send_mgmt_cmd instead. The new structure supports P2P mgmt + * operations using station interface. + */ +struct wmi_send_action_cmd { + __le32 id; + __le32 freq; + __le32 wait; + __le16 len; + u8 data[]; +} __packed; + +struct wmi_send_mgmt_cmd { + __le32 id; + __le32 freq; + __le32 wait; + __le32 no_cck; + __le16 len; + u8 data[]; +} __packed; + +struct wmi_tx_status_event { + __le32 id; + u8 ack_status; +} __packed; + +struct wmi_probe_req_report_cmd { + u8 enable; +} __packed; + +struct wmi_disable_11b_rates_cmd { + u8 disable; +} __packed; + +struct wmi_set_appie_extended_cmd { + u8 role_id; + u8 mgmt_frm_type; + u8 ie_len; + u8 ie_info[]; +} __packed; + +struct wmi_remain_on_chnl_event { + __le32 freq; + __le32 duration; +} __packed; + +struct wmi_cancel_remain_on_chnl_event { + __le32 freq; + __le32 duration; + u8 status; +} __packed; + +struct wmi_rx_action_event { + __le32 freq; + __le16 len; + u8 data[]; +} __packed; + +struct wmi_p2p_capabilities_event { + __le16 len; + u8 data[]; +} __packed; + +struct wmi_p2p_rx_probe_req_event { + __le32 freq; + __le16 len; + u8 data[]; +} __packed; + +#define P2P_FLAG_CAPABILITIES_REQ (0x00000001) +#define P2P_FLAG_MACADDR_REQ (0x00000002) +#define P2P_FLAG_HMODEL_REQ (0x00000002) + +struct wmi_get_p2p_info { + __le32 info_req_flags; +} __packed; + +struct wmi_p2p_info_event { + __le32 info_req_flags; + __le16 len; + u8 data[]; +} __packed; + +struct wmi_p2p_capabilities { + u8 go_power_save; +} __packed; + +struct wmi_p2p_macaddr { + u8 mac_addr[ETH_ALEN]; +} __packed; + +struct wmi_p2p_hmodel { + u8 p2p_model; +} __packed; + +struct wmi_p2p_probe_response_cmd { + __le32 freq; + u8 destination_addr[ETH_ALEN]; + __le16 len; + u8 data[]; +} __packed; + +/* Extended WMI (WMIX) + * + * Extended WMIX commands are encapsulated in a WMI message with + * cmd=WMI_EXTENSION_CMD. + * + * Extended WMI commands are those that are needed during wireless + * operation, but which are not really wireless commands. This allows, + * for instance, platform-specific commands. Extended WMI commands are + * embedded in a WMI command message with WMI_COMMAND_ID=WMI_EXTENSION_CMDID. + * Extended WMI events are similarly embedded in a WMI event message with + * WMI_EVENT_ID=WMI_EXTENSION_EVENTID. 
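+ *
+ * For illustration (framing only; the layout sketch below is an editorial
+ * assumption drawn from the structures in this header), a heartbeat
+ * challenge response travels as:
+ *
+ *	| wmi_cmd_hdr         | wmix_cmd_hdr                 | payload        |
+ *	| WMI_EXTENSION_CMDID | WMIX_HB_CHALLENGE_RESP_CMDID | cookie, source |
+ *
+ * which is what ath6kl_wmi_get_challenge_resp_cmd(), declared below,
+ * builds from struct wmix_hb_challenge_resp_cmd.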
+ */ +struct wmix_cmd_hdr { + __le32 cmd_id; +} __packed; + +enum wmix_command_id { + WMIX_DSETOPEN_REPLY_CMDID = 0x2001, + WMIX_DSETDATA_REPLY_CMDID, + WMIX_GPIO_OUTPUT_SET_CMDID, + WMIX_GPIO_INPUT_GET_CMDID, + WMIX_GPIO_REGISTER_SET_CMDID, + WMIX_GPIO_REGISTER_GET_CMDID, + WMIX_GPIO_INTR_ACK_CMDID, + WMIX_HB_CHALLENGE_RESP_CMDID, + WMIX_DBGLOG_CFG_MODULE_CMDID, + WMIX_PROF_CFG_CMDID, /* 0x200a */ + WMIX_PROF_ADDR_SET_CMDID, + WMIX_PROF_START_CMDID, + WMIX_PROF_STOP_CMDID, + WMIX_PROF_COUNT_GET_CMDID, +}; + +enum wmix_event_id { + WMIX_DSETOPENREQ_EVENTID = 0x3001, + WMIX_DSETCLOSE_EVENTID, + WMIX_DSETDATAREQ_EVENTID, + WMIX_GPIO_INTR_EVENTID, + WMIX_GPIO_DATA_EVENTID, + WMIX_GPIO_ACK_EVENTID, + WMIX_HB_CHALLENGE_RESP_EVENTID, + WMIX_DBGLOG_EVENTID, + WMIX_PROF_COUNT_EVENTID, +}; + +/* + * ------Error Detection support------- + */ + +/* + * WMIX_HB_CHALLENGE_RESP_CMDID + * Heartbeat Challenge Response command + */ +struct wmix_hb_challenge_resp_cmd { + __le32 cookie; + __le32 source; +} __packed; + +struct ath6kl_wmix_dbglog_cfg_module_cmd { + __le32 valid; + __le32 config; +} __packed; + +/* End of Extended WMI (WMIX) */ + +enum wmi_sync_flag { + NO_SYNC_WMIFLAG = 0, + + /* transmit all queued data before cmd */ + SYNC_BEFORE_WMIFLAG, + + /* any new data waits until cmd execs */ + SYNC_AFTER_WMIFLAG, + + SYNC_BOTH_WMIFLAG, + + /* end marker */ + END_WMIFLAG +}; + +enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi); +void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id); +int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb); +int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb, + u8 msg_type, u32 flags, + enum wmi_data_hdr_data_type data_type, + u8 meta_ver, void *tx_meta_info, u8 if_idx); + +int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb); +int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb); +int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx, + struct sk_buff *skb, u32 layer2_priority, + bool wmm_enabled, u8 *ac); + +int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb); + +int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb, + enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag); + +int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx, + enum network_type nw_type, + enum dot11_auth_mode dot11_auth_mode, + enum auth_mode auth_mode, + enum ath6kl_crypto_type pairwise_crypto, + u8 pairwise_crypto_len, + enum ath6kl_crypto_type group_crypto, + u8 group_crypto_len, int ssid_len, u8 *ssid, + u8 *bssid, u16 channel, u32 ctrl_flags, + u8 nw_subtype); + +int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid, + u16 channel); +int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx); + +int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx, + enum wmi_scan_type scan_type, + u32 force_fgscan, u32 is_legacy, + u32 home_dwell_time, u32 force_scan_interval, + s8 num_chan, u16 *ch_list, u32 no_cck, + u32 *rates); +int ath6kl_wmi_enable_sched_scan_cmd(struct wmi *wmi, u8 if_idx, bool enable); + +int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, u16 fg_start_sec, + u16 fg_end_sec, u16 bg_sec, + u16 minact_chdw_msec, u16 maxact_chdw_msec, + u16 pas_chdw_msec, u8 short_scan_ratio, + u8 scan_ctrl_flag, u32 max_dfsch_act_time, + u16 maxact_scan_per_ssid); +int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter, + u32 ie_mask); +int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag, + u8 ssid_len, u8 *ssid); +int 
ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx, + u16 listen_interval, + u16 listen_beacons); +int ath6kl_wmi_bmisstime_cmd(struct wmi *wmi, u8 if_idx, + u16 bmiss_time, u16 num_beacons); +int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode); +int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period, + u16 ps_poll_num, u16 dtim_policy, + u16 tx_wakup_policy, u16 num_tx_to_wakeup, + u16 ps_fail_event_policy); +int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx, + struct wmi_create_pstream_cmd *pstream); +int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class, + u8 tsid); +int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout); + +int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold); +int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status, + u8 preamble_policy); + +int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source); +int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config); + +int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx); +int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index, + enum ath6kl_crypto_type key_type, + u8 key_usage, u8 key_len, + u8 *key_rsc, unsigned int key_rsc_len, + u8 *key_material, + u8 key_op_ctrl, u8 *mac_addr, + enum wmi_sync_flag sync_flag); +int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, const u8 *krk); +int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index); +int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid, + const u8 *pmkid, bool set); +int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM); +int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx); +int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi); + +int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg); +int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, + u8 keep_alive_intvl); +int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx, + enum nl80211_band band, + struct ath6kl_htcap *htcap); +int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len); + +s32 ath6kl_wmi_get_rate(struct wmi *wmi, s8 rate_index); + +int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, u8 if_idx, + __be32 ips0, __be32 ips1); +int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx, + enum ath6kl_host_mode host_mode); +int ath6kl_wmi_set_bitrate_mask(struct wmi *wmi, u8 if_idx, + const struct cfg80211_bitrate_mask *mask); +int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx, + enum ath6kl_wow_mode wow_mode, + u32 filter, u16 host_req_delay); +int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx, + u8 list_id, u8 filter_size, + u8 filter_offset, const u8 *filter, + const u8 *mask); +int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx, + u16 list_id, u16 filter_id); +int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi); +int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi); +int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period); +int ath6kl_wmi_ap_set_beacon_intvl_cmd(struct wmi *wmi, u8 if_idx, + u32 beacon_interval); +int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid); +int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode); +int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on); +int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, + u8 *filter, bool add_filter); +int ath6kl_wmi_sta_bmiss_enhance_cmd(struct 
wmi *wmi, u8 if_idx, bool enable);
+int ath6kl_wmi_set_txe_notify(struct wmi *wmi, u8 idx,
+ u32 rate, u32 pkts, u32 intvl);
+int ath6kl_wmi_set_regdomain_cmd(struct wmi *wmi, const char *alpha2);
+
+/* AP mode uAPSD */
+int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable);
+
+int ath6kl_wmi_set_apsd_bfrd_traf(struct wmi *wmi,
+ u8 if_idx, u16 aid,
+ u16 bitmap, u32 flags);
+
+u8 ath6kl_wmi_get_traffic_class(u8 user_priority);
+
+u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri);
+
+/* AP mode */
+int ath6kl_wmi_ap_hidden_ssid(struct wmi *wmi, u8 if_idx, bool enable);
+int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
+ struct wmi_connect_cmd *p);
+
+int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd,
+ const u8 *mac, u16 reason);
+
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid, bool flag);
+
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
+ u8 rx_meta_version,
+ bool rx_dot11_hdr, bool defrag_on_host);
+
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+ const u8 *ie, u8 ie_len);
+
+int ath6kl_wmi_set_ie_cmd(struct wmi *wmi, u8 if_idx, u8 ie_id, u8 ie_field,
+ const u8 *ie_info, u8 ie_len);
+
+/* P2P */
+int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
+
+int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+ u32 dur);
+
+int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len,
+ u32 no_cck);
+
+int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+ const u8 *dst, const u8 *data,
+ u16 data_len);
+
+int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable);
+
+int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags);
+
+int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx);
+
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+ const u8 *ie, u8 ie_len);
+
+int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout);
+
+void ath6kl_wmi_sscan_timer(struct timer_list *t);
+
+int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
+
+struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
+void *ath6kl_wmi_init(struct ath6kl *devt);
+void ath6kl_wmi_shutdown(struct wmi *wmi);
+void ath6kl_wmi_reset(struct wmi *wmi);
+
+#endif /* WMI_H */
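A minimal end-to-end sketch against the API above (not taken from the driver; the wmi handle, if_idx 0 and the open/unencrypted profile are assumptions made for illustration):

	static const u8 ssid[] = "example";
	int ret;

	/* WMI_CONNECT_CMDID: open-auth, unencrypted infrastructure profile;
	 * bssid == NULL and channel == 0 leave the choice to the target */
	ret = ath6kl_wmi_connect_cmd(wmi, 0, INFRA_NETWORK, OPEN_AUTH,
				     NONE_AUTH, NONE_CRYPT, 0, NONE_CRYPT, 0,
				     sizeof(ssid) - 1, (u8 *) ssid, NULL,
				     0, 0, SUBTYPE_NONE);

In the driver proper this call is issued from the cfg80211 connect path, with the profile fields taken from the vif state rather than literals.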